Documentation ¶
Overview ¶
Package svcconfig is a generated protocol buffer package.
It is generated from these files:
github.com/luci/luci-go/logdog/api/config/svcconfig/archival.proto
github.com/luci/luci-go/logdog/api/config/svcconfig/config.proto
github.com/luci/luci-go/logdog/api/config/svcconfig/project.proto
github.com/luci/luci-go/logdog/api/config/svcconfig/storage.proto
github.com/luci/luci-go/logdog/api/config/svcconfig/transport.proto
It has these top-level messages:
ArchiveIndexConfig
Config
Coordinator
Collector
Archivist
ProjectConfig
Storage
Transport
Package svcconfig stores service configuration for a LogDog instance.
Each LogDog instance will have a single Config protobuf. It will be located under config set "services/<app-id>", path "services.cfg". The path is exposed via ServiceConfigPath.
Each LogDog project will have its own project-specific configuration. It will be located under config set "projects/<project-name>", path "<app-id>.cfg".
Package svcconfig contains LogDog service configuration protobufs.
The package name here must match the protobuf package name, as the generated files will reside in the same directory.
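As a sketch of how a service might load and decode its Config: the snippet below assumes the configuration is stored in text-proto form and parsed with proto.UnmarshalText from github.com/golang/protobuf/proto; the fetch from the config service is omitted, and cfgText and the group names are hypothetical placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/luci/luci-go/logdog/api/config/svcconfig"
)

func main() {
	// Hypothetical contents of the "services/<app-id>" config set at
	// path ServiceConfigPath, fetched by means not shown here.
	cfgText := `
		coordinator {
			admin_auth_group: "logdog-admins"
			service_auth_group: "logdog-services"
		}
	`

	var cfg svcconfig.Config
	// proto.UnmarshalText parses the text-proto representation into cfg.
	if err := proto.UnmarshalText(cfgText, &cfg); err != nil {
		log.Fatalf("failed to parse service config: %v", err)
	}
	fmt.Println(cfg.GetCoordinator().GetAdminAuthGroup())
}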
Index ¶
- Constants
- func ProjectConfigPath(serviceName string) string
- type ArchiveIndexConfig
- func (*ArchiveIndexConfig) Descriptor() ([]byte, []int)
- func (m *ArchiveIndexConfig) GetByteRange() int32
- func (m *ArchiveIndexConfig) GetPrefixRange() int32
- func (m *ArchiveIndexConfig) GetStreamRange() int32
- func (*ArchiveIndexConfig) ProtoMessage()
- func (m *ArchiveIndexConfig) Reset()
- func (m *ArchiveIndexConfig) String() string
- type Archivist
- func (*Archivist) Descriptor() ([]byte, []int)
- func (m *Archivist) GetArchiveIndexConfig() *ArchiveIndexConfig
- func (m *Archivist) GetGsStagingBucket() string
- func (m *Archivist) GetRenderAllStreams() bool
- func (m *Archivist) GetSubscription() string
- func (m *Archivist) GetTasks() int32
- func (*Archivist) ProtoMessage()
- func (m *Archivist) Reset()
- func (m *Archivist) String() string
- type Collector
- func (*Collector) Descriptor() ([]byte, []int)
- func (m *Collector) GetMaxConcurrentMessages() int32
- func (m *Collector) GetMaxMessageWorkers() int32
- func (m *Collector) GetStateCacheExpiration() *google_protobuf.Duration
- func (m *Collector) GetStateCacheSize() int32
- func (*Collector) ProtoMessage()
- func (m *Collector) Reset()
- func (m *Collector) String() string
- type Config
- func (*Config) Descriptor() ([]byte, []int)
- func (m *Config) GetArchivist() *Archivist
- func (m *Config) GetCollector() *Collector
- func (m *Config) GetCoordinator() *Coordinator
- func (m *Config) GetStorage() *Storage
- func (m *Config) GetTransport() *Transport
- func (*Config) ProtoMessage()
- func (m *Config) Reset()
- func (m *Config) String() string
- type Coordinator
- func (*Coordinator) Descriptor() ([]byte, []int)
- func (m *Coordinator) GetAdminAuthGroup() string
- func (m *Coordinator) GetArchiveDelayMax() *google_protobuf.Duration
- func (m *Coordinator) GetArchiveSettleDelay() *google_protobuf.Duration
- func (m *Coordinator) GetArchiveTopic() string
- func (m *Coordinator) GetPrefixExpiration() *google_protobuf.Duration
- func (m *Coordinator) GetRpcAllowOrigins() []string
- func (m *Coordinator) GetServiceAuthGroup() string
- func (*Coordinator) ProtoMessage()
- func (m *Coordinator) Reset()
- func (m *Coordinator) String() string
- type ProjectConfig
- func (*ProjectConfig) Descriptor() ([]byte, []int)
- func (m *ProjectConfig) GetArchiveGsBucket() string
- func (m *ProjectConfig) GetArchiveIndexConfig() *ArchiveIndexConfig
- func (m *ProjectConfig) GetMaxStreamAge() *google_protobuf.Duration
- func (m *ProjectConfig) GetPrefixExpiration() *google_protobuf.Duration
- func (m *ProjectConfig) GetReaderAuthGroups() []string
- func (m *ProjectConfig) GetRenderAllStreams() bool
- func (m *ProjectConfig) GetWriterAuthGroups() []string
- func (*ProjectConfig) ProtoMessage()
- func (m *ProjectConfig) Reset()
- func (m *ProjectConfig) String() string
- type Storage
- func (*Storage) Descriptor() ([]byte, []int)
- func (m *Storage) GetBigtable() *Storage_BigTable
- func (m *Storage) GetMaxLogAge() *google_protobuf.Duration
- func (m *Storage) GetType() isStorage_Type
- func (*Storage) ProtoMessage()
- func (m *Storage) Reset()
- func (m *Storage) String() string
- func (*Storage) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type Storage_BigTable
- func (*Storage_BigTable) Descriptor() ([]byte, []int)
- func (m *Storage_BigTable) GetInstance() string
- func (m *Storage_BigTable) GetLogTableName() string
- func (m *Storage_BigTable) GetProject() string
- func (*Storage_BigTable) ProtoMessage()
- func (m *Storage_BigTable) Reset()
- func (m *Storage_BigTable) String() string
- type Storage_Bigtable
- type Transport
- func (*Transport) Descriptor() ([]byte, []int)
- func (m *Transport) GetPubsub() *Transport_PubSub
- func (m *Transport) GetType() isTransport_Type
- func (*Transport) ProtoMessage()
- func (m *Transport) Reset()
- func (m *Transport) String() string
- func (*Transport) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type Transport_PubSub
- func (*Transport_PubSub) Descriptor() ([]byte, []int)
- func (m *Transport_PubSub) GetProject() string
- func (m *Transport_PubSub) GetSubscription() string
- func (m *Transport_PubSub) GetTopic() string
- func (*Transport_PubSub) ProtoMessage()
- func (m *Transport_PubSub) Reset()
- func (m *Transport_PubSub) String() string
- type Transport_Pubsub
Constants ¶
const (
// ServiceConfigPath is the config service path of the Config protobuf.
ServiceConfigPath = "services.cfg"
)
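For illustration, a client can combine the config set described in the overview with ServiceConfigPath to address the service configuration. This is a sketch only; the helper name is hypothetical.
// A sketch only; assumes:
//   import "github.com/luci/luci-go/logdog/api/config/svcconfig"
func serviceConfigLocation(appID string) (configSet, path string) {
	// Per the overview, the service Config lives under config set
	// "services/<app-id>" at path ServiceConfigPath ("services.cfg").
	return "services/" + appID, svcconfig.ServiceConfigPath
}
For example, serviceConfigLocation("my-logdog-app") would return ("services/my-logdog-app", "services.cfg").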
Variables ¶
This section is empty.
Functions ¶
func ProjectConfigPath ¶
func ProjectConfigPath(serviceName string) string
ProjectConfigPath returns the config file path for project-specific configuration of the named service; per the overview, this is "<app-id>.cfg" within the "projects/<project-name>" config set.
Types ¶
type ArchiveIndexConfig ¶
type ArchiveIndexConfig struct {
// If not zero, the maximum number of stream indices between index entries.
StreamRange int32 `protobuf:"varint,1,opt,name=stream_range,json=streamRange" json:"stream_range,omitempty"`
// If not zero, the maximum number of prefix indices between index entries.
PrefixRange int32 `protobuf:"varint,2,opt,name=prefix_range,json=prefixRange" json:"prefix_range,omitempty"`
// If not zero, the maximum number of log data bytes between index entries.
ByteRange int32 `protobuf:"varint,3,opt,name=byte_range,json=byteRange" json:"byte_range,omitempty"`
}
ArchiveIndexConfig specifies how archive indexes should be generated.
By default, each log entry will be present in the index. This is generally overkill; instead, the index can be more sparse at the expense of a slightly higher data load.
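A minimal sketch of a sparser index configuration; the values are purely illustrative, not recommendations.
// A sketch only; values are illustrative. Assumes:
//   import "github.com/luci/luci-go/logdog/api/config/svcconfig"
func sparseIndexConfig() *svcconfig.ArchiveIndexConfig {
	return &svcconfig.ArchiveIndexConfig{
		StreamRange: 64,          // at most 64 stream indices between index entries
		PrefixRange: 64,          // at most 64 prefix indices between index entries
		ByteRange:   1024 * 1024, // at most 1 MiB of log data between index entries
	}
}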
func (*ArchiveIndexConfig) Descriptor ¶
func (*ArchiveIndexConfig) Descriptor() ([]byte, []int)
func (*ArchiveIndexConfig) GetByteRange ¶
func (m *ArchiveIndexConfig) GetByteRange() int32
func (*ArchiveIndexConfig) GetPrefixRange ¶
func (m *ArchiveIndexConfig) GetPrefixRange() int32
func (*ArchiveIndexConfig) GetStreamRange ¶
func (m *ArchiveIndexConfig) GetStreamRange() int32
func (*ArchiveIndexConfig) ProtoMessage ¶
func (*ArchiveIndexConfig) ProtoMessage()
func (*ArchiveIndexConfig) Reset ¶
func (m *ArchiveIndexConfig) Reset()
func (*ArchiveIndexConfig) String ¶
func (m *ArchiveIndexConfig) String() string
type Archivist ¶
type Archivist struct {
// The name of the archival Pub/Sub subscription.
//
// This should be connected to "archive_topic", and the Archivist must have
// permission to consume from this subscription.
Subscription string `protobuf:"bytes,1,opt,name=subscription" json:"subscription,omitempty"`
// The number of tasks to run at a time. If blank, the archivist will choose a
// default value.
Tasks int32 `protobuf:"varint,2,opt,name=tasks" json:"tasks,omitempty"`
// The name of the staging storage bucket. All projects will share the same
// staging bucket. Logs for a project will be staged under:
//
// gs://<gs_staging_bucket>/<app-id>/<project-name>/...
GsStagingBucket string `protobuf:"bytes,3,opt,name=gs_staging_bucket,json=gsStagingBucket" json:"gs_staging_bucket,omitempty"`
// Service-wide index configuration. This is used if per-project configuration
// is not specified.
ArchiveIndexConfig *ArchiveIndexConfig `protobuf:"bytes,10,opt,name=archive_index_config,json=archiveIndexConfig" json:"archive_index_config,omitempty"`
// If true, always render the log entries as a binary file during archival,
// regardless of whether a specific stream has a binary file extension.
//
// By default, a stream will only be rendered as a binary if its descriptor
// includes a non-empty binary file extension field.
//
// The binary stream consists of each log entry's data rendered back-to-back.
// - For text streams, this produces a text document similar to the source
// text.
// - For binary streams, this reproduces the source contiguous binary file.
// - For datagram streams, the size-prefixed datagrams are written back-to-
// back.
//
// Enabling this option will consume roughly twice the archival space, as each
// stream's data will be archived once as a series of log entries and once as
// a binary file.
//
// Streams without an explicit binary file extension will default to ".bin" if
// this is enabled.
RenderAllStreams bool `protobuf:"varint,13,opt,name=render_all_streams,json=renderAllStreams" json:"render_all_streams,omitempty"`
}
Configuration for the Archivist microservice.
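A sketch of an Archivist block; the subscription and bucket names are hypothetical.
// A sketch only; names are hypothetical. Assumes:
//   import "github.com/luci/luci-go/logdog/api/config/svcconfig"
func exampleArchivist() *svcconfig.Archivist {
	return &svcconfig.Archivist{
		// Full Pub/Sub subscription name, connected to the Coordinator's
		// "archive_topic".
		Subscription:    "projects/my-project/subscriptions/logdog-archive",
		GsStagingBucket: "my-logdog-staging", // staging bucket shared by all projects
		// Service-wide index fallback, used when a project does not set its own.
		ArchiveIndexConfig: &svcconfig.ArchiveIndexConfig{StreamRange: 64},
	}
}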
func (*Archivist) GetArchiveIndexConfig ¶
func (m *Archivist) GetArchiveIndexConfig() *ArchiveIndexConfig
type Collector ¶
type Collector struct {
// The maximum number of concurrent transport messages to process. If <= 0,
// a default will be chosen based on the transport.
MaxConcurrentMessages int32 `protobuf:"varint,1,opt,name=max_concurrent_messages,json=maxConcurrentMessages" json:"max_concurrent_messages,omitempty"`
// The maximum number of concurrent workers to process each ingested message.
// If <= 0, collector.DefaultMaxMessageWorkers will be used.
MaxMessageWorkers int32 `protobuf:"varint,2,opt,name=max_message_workers,json=maxMessageWorkers" json:"max_message_workers,omitempty"`
// The maximum number of log stream states to cache locally. If <= 0, a
// default will be used.
StateCacheSize int32 `protobuf:"varint,3,opt,name=state_cache_size,json=stateCacheSize" json:"state_cache_size,omitempty"`
// The maximum amount of time that cached stream state is valid. If <= 0, a
// default will be used.
StateCacheExpiration *google_protobuf.Duration `protobuf:"bytes,4,opt,name=state_cache_expiration,json=stateCacheExpiration" json:"state_cache_expiration,omitempty"`
}
Collector is the set of configuration parameters for Collector instances.
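A sketch of a Collector block; the numbers are illustrative, and the Duration type is assumed to be the standard well-known type from github.com/golang/protobuf/ptypes/duration.
// A sketch only; values are illustrative. Assumes:
//   import durpb "github.com/golang/protobuf/ptypes/duration"
//   import "github.com/luci/luci-go/logdog/api/config/svcconfig"
func exampleCollector() *svcconfig.Collector {
	return &svcconfig.Collector{
		MaxConcurrentMessages: 128, // <= 0 would let the transport choose a default
		MaxMessageWorkers:     16,
		StateCacheSize:        1024,
		// Cached stream state stays valid for 5 minutes.
		StateCacheExpiration: &durpb.Duration{Seconds: 300},
	}
}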
func (*Collector) GetStateCacheExpiration ¶
func (m *Collector) GetStateCacheExpiration() *google_protobuf.Duration
type Config ¶
type Config struct {
// Configuration for the Butler's log transport.
Transport *Transport `protobuf:"bytes,10,opt,name=transport" json:"transport,omitempty"`
// Configuration for intermediate Storage.
Storage *Storage `protobuf:"bytes,11,opt,name=storage" json:"storage,omitempty"`
// Coordinator is the coordinator service configuration.
Coordinator *Coordinator `protobuf:"bytes,20,opt,name=coordinator" json:"coordinator,omitempty"`
// Collector is the collector fleet configuration.
Collector *Collector `protobuf:"bytes,21,opt,name=collector" json:"collector,omitempty"`
// Archivist microservice configuration.
Archivist *Archivist `protobuf:"bytes,22,opt,name=archivist" json:"archivist,omitempty"`
}
Config is the overall instance configuration.
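As a sketch, the top-level Config simply aggregates the per-component messages documented in this package; all names and values below are hypothetical.
// A sketch only; all names are hypothetical. Assumes:
//   import "github.com/luci/luci-go/logdog/api/config/svcconfig"
func exampleConfig() *svcconfig.Config {
	return &svcconfig.Config{
		Transport: &svcconfig.Transport{
			Type: &svcconfig.Transport_Pubsub{
				Pubsub: &svcconfig.Transport_PubSub{Project: "my-project", Topic: "logdog-butler"},
			},
		},
		Storage: &svcconfig.Storage{
			Type: &svcconfig.Storage_Bigtable{
				Bigtable: &svcconfig.Storage_BigTable{Project: "my-project", Instance: "logdog", LogTableName: "logs"},
			},
		},
		Coordinator: &svcconfig.Coordinator{AdminAuthGroup: "logdog-admins"},
		Collector:   &svcconfig.Collector{},
		Archivist:   &svcconfig.Archivist{GsStagingBucket: "my-logdog-staging"},
	}
}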
func (*Config) GetCoordinator ¶
func (m *Config) GetCoordinator() *Coordinator
type Coordinator ¶
type Coordinator struct {
// The name of the authentication group for administrators.
AdminAuthGroup string `protobuf:"bytes,10,opt,name=admin_auth_group,json=adminAuthGroup" json:"admin_auth_group,omitempty"`
// The name of the authentication group for backend services.
ServiceAuthGroup string `protobuf:"bytes,11,opt,name=service_auth_group,json=serviceAuthGroup" json:"service_auth_group,omitempty"`
// A list of origin URLs that are allowed to perform CORS RPC calls.
RpcAllowOrigins []string `protobuf:"bytes,20,rep,name=rpc_allow_origins,json=rpcAllowOrigins" json:"rpc_allow_origins,omitempty"`
// The maximum amount of time after a prefix has been registered during which
// log streams may also be registered under that prefix.
//
// After the expiration period has passed, new log stream registration will
// fail.
//
// Project configurations or stream prefix registrations may override this by
// providing >= 0 values for prefix expiration. The smallest configured
// expiration will be applied.
PrefixExpiration *google_protobuf.Duration `protobuf:"bytes,21,opt,name=prefix_expiration,json=prefixExpiration" json:"prefix_expiration,omitempty"`
// The full path of the archival Pub/Sub topic.
//
// The Coordinator must have permission to publish to this topic.
ArchiveTopic string `protobuf:"bytes,30,opt,name=archive_topic,json=archiveTopic" json:"archive_topic,omitempty"`
// The amount of time after an archive request has been dispatched before it
// should be executed.
//
// Since terminal messages can arrive out of order, the archival request may
// be kicked off before all of the log stream data has been loaded into
// intermediate storage. If this happens, the Archivist will retry archival
// later automatically.
//
// This parameter is an optimization to stop the archivist from wasting its
// time until the log stream has a reasonable expectation of being available.
ArchiveSettleDelay *google_protobuf.Duration `protobuf:"bytes,31,opt,name=archive_settle_delay,json=archiveSettleDelay" json:"archive_settle_delay,omitempty"`
// The amount of time before a log stream is a candidate for archival,
// regardless of whether or not it has been terminated or completed.
//
// This is a failsafe designed to ensure that log streams with missing records
// or no terminal record (e.g., Butler crashed) are eventually archived.
//
// This should be fairly large (days) to avoid prematurely archiving
// long-running streams, but should be considerably smaller than the
// intermediate storage data retention period.
//
// If a project's "max_stream_age" is smaller than this value, it will be used
// on that project's streams.
ArchiveDelayMax *google_protobuf.Duration `protobuf:"bytes,32,opt,name=archive_delay_max,json=archiveDelayMax" json:"archive_delay_max,omitempty"`
}
Coordinator is the Coordinator service configuration.
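A sketch of a Coordinator block; group names, the topic, and the timings are illustrative, and the Duration type is assumed to be the standard well-known type.
// A sketch only; names and timings are hypothetical. Assumes:
//   import durpb "github.com/golang/protobuf/ptypes/duration"
//   import "github.com/luci/luci-go/logdog/api/config/svcconfig"
func exampleCoordinator() *svcconfig.Coordinator {
	return &svcconfig.Coordinator{
		AdminAuthGroup:   "logdog-admins",
		ServiceAuthGroup: "logdog-services",
		RpcAllowOrigins:  []string{"https://example.appspot.com"},
		// Streams may register under a prefix for up to 24 hours.
		PrefixExpiration: &durpb.Duration{Seconds: 24 * 60 * 60},
		// Full archival Pub/Sub topic name.
		ArchiveTopic: "projects/my-project/topics/logdog-archive",
		// Wait 5 minutes before executing a dispatched archive request.
		ArchiveSettleDelay: &durpb.Duration{Seconds: 5 * 60},
		// Failsafe: archive regardless of terminal state after 7 days.
		ArchiveDelayMax: &durpb.Duration{Seconds: 7 * 24 * 60 * 60},
	}
}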
func (*Coordinator) Descriptor ¶
func (*Coordinator) Descriptor() ([]byte, []int)
func (*Coordinator) GetAdminAuthGroup ¶
func (m *Coordinator) GetAdminAuthGroup() string
func (*Coordinator) GetArchiveDelayMax ¶
func (m *Coordinator) GetArchiveDelayMax() *google_protobuf.Duration
func (*Coordinator) GetArchiveSettleDelay ¶
func (m *Coordinator) GetArchiveSettleDelay() *google_protobuf.Duration
func (*Coordinator) GetArchiveTopic ¶
func (m *Coordinator) GetArchiveTopic() string
func (*Coordinator) GetPrefixExpiration ¶
func (m *Coordinator) GetPrefixExpiration() *google_protobuf.Duration
func (*Coordinator) GetRpcAllowOrigins ¶
func (m *Coordinator) GetRpcAllowOrigins() []string
func (*Coordinator) GetServiceAuthGroup ¶
func (m *Coordinator) GetServiceAuthGroup() string
func (*Coordinator) ProtoMessage ¶
func (*Coordinator) ProtoMessage()
func (*Coordinator) Reset ¶
func (m *Coordinator) Reset()
func (*Coordinator) String ¶
func (m *Coordinator) String() string
type ProjectConfig ¶
type ProjectConfig struct {
// The set of auth service groups that are permitted READ access to this
// project's log streams.
ReaderAuthGroups []string `protobuf:"bytes,2,rep,name=reader_auth_groups,json=readerAuthGroups" json:"reader_auth_groups,omitempty"`
// The set of chrome-infra-auth groups that are permitted WRITE access to this
// project's log streams.
WriterAuthGroups []string `protobuf:"bytes,3,rep,name=writer_auth_groups,json=writerAuthGroups" json:"writer_auth_groups,omitempty"`
// The maximum lifetime of a log stream.
//
// If a stream has not terminated after this period of time, it will be
// forcefully archived, and additional stream data will be discarded.
//
// This is upper-bounded by the global "archive_delay_max" parameter.
MaxStreamAge *google_protobuf.Duration `protobuf:"bytes,4,opt,name=max_stream_age,json=maxStreamAge" json:"max_stream_age,omitempty"`
// The maximum amount of time after a prefix has been registered during which
// log streams may also be registered under that prefix.
//
// See Config's "prefix_expiration" for more information.
PrefixExpiration *google_protobuf.Duration `protobuf:"bytes,5,opt,name=prefix_expiration,json=prefixExpiration" json:"prefix_expiration,omitempty"`
// The archival Google Storage bucket name.
//
// Log stream artifacts will be stored in a subdirectory of this bucket:
// gs://<archive_gs_bucket>/<app-id>/<project-name>/<log-path>/artifact...
//
// Note that the Archivist microservice must have WRITE access to this
// bucket, and the Coordinator must have READ access.
//
// If this is not set, the logs will be archived in a project-named
// subdirectory in the global "archive_gs_base" location.
ArchiveGsBucket string `protobuf:"bytes,10,opt,name=archive_gs_bucket,json=archiveGsBucket" json:"archive_gs_bucket,omitempty"`
// If true, always create an additional data file that is the rendered content
// of the stream data. By default, only streams that explicitly register a
// binary file extension will be rendered.
//
// See the Archivist configuration's "render_all_streams" for more information.
RenderAllStreams bool `protobuf:"varint,11,opt,name=render_all_streams,json=renderAllStreams" json:"render_all_streams,omitempty"`
// Project-specific archive index configuration.
//
// Any unspecified index configuration will default to the service archival
// config.
ArchiveIndexConfig *ArchiveIndexConfig `protobuf:"bytes,12,opt,name=archive_index_config,json=archiveIndexConfig" json:"archive_index_config,omitempty"`
}
ProjectConfig is a set of per-project configuration parameters. Each luci-config project must include one of these configs in order to register or view log streams in that project's log stream space.
A project's configuration should reside in the "projects/<project>" config set and be named "<app-id>.cfg".
Many of the parameters here can be bounded by the service Config parameters.
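A sketch of a project configuration; group and bucket names are hypothetical, the timing value is illustrative, and the Duration type is assumed to be the standard well-known type.
// A sketch only; names and values are hypothetical. Assumes:
//   import durpb "github.com/golang/protobuf/ptypes/duration"
//   import "github.com/luci/luci-go/logdog/api/config/svcconfig"
func exampleProjectConfig() *svcconfig.ProjectConfig {
	return &svcconfig.ProjectConfig{
		ReaderAuthGroups: []string{"all"},                       // who may read this project's streams
		WriterAuthGroups: []string{"my-project-logdog-writers"}, // who may register/write streams
		// Force-archive streams that have not terminated after 2 days
		// (bounded above by the service's "archive_delay_max").
		MaxStreamAge:       &durpb.Duration{Seconds: 2 * 24 * 60 * 60},
		ArchiveGsBucket:    "my-project-logdog-archive",
		ArchiveIndexConfig: &svcconfig.ArchiveIndexConfig{ByteRange: 1024 * 1024},
	}
}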
func (*ProjectConfig) Descriptor ¶
func (*ProjectConfig) Descriptor() ([]byte, []int)
func (*ProjectConfig) GetArchiveGsBucket ¶
func (m *ProjectConfig) GetArchiveGsBucket() string
func (*ProjectConfig) GetArchiveIndexConfig ¶
func (m *ProjectConfig) GetArchiveIndexConfig() *ArchiveIndexConfig
func (*ProjectConfig) GetMaxStreamAge ¶
func (m *ProjectConfig) GetMaxStreamAge() *google_protobuf.Duration
func (*ProjectConfig) GetPrefixExpiration ¶
func (m *ProjectConfig) GetPrefixExpiration() *google_protobuf.Duration
func (*ProjectConfig) GetReaderAuthGroups ¶
func (m *ProjectConfig) GetReaderAuthGroups() []string
func (*ProjectConfig) GetRenderAllStreams ¶
func (m *ProjectConfig) GetRenderAllStreams() bool
func (*ProjectConfig) GetWriterAuthGroups ¶
func (m *ProjectConfig) GetWriterAuthGroups() []string
func (*ProjectConfig) ProtoMessage ¶
func (*ProjectConfig) ProtoMessage()
func (*ProjectConfig) Reset ¶
func (m *ProjectConfig) Reset()
func (*ProjectConfig) String ¶
func (m *ProjectConfig) String() string
type Storage ¶
type Storage struct {
// Type is the storage configuration that is being used.
//
// Types that are valid to be assigned to Type:
// *Storage_Bigtable
Type isStorage_Type `protobuf_oneof:"Type"`
// The maximum lifetime of a log's intermediate storage entries. The Storage
// instance is free to begin deleting log entries if they are older than this.
//
// It is recommended that this be set to 4*[terminal archival threshold],
// where the terminal archival threshold is the amount of time that the
// Coordinator will wait on a log stream that has not been terminated before
// constructing an archive.
//
// Waiting at least the archival threshold ensures that the log entries are
// available for streams that expire. Waiting longer than the threshold is
// good because a user may be streaming logs from intermediate storage as they
// become archived. Waiting a *lot* longer is useful to prevent data loss in
// the event of issues with the archival process.
MaxLogAge *google_protobuf.Duration `protobuf:"bytes,2,opt,name=max_log_age,json=maxLogAge" json:"max_log_age,omitempty"`
}
Storage is the in-transit storage configuration.
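Because the backend is a oneof, the BigTable parameters are assigned through the Storage_Bigtable wrapper type. A sketch with hypothetical names, assuming the standard well-known Duration type:
// A sketch only; names and durations are hypothetical. Assumes:
//   import durpb "github.com/golang/protobuf/ptypes/duration"
//   import "github.com/luci/luci-go/logdog/api/config/svcconfig"
func exampleStorage() *svcconfig.Storage {
	return &svcconfig.Storage{
		// The BigTable backend is selected by assigning the oneof wrapper.
		Type: &svcconfig.Storage_Bigtable{
			Bigtable: &svcconfig.Storage_BigTable{
				Project:      "my-project",
				Instance:     "logdog",
				LogTableName: "logs",
			},
		},
		// Keep intermediate storage entries for 30 days.
		MaxLogAge: &durpb.Duration{Seconds: 30 * 24 * 60 * 60},
	}
}
Readers use the GetBigtable accessor, which returns nil when a different backend is configured.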
func (*Storage) GetBigtable ¶
func (m *Storage) GetBigtable() *Storage_BigTable
func (*Storage) GetMaxLogAge ¶
func (m *Storage) GetMaxLogAge() *google_protobuf.Duration
type Storage_BigTable ¶
type Storage_BigTable struct {
// The name of the BigTable instance project.
Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"`
// The name of the BigTable instance.
Instance string `protobuf:"bytes,2,opt,name=instance" json:"instance,omitempty"`
// The name of the BigTable instance's log table.
LogTableName string `protobuf:"bytes,3,opt,name=log_table_name,json=logTableName" json:"log_table_name,omitempty"`
}
BigTable is the set of BigTable configuration parameters.
func (*Storage_BigTable) Descriptor ¶
func (*Storage_BigTable) Descriptor() ([]byte, []int)
func (*Storage_BigTable) GetInstance ¶
func (m *Storage_BigTable) GetInstance() string
func (*Storage_BigTable) GetLogTableName ¶
func (m *Storage_BigTable) GetLogTableName() string
func (*Storage_BigTable) GetProject ¶
func (m *Storage_BigTable) GetProject() string
func (*Storage_BigTable) ProtoMessage ¶
func (*Storage_BigTable) ProtoMessage()
func (*Storage_BigTable) Reset ¶
func (m *Storage_BigTable) Reset()
func (*Storage_BigTable) String ¶
func (m *Storage_BigTable) String() string
type Storage_Bigtable ¶
type Storage_Bigtable struct {
Bigtable *Storage_BigTable `protobuf:"bytes,1,opt,name=bigtable,oneof"`
}
type Transport ¶
type Transport struct {
// Type is the transport configuration that is being used.
//
// Types that are valid to be assigned to Type:
// *Transport_Pubsub
Type isTransport_Type `protobuf_oneof:"Type"`
}
Transport is the transport configuration.
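Likewise, the Pub/Sub parameters are assigned via the Transport_Pubsub wrapper and read back through GetPubsub; the project, topic, and subscription names below are hypothetical.
// A sketch only; names are hypothetical. Assumes:
//   import "github.com/luci/luci-go/logdog/api/config/svcconfig"
func exampleTransport() *svcconfig.Transport {
	return &svcconfig.Transport{
		// The Pub/Sub transport is selected by assigning the oneof wrapper.
		Type: &svcconfig.Transport_Pubsub{
			Pubsub: &svcconfig.Transport_PubSub{
				Project:      "my-project",
				Topic:        "logdog-butler",
				Subscription: "logdog-collector",
			},
		},
	}
}
GetPubsub returns the wrapped PubSub message, or nil if a different transport is configured.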
func (*Transport) GetPubsub ¶
func (m *Transport) GetPubsub() *Transport_PubSub
type Transport_PubSub ¶
type Transport_PubSub struct {
// The name of the Pub/Sub project.
Project string `protobuf:"bytes,1,opt,name=project" json:"project,omitempty"`
// The name of the Pub/Sub topic.
Topic string `protobuf:"bytes,2,opt,name=topic" json:"topic,omitempty"`
// The name of the Pub/Sub subscription.
Subscription string `protobuf:"bytes,3,opt,name=subscription" json:"subscription,omitempty"`
}
PubSub is a transport configuration for Google Cloud Pub/Sub.
func (*Transport_PubSub) Descriptor ¶
func (*Transport_PubSub) Descriptor() ([]byte, []int)
func (*Transport_PubSub) GetProject ¶
func (m *Transport_PubSub) GetProject() string
func (*Transport_PubSub) GetSubscription ¶
func (m *Transport_PubSub) GetSubscription() string
func (*Transport_PubSub) GetTopic ¶
func (m *Transport_PubSub) GetTopic() string
func (*Transport_PubSub) ProtoMessage ¶
func (*Transport_PubSub) ProtoMessage()
func (*Transport_PubSub) Reset ¶
func (m *Transport_PubSub) Reset()
func (*Transport_PubSub) String ¶
func (m *Transport_PubSub) String() string
type Transport_Pubsub ¶
type Transport_Pubsub struct {
Pubsub *Transport_PubSub `protobuf:"bytes,1,opt,name=pubsub,oneof"`
}
Source Files ¶
- archival.pb.go
- config.pb.go
- doc.go
- gen.go
- project.pb.go
- storage.pb.go
- transport.pb.go
- util.go