package cache
v0.5.1
Published: Apr 28, 2023 License: Apache-2.0 Imports: 30 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func IsS3NotFound

func IsS3NotFound(err error) bool
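
As a rough illustration, a caller might use IsS3NotFound to treat a missing S3 object as an ordinary cache miss; the fetch callback below is a hypothetical stand-in for whatever S3 call the caller makes and is not part of this package.

func fetchCachedBlob(ctx context.Context, fetch func(context.Context, string) ([]byte, error), key string) ([]byte, error) {
	data, err := fetch(ctx, key)
	if err != nil {
		if cache.IsS3NotFound(err) {
			// The object simply is not in the bucket: a plain cache miss.
			return nil, nil
		}
		// Auth, network, or throttling failures are real errors.
		return nil, err
	}
	return data, nil
}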

Types

type CacheKey

type CacheKey struct {
	ID      string
	Results []Result
}

type Config

type Config struct {
	S3            *S3LayerStoreConfig
	ImportPeriod  time.Duration
	ExportPeriod  time.Duration
	ExportTimeout time.Duration
}

func (Config) String

func (c Config) String() string

type ExportRecord

type ExportRecord struct {
	Digest     digest.Digest // record digest
	CacheRefID string        // worker cache id
}

type GetConfigRequest

type GetConfigRequest struct {
	CacheMountIDs []string
}

func (GetConfigRequest) String

func (r GetConfigRequest) String() string

type LayerStore

type LayerStore interface {
	content.Provider
	PushLayer(ctx context.Context, layer ocispecs.Descriptor, provider content.Provider) error
}

func NewS3LayerStore

func NewS3LayerStore(ctx context.Context, config S3LayerStoreConfig) (LayerStore, error)

type Link

type Link struct {
	ID       string
	LinkedID string
	Input    int
	Digest   digest.Digest
	Selector digest.Digest
}

type Manager

type Manager interface {
	solver.CacheManager
	StartCacheMountSynchronization(context.Context, *dagger.Client) error
	Close(context.Context) error
}

func NewManager

func NewManager(ctx context.Context, managerConfig ManagerConfig) (Manager, error)

type ManagerConfig

type ManagerConfig struct {
	KeyStore    solver.CacheKeyStorage
	ResultStore solver.CacheResultStorage
	Worker      worker.Worker
	ServiceURL  string
}
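
A minimal sketch of wiring up a Manager, assuming keyStore, resultStore, wkr, and daggerClient already exist in the engine's setup; the service URL is illustrative.

mgr, err := cache.NewManager(ctx, cache.ManagerConfig{
	KeyStore:    keyStore,    // solver.CacheKeyStorage
	ResultStore: resultStore, // solver.CacheResultStorage
	Worker:      wkr,         // worker.Worker
	ServiceURL:  "https://cache.example.com", // hypothetical cache service endpoint
})
if err != nil {
	return err
}
defer mgr.Close(ctx)

// Keep cache mounts in sync with the remote service for the lifetime of mgr.
if err := mgr.StartCacheMountSynchronization(ctx, daggerClient); err != nil {
	return err
}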

type RecordLayers

type RecordLayers struct {
	RecordDigest digest.Digest
	Layers       []ocispecs.Descriptor
}

type Result

type Result struct {
	ID          string
	CreatedAt   time.Time
	Description string
}

type S3LayerStore

type S3LayerStore struct {
	// contains filtered or unexported fields
}

func (*S3LayerStore) PushLayer

func (c *S3LayerStore) PushLayer(ctx context.Context, layer ocispecs.Descriptor, provider content.Provider) error

func (*S3LayerStore) ReaderAt

func (c *S3LayerStore) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error)

type S3LayerStoreConfig

type S3LayerStoreConfig struct {
	Bucket             string
	Region             string
	EndpointURL        string
	UsePathStyle       bool
	BlobsPrefix        string
	CacheMountPrefixes []string
}
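
A minimal sketch of creating an S3-backed layer store and pushing a layer through it; all field values are illustrative, and layerDesc and contentStore are assumed to come from the caller's existing content store.

store, err := cache.NewS3LayerStore(ctx, cache.S3LayerStoreConfig{
	Bucket:             "engine-cache",          // hypothetical bucket name
	Region:             "us-east-1",
	EndpointURL:        "",                      // set for S3-compatible stores such as MinIO
	UsePathStyle:       false,
	BlobsPrefix:        "blobs/",
	CacheMountPrefixes: []string{"cachemounts/"},
})
if err != nil {
	return err
}

// Push a layer whose bytes are readable from an existing content.Provider.
if err := store.PushLayer(ctx, layerDesc, contentStore); err != nil {
	return err
}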

type S3ReaderAt

type S3ReaderAt struct {
	Ctx      context.Context
	Client   *s3.Client
	Bucket   string
	Key      string
	BlobSize int64
	// contains filtered or unexported fields
}

S3ReaderAt is optimized for reading a layer into the content store. Layers are read sequentially and in 1MB chunks by the underlying containerd content code. We therefore initialize the reader at the first offset and keep reading sequentially from there. If a non-sequential read is attempted, the reader is re-opened from the new offset, which is slow but not expected to happen often.

The relevant code currently lives here: https://github.com/containerd/containerd/blob/7a77da2c26007fbf4b8526fd01d5ab06ac12d452/content/helpers.go#L150
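
The sketch below shows the access pattern the reader is optimized for: sequential 1MB reads starting at offset 0, as the containerd helper linked above performs. ra is assumed to be an already constructed *cache.S3ReaderAt.

buf := make([]byte, 1<<20) // 1MB chunks, matching the chunk size described above
var off int64
for off < ra.Size() {
	n, err := ra.ReadAt(buf, off) // sequential reads reuse the open S3 body
	if n > 0 {
		// ... write buf[:n] into the content store ingest ...
		off += int64(n)
	}
	if err == io.EOF {
		break
	}
	if err != nil {
		return err
	}
}
// Reading from any other offset would force the reader to re-open the S3
// object at that offset, which works but is slower.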

func (*S3ReaderAt) Close

func (r *S3ReaderAt) Close() error

func (*S3ReaderAt) ReadAt

func (r *S3ReaderAt) ReadAt(p []byte, off int64) (int, error)

func (*S3ReaderAt) Size

func (r *S3ReaderAt) Size() int64

type Service

type Service interface {
	// GetConfig returns configuration needed for the engine to push layer blobs
	GetConfig(context.Context, GetConfigRequest) (*Config, error)

	// UpdateCacheRecords informs the cache service of the current state of the cache metadata.
	// It returns a list of cache refs that should be prepared for export and pushed.
	UpdateCacheRecords(context.Context, UpdateCacheRecordsRequest) (*UpdateCacheRecordsResponse, error)

	// UpdateCacheLayers tells the cache service that layers for the given records have been
	// uploaded with the given digests.
	UpdateCacheLayers(context.Context, UpdateCacheLayersRequest) error

	// ImportCache returns a cache config that the engine can turn into a cache manager.
	ImportCache(ctx context.Context) (*remotecache.CacheConfig, error)
}

The process on export is as follows:

  • The engine gathers metadata for the current state of its local cache and sends it to the cache service via UpdateCacheRecords
  • The cache service responds with a list of cache refs that should be exported, if any
  • The engine compresses those refs into layers, pushes them, and then reports the resulting layer digests back to the cache service via UpdateCacheLayers (sketched below)
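
A hedged sketch of that export flow from the engine's side; svc is a Service implementation, localCacheKeys and localLinks are the gathered cache metadata, and pushLayersForRef is a hypothetical helper that compresses and pushes a cache ref, not part of this package.

resp, err := svc.UpdateCacheRecords(ctx, cache.UpdateCacheRecordsRequest{
	CacheKeys: localCacheKeys, // []cache.CacheKey describing the local cache state
	Links:     localLinks,     // []cache.Link
})
if err != nil {
	return err
}

var updated []cache.RecordLayers
for _, rec := range resp.ExportRecords {
	layers, err := pushLayersForRef(ctx, rec.CacheRefID) // hypothetical: compress and push the ref
	if err != nil {
		return err
	}
	updated = append(updated, cache.RecordLayers{
		RecordDigest: rec.Digest,
		Layers:       layers,
	})
}

// Report which layer digests ended up backing each exported record.
return svc.UpdateCacheLayers(ctx, cache.UpdateCacheLayersRequest{
	UpdatedRecords: updated,
})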

The process on import is as follows:

  • The engine asks the cache service for a cache config via ImportCache. This cache config uses the same format buildkit uses to create cache managers from remote caches.
  • The cache service responds with that cache config
  • The engine creates a cache manager from that config and plugs it into the combined cache manager alongside the actual local cache (sketched below)
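
A hedged sketch of that import flow; turning the returned remotecache.CacheConfig into a buildkit cache manager and combining it with the local one happens in the engine, outside this package.

cfg, err := svc.ImportCache(ctx)
if err != nil {
	return err
}
// cfg is in the same format buildkit uses for remote cache imports; the
// engine builds a cache manager from it and combines it with the actual
// local cache manager.
_ = cfg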

type UpdateCacheLayersRequest

type UpdateCacheLayersRequest struct {
	UpdatedRecords []RecordLayers
}

func (UpdateCacheLayersRequest) String

func (r UpdateCacheLayersRequest) String() string

type UpdateCacheRecordsRequest

type UpdateCacheRecordsRequest struct {
	CacheKeys []CacheKey
	Links     []Link
}

func (UpdateCacheRecordsRequest) String

func (r UpdateCacheRecordsRequest) String() string

type UpdateCacheRecordsResponse

type UpdateCacheRecordsResponse struct {
	// cache records that the engine should prepare layers for and push
	ExportRecords []ExportRecord
}

func (UpdateCacheRecordsResponse) String

func (r UpdateCacheRecordsResponse) String() string
