storage

package
v0.0.0-...-ba108f2 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 15, 2026 License: Apache-2.0 Imports: 59 Imported by: 32

Documentation

Index

Constants

View Source
const (
	GuestEnvdPath = "/usr/bin/envd"

	MemfileName  = "memfile"
	RootfsName   = "rootfs.ext4"
	SnapfileName = "snapfile"
	MetadataName = "metadata.json"

	HeaderSuffix = ".header"
)

Variables

View Source
var (
	ErrOffsetUnaligned = errors.New("offset must be a multiple of chunk size")
	ErrBufferTooSmall  = errors.New("buffer is too small")
	ErrMultipleChunks  = errors.New("cannot read multiple chunks")
	ErrBufferTooLarge  = errors.New("buffer is too large")
)
View Source
var BuildCacheStorageConfig = StorageConfig{
	GetLocalBasePath: func() string {
		return env.GetEnv("LOCAL_BUILD_CACHE_STORAGE_BASE_PATH", "/tmp/build-cache")
	},
	GetBucketName: func() string {
		return utils.RequiredEnv("BUILD_CACHE_BUCKET_NAME", "Bucket for storing build cache files")
	},
}
View Source
var ErrObjectNotExist = errors.New("object does not exist")
View Source
var ErrObjectRateLimited = errors.New("object access rate limited")

ErrObjectRateLimited indicates per-object mutation rate limiting — typically triggered when multiple concurrent writers race to write the same content-addressed object.

View Source
var TemplateStorageConfig = StorageConfig{
	GetLocalBasePath: func() string {
		return env.GetEnv("LOCAL_TEMPLATE_STORAGE_BASE_PATH", "/tmp/templates")
	},
	GetBucketName: func() string {
		return utils.RequiredEnv("TEMPLATE_BUCKET_NAME", "Bucket for storing template files")
	},
}

Functions

func ComputeUploadHMAC

func ComputeUploadHMAC(key []byte, path string, expires int64) string

func GetBlob

func GetBlob(ctx context.Context, b Blob) ([]byte, error)

GetBlob is a convenience wrapper around b.WriteTo that returns the blob's contents as a byte slice.

func IsLocal

func IsLocal() bool

IsLocal reports whether the configured storage provider is the local filesystem backend.

func SplitPath

func SplitPath(path string) (buildID, fileName string)

SplitPath splits a storage path of the form "{buildID}/{fileName}" back into its components. This is the inverse of the Paths accessor methods that construct such paths.

func ValidateUploadToken

func ValidateUploadToken(key []byte, path string, expires int64, token string) bool

ValidateUploadToken validates an HMAC token for a local upload URL. Exported so that the upload handler in the orchestrator can use it.

func WithSkipCacheWriteback

func WithSkipCacheWriteback(ctx context.Context) context.Context

WithSkipCacheWriteback returns a context that signals the NFS cache layer to skip writing fetched data back to the local cache. This is used by the prefetcher to avoid polluting the shared NFS cache with prefetch-specific reads.

Types

type Blob

type Blob interface {
	WriteTo(ctx context.Context, dst io.Writer) (int64, error)
	Put(ctx context.Context, data []byte) error
	Exists(ctx context.Context) (bool, error)
}

type CachePaths

type CachePaths struct {
	Paths

	// CacheIdentifier is used to distinguish between each entry in the cache to prevent deleting the cache files when the template cache entry is being closed and a new one is being created.
	CacheIdentifier string
	// contains filtered or unexported fields
}

func (CachePaths) CacheMetadata

func (c CachePaths) CacheMetadata() string

func (CachePaths) CacheSnapfile

func (c CachePaths) CacheSnapfile() string

func (CachePaths) Close

func (c CachePaths) Close() error

func (CachePaths) NewSandboxFiles

func (c CachePaths) NewSandboxFiles(sandboxID string) *SandboxFiles

func (CachePaths) NewSandboxFilesWithStaticID

func (c CachePaths) NewSandboxFilesWithStaticID(sandboxID string, staticID string) *SandboxFiles

type CompleteMultipartUpload

type CompleteMultipartUpload struct {
	XMLName string `xml:"CompleteMultipartUpload"`
	Parts   []Part `xml:"Part"`
}

type Config

type Config struct {
	SandboxCacheDir  string `env:"SANDBOX_CACHE_DIR,expand"  envDefault:"${ORCHESTRATOR_BASE_PATH}/sandbox"`
	SnapshotCacheDir string `env:"SNAPSHOT_CACHE_DIR,expand" envDefault:"/mnt/snapshot-cache"`
	TemplateCacheDir string `env:"TEMPLATE_CACHE_DIR,expand" envDefault:"${ORCHESTRATOR_BASE_PATH}/template"`
}

type InitiateMultipartUploadResult

type InitiateMultipartUploadResult struct {
	Bucket   string `xml:"Bucket"`
	Key      string `xml:"Key"`
	UploadID string `xml:"UploadId"`
}

type MultipartUploader

type MultipartUploader struct {
	// contains filtered or unexported fields
}

func NewMultipartUploaderWithRetryConfig

func NewMultipartUploaderWithRetryConfig(ctx context.Context, bucketName, objectName string, retryConfig RetryConfig) (*MultipartUploader, error)

func (*MultipartUploader) UploadFileInParallel

func (m *MultipartUploader) UploadFileInParallel(ctx context.Context, filePath string, maxConcurrency int) (int64, error)

type ObjectType

type ObjectType int
const (
	UnknownObjectType ObjectType = iota
	MemfileHeaderObjectType
	RootFSHeaderObjectType
	SnapfileObjectType
	MetadataObjectType
	BuildLayerFileObjectType
	LayerMetadataObjectType
)

type Part

type Part struct {
	PartNumber int    `xml:"PartNumber"`
	ETag       string `xml:"ETag"`
}

type Paths

type Paths struct {
	BuildID string `json:"build_id"`
}

func (Paths) Cache

func (p Paths) Cache(config Config) (CachePaths, error)

func (Paths) CacheKey

func (p Paths) CacheKey() string

CacheKey returns the key for the cache. It is unique for each template-build pair.

func (Paths) Memfile

func (p Paths) Memfile() string

func (Paths) MemfileHeader

func (p Paths) MemfileHeader() string

func (Paths) Metadata

func (p Paths) Metadata() string

func (Paths) Rootfs

func (p Paths) Rootfs() string

func (Paths) RootfsHeader

func (p Paths) RootfsHeader() string

func (Paths) Snapfile

func (p Paths) Snapfile() string

func (Paths) StorageDir

func (p Paths) StorageDir() string

type Provider

type Provider string
const (
	GCPStorageProvider   Provider = "GCPBucket"
	AWSStorageProvider   Provider = "AWSBucket"
	LocalStorageProvider Provider = "Local"

	DefaultStorageProvider Provider = GCPStorageProvider

	// MemoryChunkSize must always be bigger or equal to the block size.
	MemoryChunkSize = 4 * 1024 * 1024 // 4 MB
)

func GetProviderType

func GetProviderType() Provider

GetProviderType returns the configured storage provider type from the STORAGE_PROVIDER environment variable, defaulting to GCPBucket.

type RetryConfig

type RetryConfig struct {
	MaxAttempts       int
	InitialBackoff    time.Duration
	MaxBackoff        time.Duration
	BackoffMultiplier float64
}

RetryConfig holds the configuration for retry logic.

func DefaultRetryConfig

func DefaultRetryConfig() RetryConfig

DefaultRetryConfig returns the default retry configuration matching storage_google.go.

type SandboxFiles

type SandboxFiles struct {
	CachePaths

	SandboxID string
	// contains filtered or unexported fields
}

func (*SandboxFiles) SandboxCacheRootfsLinkPath

func (s *SandboxFiles) SandboxCacheRootfsLinkPath(config Config) string

func (*SandboxFiles) SandboxCacheRootfsPath

func (s *SandboxFiles) SandboxCacheRootfsPath(config Config) string

func (*SandboxFiles) SandboxCgroupName

func (s *SandboxFiles) SandboxCgroupName() string

func (*SandboxFiles) SandboxFirecrackerSocketPath

func (s *SandboxFiles) SandboxFirecrackerSocketPath() string

func (*SandboxFiles) SandboxMetricsFifoPath

func (s *SandboxFiles) SandboxMetricsFifoPath() string

func (*SandboxFiles) SandboxUffdSocketPath

func (s *SandboxFiles) SandboxUffdSocketPath() string

type Seekable

type Seekable interface {
	SeekableReader
	SeekableWriter
	StreamingReader
}

type SeekableObjectType

type SeekableObjectType int
const (
	UnknownSeekableObjectType SeekableObjectType = iota
	MemfileObjectType
	RootFSObjectType
)

type SeekableReader

type SeekableReader interface {
	// Random slice access, off and buffer length must be aligned to block size
	ReadAt(ctx context.Context, buffer []byte, off int64) (int, error)
	Size(ctx context.Context) (int64, error)
}

type SeekableWriter

type SeekableWriter interface {
	// Store entire file
	StoreFile(ctx context.Context, path string) error
}

type StorageConfig

type StorageConfig struct {
	GetLocalBasePath func() string
	GetBucketName    func() string
	// contains filtered or unexported fields
}

StorageConfig holds the configuration for creating a storage provider. Both GetLocalBasePath and GetBucketName are evaluated lazily so that callers who set environment variables at runtime (e.g. via os.Setenv or t.Setenv in tests) see their overrides respected.

func (StorageConfig) WithLimiter

func (c StorageConfig) WithLimiter(limiter *limit.Limiter) StorageConfig

WithLimiter returns a copy of the config with the given limiter set.

func (StorageConfig) WithLocalUpload

func (c StorageConfig) WithLocalUpload(uploadBaseURL string, hmacKey []byte) StorageConfig

WithLocalUpload returns a copy of the config with the given local upload parameters set. These are only used when STORAGE_PROVIDER=Local to let the filesystem storage provider generate signed URLs for file uploads.

type StorageProvider

type StorageProvider interface {
	DeleteObjectsWithPrefix(ctx context.Context, prefix string) error
	UploadSignedURL(ctx context.Context, path string, ttl time.Duration) (string, error)
	OpenBlob(ctx context.Context, path string, objectType ObjectType) (Blob, error)
	OpenSeekable(ctx context.Context, path string, seekableObjectType SeekableObjectType) (Seekable, error)
	GetDetails() string
}

func GetStorageProvider

func GetStorageProvider(ctx context.Context, cfg StorageConfig) (StorageProvider, error)

func NewGCP

func NewGCP(ctx context.Context, bucketName string, limiter *limit.Limiter) (StorageProvider, error)

func WrapInNFSCache

func WrapInNFSCache(
	ctx context.Context,
	rootPath string,
	inner StorageProvider,
	flags *featureflags.Client,
) StorageProvider

type StreamingReader

type StreamingReader interface {
	OpenRangeReader(ctx context.Context, off, length int64) (io.ReadCloser, error)
}

StreamingReader supports progressive reads via a streaming range reader.

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL