db

package
v0.0.0-...-7d4eda5 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Aug 21, 2020 License: Apache-2.0 Imports: 43 Imported by: 0

Documentation

Index

Constants

View Source
// Batch-size bounds for BLIP replication changes batches.
const (
	// Blip default vals
	BlipDefaultBatchSize = uint64(200)
	BlipMinimumBatchSize = uint64(10) // Not in the replication spec - is this required?
)
View Source
// BLIP message profile names used by the Sync Gateway replication protocol.
const (
	MessageSetCheckpoint   = "setCheckpoint"
	MessageGetCheckpoint   = "getCheckpoint"
	MessageSubChanges      = "subChanges"
	MessageChanges         = "changes"
	MessageRev             = "rev"
	MessageNoRev           = "norev"
	MessageGetAttachment   = "getAttachment"
	MessageProposeChanges  = "proposeChanges"
	MessageProveAttachment = "proveAttachment"
)

Message types

View Source
const (

	// Common message properties
	BlipClient   = "client"
	BlipCompress = "compress"
	BlipProfile  = "Profile"

	// setCheckpoint message properties
	SetCheckpointRev         = "rev"
	SetCheckpointClient      = "client"
	SetCheckpointResponseRev = "rev"

	// getCheckpoint message properties
	GetCheckpointResponseRev = "rev"
	GetCheckpointClient      = "client"

	// subChanges message properties
	SubChangesActiveOnly = "activeOnly"
	SubChangesFilter     = "filter"
	SubChangesChannels   = "channels"
	SubChangesSince      = "since"
	SubChangesContinuous = "continuous"
	SubChangesBatch      = "batch"

	// rev message properties
	RevMessageId          = "id"
	RevMessageRev         = "rev"
	RevMessageDeleted     = "deleted"
	RevMessageSequence    = "sequence"
	RevMessageHistory     = "history"
	RevMessageNoConflicts = "noconflicts"
	RevMessageDeltaSrc    = "deltaSrc"

	// norev message properties
	NorevMessageId     = "id"
	NorevMessageRev    = "rev"
	NorevMessageSeq    = "seq"
	NorevMessageError  = "error"
	NorevMessageReason = "reason"

	// changes message properties
	ChangesMessageIgnoreNoConflicts = "ignoreNoConflicts"

	// changes response properties
	ChangesResponseMaxHistory = "maxHistory"
	ChangesResponseDeltas     = "deltas"

	// proposeChanges message properties
	ProposeChangesResponseDeltas = "deltas"

	// getAttachment message properties
	GetAttachmentDigest = "digest"

	// proveAttachment
	ProveAttachmentDigest = "digest"

	// Sync Gateway specific properties (used for testing)
	SGShowHandler = "sgShowHandler" // Used to request a response with sgHandler
	SGHandler     = "sgHandler"     // Used to show which handler processed the message
)

Message properties

View Source
const (
	DefaultCachePendingSeqMaxNum  = 10000            // Max number of waiting sequences
	DefaultCachePendingSeqMaxWait = 5 * time.Second  // Max time we'll wait for a pending sequence before sending to missed queue
	DefaultSkippedSeqMaxWait      = 60 * time.Minute // Max time we'll wait for an entry in the missing before purging
	QueryTombstoneBatch           = 250              // Max number of tombstones checked per query during Compact
)
View Source
// ChangeWaiter wait results.
const (
	WaiterClosed uint32 = iota // The waiter has been closed
	WaiterHasChanges           // New changes are available
	WaiterCheckTerminated      // The wait was terminated before changes arrived
)
View Source
const (
	BackfillFlag_None backfillFlag = iota
	BackfillFlag_Pending
	BackfillFlag_Complete
)
View Source
// Database run states, as used in RunStateString.
const (
	DBOffline uint32 = iota // Database is offline
	DBStarting              // Database is starting up
	DBOnline                // Database is online and serving requests
	DBStopping              // Database is shutting down
	DBResyncing             // Database is running a resync
)
View Source
const (
	DBCompactNotRunning uint32 = iota
	DBCompactRunning
)
View Source
const (
	DefaultRevsLimitNoConflicts             = 50
	DefaultRevsLimitConflicts               = 100
	DefaultPurgeInterval                    = 30 // Default metadata purge interval, in days.  Used if server's purge interval is unavailable
	DefaultSGReplicateEnabled               = true
	DefaultSGReplicateWebsocketPingInterval = time.Minute * 5
)
View Source
const (
	CompactIntervalMinDays = float32(0.04) // ~1 Hour in days
	CompactIntervalMaxDays = float32(60)   // 60 Days in days
)
View Source
const (
	DesignDocSyncGatewayPrefix      = "sync_gateway"
	DesignDocSyncHousekeepingPrefix = "sync_housekeeping"
	ViewPrincipals                  = "principals"
	ViewChannels                    = "channels"
	ViewAccess                      = "access"
	ViewAccessVbSeq                 = "access_vbseq"
	ViewRoleAccess                  = "role_access"
	ViewRoleAccessVbSeq             = "role_access_vbseq"
	ViewAllDocs                     = "all_docs"
	ViewImport                      = "import"
	ViewSessions                    = "sessions"
	ViewTombstones                  = "tombstones"
)
View Source
const (
	DocUnmarshalAll       = DocumentUnmarshalLevel(iota) // Unmarshals sync metadata and body
	DocUnmarshalSync                                     // Unmarshals all sync metadata
	DocUnmarshalNoHistory                                // Unmarshals sync metadata excluding history
	DocUnmarshalRev                                      // Unmarshals rev + CAS only
	DocUnmarshalCAS                                      // Unmarshals CAS (for import check) only
	DocUnmarshalNone                                     // No unmarshalling (skips import/upgrade check)
)
View Source
const (
	// RemovedRedactedDocument is returned by SG when a given document has been dropped out of a channel
	RemovedRedactedDocument = `{"` + BodyRemoved + `":true}`
	// DeletedDocument is returned by SG when a given document has been deleted
	DeletedDocument = `{"` + BodyDeleted + `":true}`
)
View Source
const (
	ImportFromFeed = ImportMode(iota) // Feed-based import.  Attempt to import once - cancels import on cas write failure of the imported doc.
	ImportOnDemand                    // On-demand import. Reattempt import on cas write failure of the imported doc until either the import succeeds, or existing doc is an SG write.
)
View Source
// SGIndexFlags control how Sync Gateway GSI indexes are created.
const (
	IdxFlagXattrOnly       = SGIndexFlags(1 << iota) // Index should only be created when running w/ xattrs=true
	IdxFlagIndexTombstones                           // When xattrs=true, index should be created with {"retain_deleted_xattr":true} in order to index tombstones
)
View Source
// Names identifying each N1QL query type issued by Sync Gateway.
const (
	QueryTypeAccess       = "access"
	QueryTypeRoleAccess   = "roleAccess"
	QueryTypeChannels     = "channels"
	QueryTypeChannelsStar = "channelsStar"
	QueryTypeSequences    = "sequences"
	QueryTypePrincipals   = "principals"
	QueryTypeSessions     = "sessions"
	QueryTypeTombstones   = "tombstones"
	QueryTypeResync       = "resync"
	QueryTypeAllDocs      = "allDocs"
)
View Source
const (
	QueryParamChannelName = "channelName"
	QueryParamStartSeq    = "startSeq"
	QueryParamEndSeq      = "endSeq"
	QueryParamUserName    = "userName"
	QueryParamOlderThan   = "olderThan"
	QueryParamInSequences = "inSequences"
	QueryParamStartKey    = "startkey"
	QueryParamEndKey      = "endkey"
	QueryParamLimit       = "limit"

	// Variables in the select clause can't be parameterized, require additional handling
	QuerySelectUserName = "$$selectUserName"
)

Query Parameters used as parameters in prepared statements. Note that these are hardcoded into the query definitions above, for improved query readability.

View Source
// Underscore-prefixed special properties recognized in document bodies
// (CouchDB-style metadata keys).
const (
	BodyDeleted     = "_deleted"
	BodyRev         = "_rev"
	BodyId          = "_id"
	BodyRevisions   = "_revisions"
	BodyAttachments = "_attachments"
	BodyPurged      = "_purged"
	BodyExpiry      = "_exp"
	BodyRemoved     = "_removed"
)
View Source
const (
	RevisionsStart = "start"
	RevisionsIds   = "ids"
)
View Source
const (
	// DefaultRevisionCacheSize is the number of recently-accessed doc revisions to cache in RAM
	DefaultRevisionCacheSize = 5000

	// DefaultRevisionCacheShardCount is the default number of shards to use for the revision cache
	DefaultRevisionCacheShardCount = 16
)
View Source
const (
	RevCacheIncludeBody  = true
	RevCacheOmitBody     = false
	RevCacheIncludeDelta = true
	RevCacheOmitDelta    = false
)
View Source
// Replication lifecycle states reported for active replications.
const (
	ReplicationStateStopped      = "stopped"
	ReplicationStateRunning      = "running"
	ReplicationStateReconnecting = "reconnecting"
	ReplicationStateResetting    = "resetting"
	ReplicationStateError        = "error"
)
View Source
const (
	ConfigErrorIDTooLong            = "Replication ID must be less than 160 characters"
	ConfigErrorUnknownFilter        = "Unknown replication filter; try sync_gateway/bychannel"
	ConfigErrorMissingQueryParams   = "Replication specifies sync_gateway/bychannel filter but is missing query_params"
	ConfigErrorMissingRemote        = "Replication remote must be specified"
	ConfigErrorMissingDirection     = "Replication direction must be specified"
	ConfigErrorDuplicateCredentials = "Auth credentials can be specified using username/password config properties or remote URL, but not both"
	ConfigErrorConfigBasedAdhoc     = "adhoc=true is invalid for replication in Sync Gateway configuration"
	ConfigErrorConfigBasedCancel    = "cancel=true is invalid for replication in Sync Gateway configuration"
	ConfigErrorInvalidDirectionFmt  = "Invalid replication direction %q, valid values are %s/%s/%s"
	ConfigErrorBadChannelsArray     = "Bad channels array in query_params for sync_gateway/bychannel filter"
)

Replication config validation error messages

View Source
const DesignDocFormat = "%s_%s" // Design doc prefix, view version
View Source
const DesignDocVersion = "2.1"

DesignDocVersion should be incremented every time any view definition changes. Currently both Sync Gateway design docs share the same view version, but this is subject to change if the update schedule diverges

View Source
const DocTypeLocal = "local"
View Source
const MaximumInlineBodySize = 250

When external revision storage is used, maximum body size (in bytes) to store inline. Non-winning bodies smaller than this size are more efficient to store inline.

View Source
const (
	MinimumChannelCacheMaxNumber = 100 // Minimum size for channel cache capacity
)
View Source
const (
	RepairRevTreeCycles = RepairJobType("RepairRevTreeCycles")
)
View Source
const (

	// N1ql-encoded wildcard expression matching the '_sync:' prefix used for all sync gateway's system documents.
	// Need to escape the underscore in '_sync' to prevent it being treated as a N1QL wildcard
	SyncDocWildcard = `\\_sync:%`
)
View Source
const (
	// 10 minute expiry for unused sequence docs.
	UnusedSequenceTTL = 10 * 60
)

Variables

View Source
// Channel cache tuning defaults. Declared as vars (not consts) to allow
// override in tests.
var (
	DefaultChannelCacheMinLength       = 50               // Keep at least this many entries in cache
	DefaultChannelCacheMaxLength       = 500              // Don't put more than this many entries in cache
	DefaultChannelCacheAge             = 60 * time.Second // Keep entries at least this long
	DefaultChannelCacheMaxNumber       = 50000            // Default of 50k channel caches
	DefaultCompactHighWatermarkPercent = 80               // Default compaction high watermark (percent of MaxNumber)
	DefaultCompactLowWatermarkPercent  = 60               // Default compaction low watermark (percent of MaxNumber)
	DefaultChannelQueryLimit           = 5000             // Default channel query limit
)
View Source
var (
	DefaultDeltaSyncEnabled   = false
	DefaultDeltaSyncRevMaxAge = uint32(60 * 60 * 24) // 24 hours in seconds
)

Default values for delta sync

View Source
var DefaultCompactInterval = uint32(60 * 60 * 24) // Default compact interval in seconds = 1 Day
View Source
var DesignDocPreviousVersions = []string{"", "2.0"}

DesignDocPreviousVersions defines the set of versions included during removal of obsolete design docs. Must be updated whenever DesignDocVersion is incremented. Uses a hardcoded list instead of version comparison to simplify the processing (particularly since there aren't expected to be many view versions before moving to GSI).

View Source
var EnableStarChannelLog = true

Enable keeping a channel-log for the "*" channel (channel.UserStarChannel). The only time this channel is needed is if someone has access to "*" (e.g. admin-party) and tracks its changes feed.

View Source
var ErrClosedBLIPSender = errors.New("use of closed BLIP sender")
View Source
var ErrForbidden = base.HTTPErrorf(403, "forbidden")

ErrForbidden is returned when the user requests a document without a revision that they do not have access to. This is different from a client specifically requesting a revision they know about, which is treated as a _removal.

View Source
var MaxSequenceID = SequenceID{
	Seq: math.MaxUint64,
}
View Source
var MaxSequenceIncrFrequency = 1000 * time.Millisecond

MaxSequenceIncrFrequency is the maximum frequency we want to perform incr operations. Incr operations occurring more frequently than this value trigger an increase in batch size. Defined as var to simplify test usage

View Source
var QueryAccess = SGQuery{
	// contains filtered or unexported fields
}
View Source
var QueryAllDocs = SGQuery{
	// contains filtered or unexported fields
}

QueryAllDocs is using the star channel's index, which is indexed by sequence, then ordering the results by doc id. We currently don't have a performance-tuned use of AllDocs today - if needed, should create a custom index indexed by doc id. Note: the QueryAllDocs function may append additional filtering and ordering of the form:

AND META(`bucket`).id >= '%s'
AND META(`bucket`).id <= '%s'
ORDER BY META(`bucket`).id
View Source
var QueryChannels = SGQuery{
	// contains filtered or unexported fields
}
View Source
var QueryPrincipals = SGQuery{
	// contains filtered or unexported fields
}
View Source
var QueryResync = SGQuery{
	// contains filtered or unexported fields
}

QueryResync and QueryImport both use IndexAllDocs. If these need to be revisited for performance reasons, they could be retooled to use covering indexes, where the id filtering is done at indexing time. Given that this code doesn't even do pagination currently, it's likely that this functionality should just be replaced by an ad-hoc DCP stream.

View Source
var QueryRoleAccess = SGQuery{
	// contains filtered or unexported fields
}
View Source
var QuerySequences = SGQuery{
	// contains filtered or unexported fields
}
View Source
var QuerySessions = SGQuery{
	// contains filtered or unexported fields
}
View Source
var QueryStarChannel = SGQuery{
	// contains filtered or unexported fields
}
View Source
var QueryTombstones = SGQuery{
	// contains filtered or unexported fields
}
View Source
// RunStateString maps each database run state (DBOffline, DBStarting, ...) to
// a human-readable name, indexed by the state's numeric value.
var RunStateString = []string{
	DBOffline:   "Offline",
	DBStarting:  "Starting",
	DBOnline:    "Online",
	DBStopping:  "Stopping",
	DBResyncing: "Resyncing",
}
View Source
var SkippedSeqCleanViewBatch = 50 // Max number of sequences checked per query during CleanSkippedSequence.  Var to support testing
View Source
// ViewsAndGSIBucketInit performs one-time test bucket setup: for non-GoCB
// (e.g. Walrus) buckets it only readies views; for GoCB buckets it drops any
// existing indexes, then creates the Sync Gateway GSI indexes and a primary index.
var ViewsAndGSIBucketInit base.TBPBucketInitFunc = func(ctx context.Context, b base.Bucket, tbp *base.TestBucketPool) error {
	gocbBucket, ok := base.AsGoCBBucket(b)
	if !ok {

		// Non-GoCB buckets can't support xattrs, so xattr-based test runs must fail fast.
		if base.TestUseXattrs() {
			return fmt.Errorf("xattrs not supported when using Walrus buckets")
		}

		tbp.Logf(ctx, "bucket not a gocb bucket... skipping GSI setup")
		return viewBucketReadier(ctx, b, tbp)
	}

	// GSI disabled for this test run - nothing more to set up.
	if base.TestsDisableGSI() {
		return nil
	}

	// Skip the drop/recreate cycle when indexes already exist and are empty.
	if empty, err := isIndexEmpty(gocbBucket, base.TestUseXattrs()); empty && err == nil {
		tbp.Logf(ctx, "indexes already created, and already empty - skipping")
		return nil
	} else {
		tbp.Logf(ctx, "indexes not empty (or doesn't exist) - %v %v", empty, err)
	}

	tbp.Logf(ctx, "dropping existing bucket indexes")
	if err := base.DropAllBucketIndexes(gocbBucket); err != nil {
		tbp.Logf(ctx, "Failed to drop bucket indexes: %v", err)
		return err
	}

	tbp.Logf(ctx, "creating SG bucket indexes")
	if err := InitializeIndexes(gocbBucket, base.TestUseXattrs(), 0); err != nil {
		return err
	}

	// Primary index creation - NOTE(review): presumably needed by test helpers
	// that empty the bucket via N1QL; confirm against base.N1QLBucketEmptierFunc.
	err := gocbBucket.CreatePrimaryIndex(base.PrimaryIndexName, nil)
	if err != nil {
		return err
	}

	return nil
}

ViewsAndGSIBucketInit is run synchronously only once per-bucket to do any initial setup. For non-integration Walrus buckets, this is run for each new Walrus bucket.

View Source
// ViewsAndGSIBucketReadier returns a test bucket to a clean state: it empties
// the bucket, readies the views, and (unless GSI is disabled) waits until the
// GSI indexes report empty before the bucket is handed back to the pool.
var ViewsAndGSIBucketReadier base.TBPBucketReadierFunc = func(ctx context.Context, b *base.CouchbaseBucketGoCB, tbp *base.TestBucketPool) error {

	// When GSI is disabled there are no indexes to wait on - flush and ready views only.
	if base.TestsDisableGSI() {
		tbp.Logf(ctx, "flushing bucket and readying views only")
		if err := base.FlushBucketEmptierFunc(ctx, b, tbp); err != nil {
			return err
		}

		return viewBucketReadier(ctx, b, tbp)
	}

	tbp.Logf(ctx, "emptying bucket via N1QL, readying views and indexes")
	if err := base.N1QLBucketEmptierFunc(ctx, b, tbp); err != nil {
		return err
	}
	if err := viewBucketReadier(ctx, b, tbp); err != nil {
		return err
	}

	tbp.Logf(ctx, "waiting for empty bucket indexes")

	// Uses the star channel index as a proxy for all indexes having drained.
	if err := WaitForIndexEmpty(b, base.TestUseXattrs()); err != nil {
		tbp.Logf(ctx, "WaitForIndexEmpty returned an error: %v", err)
		return err
	}
	tbp.Logf(ctx, "bucket indexes empty")

	return nil
}

ViewsAndGSIBucketReadier empties the bucket, initializes Views, and waits until GSI indexes are empty. It is run asynchronously as soon as a test is finished with a bucket.

Functions

func AsSingleChannelCache

func AsSingleChannelCache(cacheValue interface{}) *singleChannelCacheImpl

Converts a RangeSafeCollection value to a singleChannelCacheImpl. On type conversion error, logs a warning and returns nil.

func AssertEqualBodies

func AssertEqualBodies(t *testing.T, expected, actual Body)

func AttachmentDigests

func AttachmentDigests(attachments AttachmentsMeta) []string

AttachmentDigests returns a list of attachment digests contained in the given AttachmentsMeta

func ChannelsFromQueryParams

func ChannelsFromQueryParams(queryParams interface{}) (channels []string, err error)

ChannelsFromQueryParams retrieves the channels associated with a byChannels replication filter from the generic queryParams interface{}. The channels may be passed as a JSON array of strings directly, or embedded in a JSON object with the "channels" property and array value

func ConnectToBucket

func ConnectToBucket(spec base.BucketSpec) (bucket base.Bucket, err error)

Helper function to open a Couchbase connection and return a specific bucket.

func CreateRevIDWithBytes

func CreateRevIDWithBytes(generation int, parentRevID string, bodyBytes []byte) string

func DecodeAttachment

func DecodeAttachment(att interface{}) ([]byte, error)

func DesignDocSyncGateway

func DesignDocSyncGateway() string

func DesignDocSyncHousekeeping

func DesignDocSyncHousekeeping() string

func ErrorToOttoValue

func ErrorToOttoValue(runner *sgbucket.JSRunner, err error) otto.Value

Converts an error to an otto value, to support native functions returning errors.

func GenerateChanges

func GenerateChanges(cancelCtx context.Context, database *Database, inChannels base.Set, options ChangesOptions, docIDFilter []string, send func([]*ChangeEntry) error) (err error, forceClose bool)

Shell of the continuous changes feed -- calls out to a `send` function to deliver the change. This is called from BLIP connections as well as HTTP handlers, which is why this is not a method on `handler`.

func GenerateProofOfAttachment

func GenerateProofOfAttachment(attachmentData []byte) (nonce []byte, proof string)

GenerateProofOfAttachment returns a nonce and proof for an attachment body.

func GetStringArrayProperty

func GetStringArrayProperty(body map[string]interface{}, property string) ([]string, error)

func InitializeIndexes

func InitializeIndexes(bucket base.Bucket, useXattrs bool, numReplicas uint) error

Initializes Sync Gateway indexes for bucket. Creates required indexes if not found, then waits for index readiness.

func InitializeViews

func InitializeViews(bucket base.Bucket) error

func IsMissingDDocError

func IsMissingDDocError(err error) bool

Similar to IsKeyNotFoundError(), but for the specific error returned by GetDDoc/DeleteDDoc

func NewBackgroundTask

func NewBackgroundTask(taskName string, dbName string, task BackgroundTaskFunc, interval time.Duration,
	c chan bool) error

NewBackgroundTask runs task at the specified time interval in its own goroutine until stopped or an error is returned by the BackgroundTaskFunc

func NewChannelCacheForContext

func NewChannelCacheForContext(terminator chan bool, options ChannelCacheOptions, context *DatabaseContext) (*channelCacheImpl, error)

func NewImportListener

func NewImportListener() *importListener

func NewNoRevMessage

func NewNoRevMessage() *noRevMessage

func NewReplicationCheckpoint

func NewReplicationCheckpoint(revID string, body Body) *replicationCheckpoint

NewReplicationCheckpoint converts a revID and checkpoint body into a replicationCheckpoint

func NewSGBlipContext

func NewSGBlipContext(ctx context.Context, id string) (bc *blip.Context)

NewSGBlipContext returns a go-blip context with the given ID, initialized for use in Sync Gateway.

func NewSGReplicateManager

func NewSGReplicateManager(dbContext *DatabaseContext, cfg *base.CfgSG) (*sgReplicateManager, error)

func ParseIntSequenceComponent

func ParseIntSequenceComponent(component string, allowEmpty bool) (uint64, error)

func ParseRevID

func ParseRevID(revid string) (int, string)

Splits a revision ID into generation number and hex digest.

func ParseRevisions

func ParseRevisions(body Body) []string

Parses a CouchDB _rev or _revisions property into a list of revision IDs

func ProveAttachment

func ProveAttachment(attachmentData, nonce []byte) (proof string)

ProveAttachment returns the proof for an attachment body and nonce pair.

func RealSpecialDocID

func RealSpecialDocID(doctype string, docid string) string

func RepairJobRevTreeCycles

func RepairJobRevTreeCycles(docId string, originalCBDoc []byte) (transformedCBDoc []byte, transformed bool, err error)

Repairs rev tree cycles (see SG issue #2847)

func ResultsEmpty

func ResultsEmpty(results gocb.QueryResults) (resultsEmpty bool)

Count how many rows are in gocb.QueryResults

func Sha1DigestKey

func Sha1DigestKey(data []byte) string

func VacuumAttachments

func VacuumAttachments(bucket base.Bucket) (int, error)

Deletes all orphaned CouchDB attachments not used by any revisions.

func ValidateDatabaseName

func ValidateDatabaseName(dbName string) error

func WaitForIndexEmpty

func WaitForIndexEmpty(bucket *base.CouchbaseBucketGoCB, useXattrs bool) error

Workaround SG #3570 by doing a polling loop until the star channel query returns 0 results. Uses the star channel index as a proxy to indicate that _all_ indexes are empty (which might not be true)

func WaitForUserWaiterChange

func WaitForUserWaiterChange(userWaiter *ChangeWaiter) bool

func WaitForViews

func WaitForViews(bucket base.Bucket) error

Issue a stale=false queries against critical views to guarantee indexing is complete and views are ready

Types

type APIEndpoints

type APIEndpoints struct {

	// This setting is only needed for testing purposes.  In the Couchbase Lite unit tests that run in "integration mode"
	// against a running Sync Gateway, the tests need to be able to flush the data in between tests to start with a clean DB.
	EnableCouchbaseBucketFlush bool `json:"enable_couchbase_bucket_flush,omitempty"` // Whether Couchbase buckets can be flushed via Admin REST API
}

type ActivePullReplicator

type ActivePullReplicator struct {
	// contains filtered or unexported fields
}

ActivePullReplicator is a unidirectional pull active replicator.

func NewPullReplicator

func NewPullReplicator(config *ActiveReplicatorConfig) *ActivePullReplicator

func (*ActivePullReplicator) CheckpointID

func (apr *ActivePullReplicator) CheckpointID() string

CheckpointID returns a unique ID to be used for the checkpoint client (which is used as part of the checkpoint Doc ID on the recipient)

func (*ActivePullReplicator) Complete

func (apr *ActivePullReplicator) Complete()

Complete gracefully shuts down a replication, waiting for all in-flight revisions to be processed before stopping the replication

func (ActivePullReplicator) GetStats

func (a ActivePullReplicator) GetStats() *BlipSyncStats

func (*ActivePullReplicator) Start

func (apr *ActivePullReplicator) Start() error

func (ActivePullReplicator) Stop

func (a ActivePullReplicator) Stop() error

Stop runs _disconnect and _stop on the replicator, and sets the Stopped replication state.

type ActivePushReplicator

type ActivePushReplicator struct {
	// contains filtered or unexported fields
}

ActivePushReplicator is a unidirectional push active replicator.

func NewPushReplicator

func NewPushReplicator(config *ActiveReplicatorConfig) *ActivePushReplicator

func (*ActivePushReplicator) CheckpointID

func (apr *ActivePushReplicator) CheckpointID() string

CheckpointID returns a unique ID to be used for the checkpoint client (which is used as part of the checkpoint Doc ID on the recipient)

func (*ActivePushReplicator) Complete

func (apr *ActivePushReplicator) Complete()

Complete gracefully shuts down a replication, waiting for all in-flight revisions to be processed before stopping the replication

func (ActivePushReplicator) GetStats

func (a ActivePushReplicator) GetStats() *BlipSyncStats

func (*ActivePushReplicator) Start

func (apr *ActivePushReplicator) Start() error

func (ActivePushReplicator) Stop

func (a ActivePushReplicator) Stop() error

Stop runs _disconnect and _stop on the replicator, and sets the Stopped replication state.

type ActiveReplicator

type ActiveReplicator struct {
	ID   string
	Push *ActivePushReplicator
	Pull *ActivePullReplicator
	// contains filtered or unexported fields
}

ActiveReplicator is a wrapper to encapsulate separate push and pull active replicators.

func NewActiveReplicator

func NewActiveReplicator(config *ActiveReplicatorConfig) *ActiveReplicator

NewActiveReplicator returns a bidirectional active replicator for the given config.

func (*ActiveReplicator) GetStatus

func (ar *ActiveReplicator) GetStatus() *ReplicationStatus

func (*ActiveReplicator) Reset

func (ar *ActiveReplicator) Reset() error

func (*ActiveReplicator) Start

func (ar *ActiveReplicator) Start() error

func (*ActiveReplicator) State

func (ar *ActiveReplicator) State() (state string, errorMessage string)

func (*ActiveReplicator) Stop

func (ar *ActiveReplicator) Stop() error

type ActiveReplicatorConfig

type ActiveReplicatorConfig struct {
	ID string
	// Filter is a predetermined filter name (e.g. sync_gateway/bychannel)
	Filter string
	// FilterChannels are a set of channels to be used by the sync_gateway/bychannel filter.
	FilterChannels []string
	// DocIDs limits the changes to only those doc IDs specified.
	DocIDs []string
	// ActiveOnly when true prevents changes being sent for tombstones on the initial replication.
	ActiveOnly bool
	// ChangesBatchSize controls how many revisions may be batched per changes message.
	ChangesBatchSize uint16
	// CheckpointInterval triggers a checkpoint to be set this often.
	CheckpointInterval time.Duration
	// CheckpointRevCount controls how many revs to store before attempting to save a checkpoint.
	Direction ActiveReplicatorDirection
	// Continuous specifies whether the replication should be continuous or one-shot.
	Continuous bool
	// RemoteDBURL represents the full Sync Gateway URL, including database path, and basic auth credentials of the target.
	RemoteDBURL *url.URL
	// PurgeOnRemoval will purge the document on the active side if we pull a removal from the remote.
	PurgeOnRemoval bool
	// ActiveDB is a reference to the active database context.
	ActiveDB *Database
	// WebsocketPingInterval is the time between websocket heartbeats sent by the active replicator.
	WebsocketPingInterval time.Duration
	// Conflict resolver
	ConflictResolverFunc ConflictResolverFunc
	// SGR1CheckpointID can be used as a fallback when SGR2 checkpoints can't be found.
	SGR1CheckpointID string
	// InitialReconnectInterval is the initial time to wait for exponential backoff reconnects.
	InitialReconnectInterval time.Duration
	// MaxReconnectInterval is the maximum amount of time to wait between exponential backoff reconnect attempts.
	MaxReconnectInterval time.Duration
	// TotalReconnectTimeout, if non-zero, is the amount of time to wait before giving up trying to reconnect.
	TotalReconnectTimeout time.Duration

	// Delta sync enabled
	DeltasEnabled bool

	// InsecureSkipVerify determines whether the TLS certificate verification should be
	// disabled during replication. TLS certificate verification is enabled by default.
	InsecureSkipVerify bool

	// Map corresponding to db.replications.[replicationID] in Sync Gateway's expvars.  Populated with
	// replication stats in blip_sync_stats.go
	ReplicationStatsMap *expvar.Map
	// contains filtered or unexported fields
}

ActiveReplicatorConfig controls the behaviour of the active replicator. TODO: This might be replaced with ReplicatorConfig in the future.

func (ActiveReplicatorConfig) CheckpointHash

func (arc ActiveReplicatorConfig) CheckpointHash() (string, error)

CheckpointHash returns a deterministic hash of the given config to be used as a checkpoint ID. TODO: Might be a way of caching this value? But need to be sure no config values will change without clearing the cached hash.

type ActiveReplicatorDirection

type ActiveReplicatorDirection string
const (
	ActiveReplicatorTypePushAndPull ActiveReplicatorDirection = "pushAndPull"
	ActiveReplicatorTypePush        ActiveReplicatorDirection = "push"
	ActiveReplicatorTypePull        ActiveReplicatorDirection = "pull"
)

func (ActiveReplicatorDirection) IsValid

func (d ActiveReplicatorDirection) IsValid() bool

type AllDocsIndexQueryRow

type AllDocsIndexQueryRow struct {
	Id       string
	RevID    string              `json:"r"`
	Sequence uint64              `json:"s"`
	Channels channels.ChannelMap `json:"c"`
}

type AllDocsViewQueryRow

type AllDocsViewQueryRow struct {
	Key   string
	Value struct {
		RevID    string   `json:"r"`
		Sequence uint64   `json:"s"`
		Channels []string `json:"c"`
	}
}

type AsyncEvent

type AsyncEvent struct {
}

Currently the AsyncEvent type only manages the Synchronous() check. Future enhancements around async processing would leverage this type.

func (AsyncEvent) Synchronous

func (ae AsyncEvent) Synchronous() bool

type AsyncEventHandler

type AsyncEventHandler struct{}

type AttachmentCallback

type AttachmentCallback func(name string, digest string, knownData []byte, meta map[string]interface{}) ([]byte, error)

type AttachmentData

type AttachmentData map[AttachmentKey][]byte

type AttachmentKey

type AttachmentKey string

Key for retrieving an attachment from Couchbase.

type AttachmentMap

type AttachmentMap map[string]*DocAttachment

A map of keys -> DocAttachments.

type AttachmentsMeta

type AttachmentsMeta map[string]interface{} // AttachmentsMeta metadata as included in sync metadata

func GetBodyAttachments

func GetBodyAttachments(body Body) AttachmentsMeta

GetBodyAttachments returns the _attachments property from body, when found. Checks for either map[string]interface{} (unmarshalled with the body), or AttachmentsMeta (written to the body by SG)

func (AttachmentsMeta) ShallowCopy

func (attachments AttachmentsMeta) ShallowCopy() AttachmentsMeta

type BLIPMessageSender

type BLIPMessageSender interface {
	Send(s *blip.Sender) (err error)
}

type BackgroundTaskError

type BackgroundTaskError struct {
	TaskName string
	Interval time.Duration
}

func (*BackgroundTaskError) Error

func (err *BackgroundTaskError) Error() string

type BackgroundTaskFunc

type BackgroundTaskFunc func(ctx context.Context) error

type BlipSyncContext

type BlipSyncContext struct {
	// contains filtered or unexported fields
}

BlipSyncContext represents one BLIP connection (socket) opened by a client. This connection remains open until the client closes it, and can receive any number of requests.

func NewBlipSyncContext

func NewBlipSyncContext(bc *blip.Context, db *Database, contextID string, replicationStats *BlipSyncStats) *BlipSyncContext

func (*BlipSyncContext) Close

func (bsc *BlipSyncContext) Close()

func (*BlipSyncContext) NotFoundHandler

func (bsc *BlipSyncContext) NotFoundHandler(rq *blip.Message)

NotFoundHandler is used for unknown requests

type BlipSyncStats

type BlipSyncStats struct {
	DeltaEnabledPullReplicationCount *expvar.Int // global
	HandleRevCount                   *expvar.Int // handleRev
	HandleRevErrorCount              *expvar.Int
	HandleRevDeltaRecvCount          *expvar.Int
	HandleRevBytes                   *expvar.Int
	HandleRevProcessingTime          *expvar.Int
	HandleRevDocsPurgedCount         *expvar.Int
	SendRevCount                     *expvar.Int // sendRev
	SendRevDeltaRequestedCount       *expvar.Int
	SendRevDeltaSentCount            *expvar.Int
	SendRevBytes                     *expvar.Int
	SendRevErrorTotal                *expvar.Int
	SendRevErrorConflictCount        *expvar.Int
	SendRevErrorRejectedCount        *expvar.Int
	SendRevErrorOtherCount           *expvar.Int
	HandleChangesCount               *expvar.Int // handleChanges/handleProposeChanges
	HandleChangesTime                *expvar.Int
	HandleChangesDeltaRequestedCount *expvar.Int
	HandleGetAttachment              *expvar.Int // handleGetAttachment
	HandleGetAttachmentBytes         *expvar.Int
	GetAttachment                    *expvar.Int // getAttachment
	GetAttachmentBytes               *expvar.Int
	HandleChangesResponseCount       *expvar.Int // handleChangesResponse
	HandleChangesResponseTime        *expvar.Int
	HandleChangesSendRevCount        *expvar.Int //  - (duplicates SendRevCount, included for support of CBL expvars)
	HandleChangesSendRevLatency      *expvar.Int
	HandleChangesSendRevTime         *expvar.Int
	SubChangesContinuousActive       *expvar.Int // subChanges
	SubChangesContinuousTotal        *expvar.Int
	SubChangesOneShotActive          *expvar.Int
	SubChangesOneShotTotal           *expvar.Int
	SendChangesCount                 *expvar.Int // sendChanges
	NumConnectAttempts               *expvar.Int
	NumReconnectsAborted             *expvar.Int
}

func BlipSyncStatsForCBL

func BlipSyncStatsForCBL(dbStats *DatabaseStats) *BlipSyncStats

Stats mappings. Creates a BlipSyncStats mapped to the corresponding CBL replication stats from DatabaseStats.

func BlipSyncStatsForSGRPull

func BlipSyncStatsForSGRPull(statsMap *expvar.Map) *BlipSyncStats

func BlipSyncStatsForSGRPush

func BlipSyncStatsForSGRPush(statsMap *expvar.Map) *BlipSyncStats

func NewBlipSyncStats

func NewBlipSyncStats() *BlipSyncStats

type Body

type Body map[string]interface{}

The body of a CouchDB document/revision as decoded from JSON.

func DefaultConflictResolver

func DefaultConflictResolver(conflict Conflict) (result Body, err error)

DefaultConflictResolver uses the same logic as revTree.WinningRevision: the revision whose (!deleted, generation, hash) tuple compares the highest. Returns error to satisfy ConflictResolverFunc signature

func LocalWinsConflictResolver

func LocalWinsConflictResolver(conflict Conflict) (winner Body, err error)

LocalWinsConflictResolver returns the local document as winner

func RemoteWinsConflictResolver

func RemoteWinsConflictResolver(conflict Conflict) (winner Body, err error)

RemoteWinsConflictResolver returns the remote document as-is

func (Body) Copy

func (body Body) Copy(copyType BodyCopyType) Body

func (Body) DeepCopy

func (body Body) DeepCopy() Body

func (Body) ExtractDeleted

func (body Body) ExtractDeleted() bool

func (Body) ExtractExpiry

func (body Body) ExtractExpiry() (uint32, error)

Returns the expiry as uint32 (using getExpiry), and removes the _exp property from the body

func (Body) ExtractRev

func (body Body) ExtractRev() string

func (Body) FixJSONNumbers

func (body Body) FixJSONNumbers()

Version of FixJSONNumbers (see base/util.go) that operates on a Body

func (Body) ShallowCopy

func (body Body) ShallowCopy() Body

func (*Body) Unmarshal

func (b *Body) Unmarshal(data []byte) error

type BodyCopyType

type BodyCopyType int
const (
	BodyDeepCopy    BodyCopyType = iota // Performs a deep copy (json marshal/unmarshal)
	BodyShallowCopy                     // Performs a shallow copy (copies top level properties, doesn't iterate into nested properties)
	BodyNoCopy                          // Doesn't copy - callers must not mutate the response
)

type BypassRevisionCache

type BypassRevisionCache struct {
	// contains filtered or unexported fields
}

BypassRevisionCache is an implementation of the RevisionCache interface that does not perform any caching. For any Get operation, it will always immediately fetch the requested revision from the backing store.

func NewBypassRevisionCache

func NewBypassRevisionCache(backingStore RevisionCacheBackingStore, bypassStat *expvar.Int) *BypassRevisionCache

func (*BypassRevisionCache) Get

func (rc *BypassRevisionCache) Get(docID, revID string, includeBody bool, includeDelta bool) (docRev DocumentRevision, err error)

Get fetches the revision for the given docID and revID immediately from the bucket.

func (*BypassRevisionCache) GetActive

func (rc *BypassRevisionCache) GetActive(docID string, includeBody bool) (docRev DocumentRevision, err error)

GetActive fetches the active revision for the given docID immediately from the bucket.

func (*BypassRevisionCache) Peek

func (rc *BypassRevisionCache) Peek(docID, revID string) (docRev DocumentRevision, found bool)

Peek is a no-op for a BypassRevisionCache, and always returns a false 'found' value.

func (*BypassRevisionCache) Put

func (rc *BypassRevisionCache) Put(docRev DocumentRevision)

Put is a no-op for a BypassRevisionCache

func (*BypassRevisionCache) UpdateDelta

func (rc *BypassRevisionCache) UpdateDelta(docID, revID string, toDelta RevisionDelta)

UpdateDelta is a no-op for a BypassRevisionCache

type CacheOptions

type CacheOptions struct {
	ChannelCacheOptions
	CachePendingSeqMaxWait time.Duration // Max wait for pending sequence before skipping
	CachePendingSeqMaxNum  int           // Max number of pending sequences before skipping
	CacheSkippedSeqMaxWait time.Duration // Max wait for skipped sequence before abandoning
}

func DefaultCacheOptions

func DefaultCacheOptions() CacheOptions

type ChangeEntry

type ChangeEntry struct {
	Seq     SequenceID      `json:"seq"`
	ID      string          `json:"id"`
	Deleted bool            `json:"deleted,omitempty"`
	Removed base.Set        `json:"removed,omitempty"`
	Doc     json.RawMessage `json:"doc,omitempty"`
	Changes []ChangeRev     `json:"changes"`
	Err     error           `json:"err,omitempty"` // Used to notify feed consumer of errors
	// contains filtered or unexported fields
}

A changes entry; Database.GetChanges returns an array of these. Marshals into the standard CouchDB _changes format.

func (*ChangeEntry) SetBranched

func (ce *ChangeEntry) SetBranched(isBranched bool)

func (*ChangeEntry) String

func (ce *ChangeEntry) String() string

type ChangeRev

type ChangeRev map[string]string // Key is always "rev", value is rev ID

type ChangeWaiter

type ChangeWaiter struct {
	// contains filtered or unexported fields
}

Helper for waiting on a changeListener. Every call to Wait() will wait for the listener's counter to increment from the value at the last call.

func (*ChangeWaiter) CurrentUserCount

func (waiter *ChangeWaiter) CurrentUserCount() uint64

Returns the current counter value for the waiter's user (and roles). If this value changes, it means the user or roles have been updated.

func (*ChangeWaiter) GetUserKeys

func (waiter *ChangeWaiter) GetUserKeys() (result []string)

Returns the set of user keys for this ChangeWaiter

func (*ChangeWaiter) RefreshUserCount

func (waiter *ChangeWaiter) RefreshUserCount() bool

Refreshes the last user count from the listener (without Wait being triggered). Returns true if the count has changed

func (*ChangeWaiter) RefreshUserKeys

func (waiter *ChangeWaiter) RefreshUserKeys(user auth.User)

RefreshUserKeys refreshes the waiter's userKeys (users and roles). Required when the user associated with a waiter has roles, and the user doc is updated. Does NOT add the keys to waiter.keys - UpdateChannels must be invoked if that's required.

func (*ChangeWaiter) UpdateChannels

func (waiter *ChangeWaiter) UpdateChannels(chans channels.TimedSet)

Updates the set of channel keys in the ChangeWaiter (maintains the existing set of user keys)

func (*ChangeWaiter) Wait

func (waiter *ChangeWaiter) Wait() uint32

Waits for the changeListener's counter to change from the last time Wait() was called.

type ChangesOptions

type ChangesOptions struct {
	Since       SequenceID // sequence # to start _after_
	Limit       int        // Max number of changes to return, if nonzero
	Conflicts   bool       // Show all conflicting revision IDs, not just winning one?
	IncludeDocs bool       // Include doc body of each change?
	Wait        bool       // Wait for results, instead of immediately returning empty result?
	Continuous  bool       // Run continuously until terminated?
	Terminator  chan bool  // Caller can close this channel to terminate the feed
	HeartbeatMs uint64     // How often to send a heartbeat to the client
	TimeoutMs   uint64     // After this amount of time, close the longpoll connection
	ActiveOnly  bool       // If true, only return information on non-deleted, non-removed revisions

	Ctx context.Context // Used for adding context to logs
	// contains filtered or unexported fields
}

Options for changes-feeds. ChangesOptions must not contain any mutable pointer references, as changes processing currently assumes a deep copy when doing chanOpts := changesOptions.

func (ChangesOptions) String

func (options ChangesOptions) String() string

type ChangesSendErr

type ChangesSendErr struct {
	// contains filtered or unexported fields
}

type ChannelCache

type ChannelCache interface {

	// Initializes the cache high sequence value
	Init(initialSequence uint64)

	// Adds an entry to the cache, returns set of channels it was added to
	AddToCache(change *LogEntry) []string

	// Notifies the cache of a principal update.  Updates the cache's high sequence
	AddPrincipal(change *LogEntry)

	// Remove purges the given doc IDs from all channel caches and returns the number of items removed.
	Remove(docIDs []string, startTime time.Time) (count int)

	// Returns set of changes for a given channel, within the bounds specified in options
	GetChanges(channelName string, options ChangesOptions) ([]*LogEntry, error)

	// Returns the set of all cached data for a given channel (intended for diagnostic usage)
	GetCachedChanges(channelName string) []*LogEntry

	// Clear reinitializes the cache to an empty state
	Clear()

	// Size of the largest individual channel cache, invoked for stats reporting
	//// TODO: let the cache manage its own stats internally (maybe take an updateStats call)
	MaxCacheSize() int

	// Returns the highest cached sequence, used for changes synchronization
	GetHighCacheSequence() uint64
	// contains filtered or unexported methods
}

type ChannelCacheOptions

type ChannelCacheOptions struct {
	ChannelCacheMinLength       int           // Keep at least this many entries in each per-channel cache
	ChannelCacheMaxLength       int           // Don't put more than this many entries in each per-channel cache
	ChannelCacheAge             time.Duration // Keep entries at least this long
	MaxNumChannels              int           // Maximum number of per-channel caches which will exist at any one point
	CompactHighWatermarkPercent int           // Compact HWM (as percent of MaxNumChannels)
	CompactLowWatermarkPercent  int           // Compact LWM (as percent of MaxNumChannels)
	ChannelQueryLimit           int           // Query limit
}

type ChannelQueryHandler

type ChannelQueryHandler interface {
	// contains filtered or unexported methods
}

ChannelQueryHandler interface is implemented by databaseContext.

type Checkpointer

type Checkpointer struct {
	// contains filtered or unexported fields
}

Checkpointer implements replicator checkpointing, by keeping two lists of sequences. Those which we expect to be processing revs for (either push or pull), and a map for those which we have done so on. Periodically (based on a time interval), these two lists are used to calculate the highest sequence number which we've not had a gap for yet, and send a SetCheckpoint message for this sequence.

func NewCheckpointer

func NewCheckpointer(ctx context.Context, clientID string, configHash string, blipSender *blip.Sender, replicatorConfig *ActiveReplicatorConfig) *Checkpointer

func (*Checkpointer) AddAlreadyKnownSeq

func (c *Checkpointer) AddAlreadyKnownSeq(seq ...string)

func (*Checkpointer) AddExpectedSeqIDAndRevs

func (c *Checkpointer) AddExpectedSeqIDAndRevs(seqs map[IDAndRev]string)

func (*Checkpointer) AddExpectedSeqs

func (c *Checkpointer) AddExpectedSeqs(seqs ...string)

func (*Checkpointer) AddProcessedSeq

func (c *Checkpointer) AddProcessedSeq(seq string)

func (*Checkpointer) AddProcessedSeqIDAndRev

func (c *Checkpointer) AddProcessedSeqIDAndRev(seq string, idAndRev IDAndRev)

func (*Checkpointer) CheckpointNow

func (c *Checkpointer) CheckpointNow()

CheckpointNow forces the checkpointer to send a checkpoint, and blocks until it has finished.

func (*Checkpointer) Start

func (c *Checkpointer) Start()

func (*Checkpointer) Stats

func (c *Checkpointer) Stats() CheckpointerStats

Stats returns a copy of the checkpointer stats. Intended for test use - non-test usage may have performance implications associated with locking

type CheckpointerStats

type CheckpointerStats struct {
	ExpectedSequenceCount              int64
	ProcessedSequenceCount             int64
	AlreadyKnownSequenceCount          int64
	SetCheckpointCount                 int64
	GetCheckpointHitCount              int64
	GetCheckpointMissCount             int64
	GetCheckpointSGR1FallbackHitCount  int64
	GetCheckpointSGR1FallbackMissCount int64
}

type ClusterUpdateFunc

type ClusterUpdateFunc func(cluster *SGRCluster) (cancel bool, err error)

ClusterUpdateFunc is callback signature used when updating the cluster configuration

type Conflict

type Conflict struct {
	LocalDocument  Body `json:"LocalDocument"`
	RemoteDocument Body `json:"RemoteDocument"`
}

Conflict is the input to all conflict resolvers. LocalDocument and RemoteDocument are expected to be document bodies with metadata injected into the body following the same approach used for doc and oldDoc in the Sync Function

type ConflictResolutionType

type ConflictResolutionType string

ConflictResolutionType is used to identify the Body returned by a conflict resolution function as local, remote, or merge

const (
	ConflictResolutionLocal  ConflictResolutionType = "local"
	ConflictResolutionRemote ConflictResolutionType = "remote"
	ConflictResolutionMerge  ConflictResolutionType = "merge"
)

type ConflictResolver

type ConflictResolver struct {
	// contains filtered or unexported fields
}

func NewConflictResolver

func NewConflictResolver(crf ConflictResolverFunc, statsContainer *expvar.Map) *ConflictResolver

func (*ConflictResolver) Resolve

func (c *ConflictResolver) Resolve(conflict Conflict) (winner Body, resolutionType ConflictResolutionType, err error)

Wrapper for ConflictResolverFunc that evaluates whether conflict resolution resulted in localWins, remoteWins, or merge

type ConflictResolverFunc

type ConflictResolverFunc func(conflict Conflict) (winner Body, err error)

Definition of the ConflictResolverFunc API. Winner may be one of conflict.LocalDocument or conflict.RemoteDocument, or a new Body based on a merge of the two.

  • In the merge case, winner[revid] must be empty.
  • If a nil Body is returned, the conflict should be resolved as a deletion/tombstone.

func NewConflictResolverFunc

func NewConflictResolverFunc(resolverType ConflictResolverType, customResolverSource string) (ConflictResolverFunc, error)

func NewCustomConflictResolver

func NewCustomConflictResolver(source string) (ConflictResolverFunc, error)

NewCustomConflictResolver returns a ConflictResolverFunc that executes the javascript conflict resolver specified by source

type ConflictResolverJSServer

type ConflictResolverJSServer struct {
	*sgbucket.JSServer
}

ConflictResolverJSServer manages the compiled javascript function runner instance

func NewConflictResolverJSServer

func NewConflictResolverJSServer(fnSource string) *ConflictResolverJSServer

func (*ConflictResolverJSServer) EvaluateFunction

func (i *ConflictResolverJSServer) EvaluateFunction(conflict Conflict) (Body, error)

EvaluateFunction executes the conflict resolver with the provided conflict and returns the result.

type ConflictResolverStats

type ConflictResolverStats struct {
	ConflictResultMergeCount  *expvar.Int
	ConflictResultLocalCount  *expvar.Int
	ConflictResultRemoteCount *expvar.Int
}

func DefaultConflictResolverStats

func DefaultConflictResolverStats() *ConflictResolverStats

func NewConflictResolverStats

func NewConflictResolverStats(container *expvar.Map) *ConflictResolverStats

NewConflictResolverStats initializes the replications stats inside the provided container, and returns a ConflictResolverStats to manage interaction with those stats. If the container is not specified, expvar stats will not be published.

type ConflictResolverType

type ConflictResolverType string
const (
	ConflictResolverLocalWins  ConflictResolverType = "localWins"
	ConflictResolverRemoteWins ConflictResolverType = "remoteWins"
	ConflictResolverDefault    ConflictResolverType = "default"
	ConflictResolverCustom     ConflictResolverType = "custom"
)

func (ConflictResolverType) IsValid

func (d ConflictResolverType) IsValid() bool

type DBOnlineCallback

type DBOnlineCallback func(dbContext *DatabaseContext)

Function type for something that calls NewDatabaseContext and wants a callback when the DB is detected to come back online. A rest.ServerContext package cannot be passed since it would introduce a circular dependency

type DBStateChangeEvent

type DBStateChangeEvent struct {
	AsyncEvent
	Doc Body
}

DBStateChangeEvent is raised when a DB goes online or offline. Event has name of DB that is firing event, the admin interface address for interacting with the db. The new state, the reason for the state change, the local system time of the change

func (*DBStateChangeEvent) EventType

func (dsce *DBStateChangeEvent) EventType() EventType

func (*DBStateChangeEvent) String

func (dsce *DBStateChangeEvent) String() string

type Database

type Database struct {
	*DatabaseContext

	Ctx context.Context
	// contains filtered or unexported fields
}

Represents a simulated CouchDB database. A new instance is created for each HTTP request, so this struct does not have to be thread-safe.

func CreateDatabase

func CreateDatabase(context *DatabaseContext) (*Database, error)

func GetDatabase

func GetDatabase(context *DatabaseContext, user auth.User) (*Database, error)

Makes a Database object given its name and bucket.

func (*Database) AddDocInstanceToChangeEntry

func (db *Database) AddDocInstanceToChangeEntry(entry *ChangeEntry, doc *Document, options ChangesOptions)

Adds a document body and/or its conflicts to a ChangeEntry

func (*Database) AddDocToChangeEntry

func (db *Database) AddDocToChangeEntry(entry *ChangeEntry, options ChangesOptions)

func (*Database) AddDocToChangeEntryUsingRevCache

func (db *Database) AddDocToChangeEntryUsingRevCache(entry *ChangeEntry, revID string) (err error)

func (*Database) CheckProposedRev

func (db *Database) CheckProposedRev(docid string, revid string, parentRevID string) ProposedRevStatus

Given a docID/revID to be pushed by a client, check whether it can be added _without conflict_. This is used by the BLIP replication code in "allow_conflicts=false" mode.

func (*Database) Compact

func (db *Database) Compact() (int, error)

Trigger tombstone compaction from view and/or GSI indexes. Several Sync Gateway indexes include server tombstones (deleted documents with an xattr). There currently isn't a mechanism for server to remove these docs from the index when the tombstone is purged by the server during metadata purge, because metadata purge doesn't trigger a DCP event. When compact is run, Sync Gateway initiates a normal delete operation for the document and xattr (a Sync Gateway purge). This triggers removal of the document from the index. In the event that the document has already been purged by server, we need to recreate and delete the document to accomplish the same result.

func (*Database) DeleteDesignDoc

func (db *Database) DeleteDesignDoc(ddocName string) (err error)

func (*Database) DeleteDoc

func (db *Database) DeleteDoc(docid string, revid string) (string, error)

Deletes a document, by adding a new revision whose _deleted property is true.

func (*Database) DeleteSpecial

func (db *Database) DeleteSpecial(doctype string, docid string, revid string) error

func (*Database) DocIDChangesFeed

func (db *Database) DocIDChangesFeed(userChannels base.Set, explicitDocIds []string, options ChangesOptions) (<-chan *ChangeEntry, error)

Generate the changes for a specific list of doc ID's, only documents accessible to the user will generate results. Only supports non-continuous changes, closes buffered channel before returning.

func (*Database) ForEachDocID

func (db *Database) ForEachDocID(callback ForEachDocIDFunc, resultsOpts ForEachDocIDOptions) error

Iterates over all documents in the database, calling the callback function on each

func (*Database) ForEachStubAttachment

func (db *Database) ForEachStubAttachment(body Body, minRevpos int, callback AttachmentCallback) error

Given a document body, invokes the callback once for each attachment that doesn't include its data. The callback is told whether the attachment body is known to the database, according to its digest. If the attachment isn't known, the callback can return data for it, which will be added to the metadata as a "data" property.

func (*Database) Get1xBody

func (db *Database) Get1xBody(docid string) (Body, error)

Returns the body of the current revision of a document

func (*Database) Get1xRevAndChannels

func (db *Database) Get1xRevAndChannels(docID string, revID string, listRevisions bool) (bodyBytes []byte, channels channels.ChannelMap, access UserAccessMap, roleAccess UserAccessMap, flags uint8, sequence uint64, gotRevID string, removed bool, err error)

Returns the body of a revision of a document, as well as the document's current channels and the user/roles it grants channel access to.

func (*Database) Get1xRevBody

func (db *Database) Get1xRevBody(docid, revid string, history bool, attachmentsSince []string) (Body, error)

Get Rev with all-or-none history based on specified 'history' flag

func (*Database) Get1xRevBodyWithHistory

func (db *Database) Get1xRevBodyWithHistory(docid, revid string, maxHistory int, historyFrom []string, attachmentsSince []string, showExp bool) (Body, error)

Retrieves rev with request history specified as collection of revids (historyFrom)

func (*Database) GetAttachment

func (db *Database) GetAttachment(key AttachmentKey) ([]byte, error)

Retrieves an attachment given its key.

func (*Database) GetChangeLog

func (db *Database) GetChangeLog(channelName string, afterSeq uint64) (entries []*LogEntry)

Returns the set of cached log entries for a given channel

func (*Database) GetChanges

func (db *Database) GetChanges(channels base.Set, options ChangesOptions) ([]*ChangeEntry, error)

Synchronous convenience function that returns all changes as a simple array, FOR TEST USE ONLY Returns error if initial feed creation fails, or if an error is returned with the changes entries

func (*Database) GetDelta

func (db *Database) GetDelta(docID, fromRevID, toRevID string) (delta *RevisionDelta, redactedRev *DocumentRevision, err error)

GetDelta attempts to return the delta between fromRevId and toRevId. If the delta can't be generated, returns nil.

func (*Database) GetDesignDoc

func (db *Database) GetDesignDoc(ddocName string, result interface{}) (err error)

func (*Database) GetRev

func (db *Database) GetRev(docID, revID string, history bool, attachmentsSince []string) (DocumentRevision, error)

GetRev returns the revision for the given docID and revID, or the current active revision if revID is empty.

func (*Database) GetSpecial

func (db *Database) GetSpecial(doctype string, docid string) (Body, error)

func (*Database) GetSpecialBytes

func (db *Database) GetSpecialBytes(doctype string, docid string) ([]byte, error)

func (*Database) ImportDoc

func (db *Database) ImportDoc(docid string, existingDoc *Document, isDelete bool, expiry *uint32, mode ImportMode) (docOut *Document, err error)

Import a document, given the existing state of the doc in *document format.

func (*Database) ImportDocRaw

func (db *Database) ImportDocRaw(docid string, value []byte, xattrValue []byte, isDelete bool, cas uint64, expiry *uint32, mode ImportMode) (docOut *Document, err error)

Imports a document that was written by someone other than sync gateway, given the existing state of the doc in raw bytes

func (*Database) IsCompactRunning

func (db *Database) IsCompactRunning() bool

func (*Database) IsIllegalConflict

func (db *Database) IsIllegalConflict(doc *Document, parentRevID string, deleted, noConflicts bool) bool

IsIllegalConflict returns true if the given operation is forbidden due to conflicts. AllowConflicts is whether or not the database allows conflicts, and 'noConflicts' is whether or not the request should allow conflicts to occur.

Truth table for AllowConflicts and noConflicts combinations:

                    AllowConflicts=true     AllowConflicts=false
noConflicts=true    continue checks         continue checks
noConflicts=false   return false            continue checks

func (*Database) MarkPrincipalsChanged

func (db *Database) MarkPrincipalsChanged(docid string, newRevID string, changedPrincipals, changedRoleUsers []string)

func (*Database) MultiChangesFeed

func (db *Database) MultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error)

func (*Database) NewUserWaiter

func (db *Database) NewUserWaiter() *ChangeWaiter

func (*Database) OnDemandImportForWrite

func (db *Database) OnDemandImportForWrite(docid string, doc *Document, deleted bool) error

func (*Database) Post

func (db *Database) Post(body Body) (string, string, *Document, error)

Creates a new document, assigning it a random doc ID.

func (*Database) Purge

func (db *Database) Purge(key string) error

Purges a document from the bucket (no tombstone)

func (*Database) Put

func (db *Database) Put(docid string, body Body) (newRevID string, doc *Document, err error)

Updates or creates a document. The new body's BodyRev property must match the current revision's, if any.

func (*Database) PutDesignDoc

func (db *Database) PutDesignDoc(ddocName string, ddoc sgbucket.DesignDoc) (err error)

func (*Database) PutExistingRev

func (db *Database) PutExistingRev(newDoc *Document, docHistory []string, noConflicts bool) (doc *Document, newRevID string, err error)

Adds an existing revision to a document along with its history (list of rev IDs.)

func (*Database) PutExistingRevWithBody

func (db *Database) PutExistingRevWithBody(docid string, body Body, docHistory []string, noConflicts bool) (doc *Document, newRev string, err error)

func (*Database) PutExistingRevWithConflictResolution

func (db *Database) PutExistingRevWithConflictResolution(newDoc *Document, docHistory []string, noConflicts bool, conflictResolver *ConflictResolver) (doc *Document, newRevID string, err error)

Adds an existing revision to a document along with its history (list of rev IDs.) If this new revision would result in a conflict:

  1. If noConflicts == false, the revision will be added to the rev tree as a conflict
  2. If noConflicts == true and a conflictResolverFunc is not provided, a 409 conflict error will be returned
  3. If noConflicts == true and a conflictResolverFunc is provided, conflicts will be resolved and the result added to the document.

func (*Database) PutSpecial

func (db *Database) PutSpecial(doctype string, docid string, body Body) (string, error)

func (*Database) QueryDesignDoc

func (db *Database) QueryDesignDoc(ddocName string, viewName string, options map[string]interface{}) (*sgbucket.ViewResult, error)

func (*Database) ReloadUser

func (db *Database) ReloadUser() error

Reloads the database's User object, in case its persistent properties have been changed.

func (*Database) RevDiff

func (db *Database) RevDiff(docid string, revids []string) (missing, possible []string)

Given a document ID and a set of revision IDs, looks up which ones are not known. Returns an array of the unknown revisions, and an array of known revisions that might be recent ancestors.

func (*Database) SameAs

func (db *Database) SameAs(otherdb *Database) bool

func (*Database) SetUser

func (db *Database) SetUser(user auth.User)

func (*Database) SimpleMultiChangesFeed

func (db *Database) SimpleMultiChangesFeed(chans base.Set, options ChangesOptions) (<-chan *ChangeEntry, error)

Returns the (ordered) union of all of the changes made to multiple channels.

func (*Database) UpdateAllDocChannels

func (db *Database) UpdateAllDocChannels() (int, error)

Re-runs the sync function on every current document in the database and/or imports docs in the bucket not known to the gateway. To be used when the JavaScript sync function changes.

func (*Database) User

func (db *Database) User() auth.User

type DatabaseContext

type DatabaseContext struct {
	Name       string          // Database name
	UUID       string          // UUID for this database instance. Used by cbgt and sgr
	Bucket     base.Bucket     // Storage
	BucketSpec base.BucketSpec // The BucketSpec
	BucketLock sync.RWMutex    // Control Access to the underlying bucket object

	ImportListener *importListener // Import feed listener

	ChannelMapper *channels.ChannelMapper // Runs JS 'sync' function
	StartTime     time.Time               // Timestamp when context was instantiated
	RevsLimit     uint32                  // Max depth a document's revision tree can grow to

	EventMgr           *EventManager          // Manages notification events
	AllowEmptyPassword bool                   // Allow empty passwords?  Defaults to false
	Options            DatabaseContextOptions // Database Context Options
	AccessLock         sync.RWMutex           // Allows DB offline to block until synchronous calls have completed
	State              uint32                 // The runtime state of the DB from a service perspective
	ExitChanges        chan struct{}          // Active _changes feeds on the DB will close when this channel is closed
	OIDCProviders      auth.OIDCProviderMap   // OIDC clients
	PurgeInterval      int                    // Metadata purge interval, in hours

	DbStats      *DatabaseStats // stats that correspond to this database context
	CompactState uint32         // Status of database compaction

	CfgSG                        *base.CfgSG         // Sync Gateway cluster shared config
	SGReplicateMgr               *sgReplicateManager // Manages interactions with sg-replicate replications
	Heartbeater                  base.Heartbeater    // Node heartbeater for SG cluster awareness
	ServeInsecureAttachmentTypes bool                // Attachment content type will bypass the content-disposition handling, default false
	// contains filtered or unexported fields
}

Basic description of a database. Shared between all Database objects on the same database. This object is thread-safe so it can be shared between HTTP handlers.

func NewDatabaseContext

func NewDatabaseContext(dbName string, bucket base.Bucket, autoImport bool, options DatabaseContextOptions) (*DatabaseContext, error)

Creates a new DatabaseContext on a bucket. The bucket will be closed when this context closes.

func (*DatabaseContext) AllPrincipalIDs

func (db *DatabaseContext) AllPrincipalIDs() (users, roles []string, err error)

Returns the IDs of all users and roles

func (*DatabaseContext) AllowConflicts

func (context *DatabaseContext) AllowConflicts() bool

func (*DatabaseContext) AllowExternalRevBodyStorage

func (context *DatabaseContext) AllowExternalRevBodyStorage() bool

func (*DatabaseContext) AllowFlushNonCouchbaseBuckets

func (context *DatabaseContext) AllowFlushNonCouchbaseBuckets() bool

func (*DatabaseContext) Authenticator

func (context *DatabaseContext) Authenticator() *auth.Authenticator

func (*DatabaseContext) CacheCompactActive

func (db *DatabaseContext) CacheCompactActive() bool

func (*DatabaseContext) ChannelViewTest

func (dbc *DatabaseContext) ChannelViewTest(channelName string, startSeq, endSeq uint64) (LogEntries, error)

Public channel view call - for unit test support

func (*DatabaseContext) Close

func (context *DatabaseContext) Close()

func (*DatabaseContext) ComputeChannelsForPrincipal

func (context *DatabaseContext) ComputeChannelsForPrincipal(princ auth.Principal) (channels.TimedSet, error)

Recomputes the set of channels a User/Role has been granted access to by sync() functions. This is part of the ChannelComputer interface defined by the Authenticator.

func (*DatabaseContext) ComputeRolesForUser

func (context *DatabaseContext) ComputeRolesForUser(user auth.User) (channels.TimedSet, error)

Recomputes the set of roles a User has been granted access to by sync() functions. This is part of the ChannelComputer interface defined by the Authenticator.

func (*DatabaseContext) ComputeSequenceChannelsForPrincipal

func (context *DatabaseContext) ComputeSequenceChannelsForPrincipal(princ auth.Principal) (channels.TimedSet, error)

Recomputes the set of channels a User/Role has been granted access to by sync() functions. This is part of the ChannelComputer interface defined by the Authenticator.

func (*DatabaseContext) ComputeSequenceRolesForUser

func (context *DatabaseContext) ComputeSequenceRolesForUser(user auth.User) (channels.TimedSet, error)

Recomputes the set of roles a User has been granted access to by sync() functions. This is part of the ChannelComputer interface defined by the Authenticator.

func (*DatabaseContext) CreateZeroSinceValue

func (context *DatabaseContext) CreateZeroSinceValue() SequenceID

Create a zero'd out since value (eg, initial since value) based on the sequence type of the database (int or vector clock)

func (*DatabaseContext) DeleteUserSessions

func (db *DatabaseContext) DeleteUserSessions(userName string) error

Deletes all session documents for a user

func (*DatabaseContext) DeltaSyncEnabled

func (context *DatabaseContext) DeltaSyncEnabled() bool

func (*DatabaseContext) FlushChannelCache

func (context *DatabaseContext) FlushChannelCache() error

Cache flush support. Currently test-only - added for unit test access from rest package

func (*DatabaseContext) FlushRevisionCacheForTest

func (context *DatabaseContext) FlushRevisionCacheForTest()

For test usage

func (*DatabaseContext) GetChangeCache

func (context *DatabaseContext) GetChangeCache() *changeCache

Utility function to support cache testing from outside db package

func (*DatabaseContext) GetChannelQueryCount

func (db *DatabaseContext) GetChannelQueryCount() int64

func (*DatabaseContext) GetDocSyncData

func (db *DatabaseContext) GetDocSyncData(docid string) (SyncData, error)

This gets *just* the Sync Metadata (_sync field) rather than the entire doc, for efficiency reasons.

func (*DatabaseContext) GetDocWithXattr

func (db *DatabaseContext) GetDocWithXattr(key string, unmarshalLevel DocumentUnmarshalLevel) (doc *Document, rawBucketDoc *sgbucket.BucketDocument, err error)

func (*DatabaseContext) GetDocument

func (db *DatabaseContext) GetDocument(docid string, unmarshalLevel DocumentUnmarshalLevel) (doc *Document, err error)

Lowest-level method that reads a document from the bucket

func (*DatabaseContext) GetOIDCProvider

func (context *DatabaseContext) GetOIDCProvider(providerName string) (*auth.OIDCProvider, error)

func (*DatabaseContext) GetPrincipal

func (dbc *DatabaseContext) GetPrincipal(name string, isUser bool) (info *PrincipalConfig, err error)

Test-only version of GetPrincipal that doesn't trigger channel/role recalculation

func (*DatabaseContext) GetRevisionCacheForTest

func (context *DatabaseContext) GetRevisionCacheForTest() RevisionCache

For test usage

func (*DatabaseContext) GetServerUUID

func (context *DatabaseContext) GetServerUUID() string

func (*DatabaseContext) GetUserViewsEnabled

func (context *DatabaseContext) GetUserViewsEnabled() bool

func (*DatabaseContext) IsClosed

func (context *DatabaseContext) IsClosed() bool

func (*DatabaseContext) LastSequence

func (context *DatabaseContext) LastSequence() (uint64, error)

func (*DatabaseContext) N1QLQueryWithStats

func (context *DatabaseContext) N1QLQueryWithStats(queryName string, statement string, params interface{}, consistency gocb.ConsistencyMode, adhoc bool) (results gocb.QueryResults, err error)

N1QLQueryWithStats is a wrapper for gocbBucket.Query that performs additional diagnostic processing (expvars, slow query logging)

func (*DatabaseContext) NewCacheRevsActiveWaiter

func (db *DatabaseContext) NewCacheRevsActiveWaiter(tb testing.TB) *StatWaiter

func (*DatabaseContext) NewDCPCachingCountWaiter

func (db *DatabaseContext) NewDCPCachingCountWaiter(tb testing.TB) *StatWaiter

func (*DatabaseContext) NewPullReplicationCaughtUpWaiter

func (db *DatabaseContext) NewPullReplicationCaughtUpWaiter(tb testing.TB) *StatWaiter

func (*DatabaseContext) NewStatWaiter

func (db *DatabaseContext) NewStatWaiter(stat *expvar.Int, tb testing.TB) *StatWaiter

func (*DatabaseContext) NotifyTerminatedChanges

func (context *DatabaseContext) NotifyTerminatedChanges(username string)

Trigger terminate check handling for connected continuous replications. TODO: The underlying code (NotifyCheckForTermination) doesn't actually leverage the specific username - should be refactored to remove it.

func (*DatabaseContext) OnDemandImportForGet

func (db *DatabaseContext) OnDemandImportForGet(docid string, rawDoc []byte, rawXattr []byte, cas uint64) (docOut *Document, err error)

OnDemandImportForGet. Attempts to import the doc based on the provided id, contents and cas. ImportDocRaw does cas retry handling if the document gets updated after the initial retrieval attempt that triggered this.

func (*DatabaseContext) ParseSequenceID

func (dbc *DatabaseContext) ParseSequenceID(str string) (s SequenceID, err error)

Currently accepts a plain string, but in the future might accept generic JSON objects. Calling this with a JSON string will result in an error.

func (*DatabaseContext) QueryAccess

func (context *DatabaseContext) QueryAccess(username string) (sgbucket.QueryResultIterator, error)

Query to compute the set of channels granted to the specified user via the Sync Function

func (*DatabaseContext) QueryAllDocs

func (context *DatabaseContext) QueryAllDocs(startKey string, endKey string) (sgbucket.QueryResultIterator, error)

AllDocs returns all non-deleted documents in the bucket between startKey and endKey

func (*DatabaseContext) QueryChannels

func (context *DatabaseContext) QueryChannels(channelName string, startSeq uint64, endSeq uint64, limit int,
	activeOnly bool) (sgbucket.QueryResultIterator, error)

Query to compute the set of documents assigned to the specified channel within the sequence range

func (*DatabaseContext) QueryPrincipals

func (context *DatabaseContext) QueryPrincipals() (sgbucket.QueryResultIterator, error)

Query to retrieve the set of user and role doc ids, using the primary index

func (*DatabaseContext) QueryResync

func (context *DatabaseContext) QueryResync() (sgbucket.QueryResultIterator, error)

func (*DatabaseContext) QueryRoleAccess

func (context *DatabaseContext) QueryRoleAccess(username string) (sgbucket.QueryResultIterator, error)

Query to compute the set of roles granted to the specified user via the Sync Function

func (*DatabaseContext) QuerySequences

func (context *DatabaseContext) QuerySequences(sequences []uint64) (sgbucket.QueryResultIterator, error)

Query to retrieve keys for the specified sequences. View query uses star channel, N1QL query uses IndexAllDocs

func (*DatabaseContext) QuerySessions

func (context *DatabaseContext) QuerySessions(userName string) (sgbucket.QueryResultIterator, error)

Query to retrieve the set of session doc ids for the specified user, using the primary index

func (*DatabaseContext) QueryTombstones

func (context *DatabaseContext) QueryTombstones(olderThan time.Time, limit int) (sgbucket.QueryResultIterator, error)

func (*DatabaseContext) RemoveObsoleteDesignDocs

func (context *DatabaseContext) RemoveObsoleteDesignDocs(previewOnly bool) (removedDesignDocs []string, err error)

Removes previous versions of Sync Gateway's design docs found on the server

func (*DatabaseContext) RemoveObsoleteIndexes

func (context *DatabaseContext) RemoveObsoleteIndexes(previewOnly bool) (removedIndexes []string, err error)

Removes previous versions of Sync Gateway's indexes found on the server

func (*DatabaseContext) RestartListener

func (context *DatabaseContext) RestartListener() error

For testing only!

func (*DatabaseContext) RevisionBodyLoader

func (db *DatabaseContext) RevisionBodyLoader(key string) ([]byte, error)

RevisionBodyLoader retrieves a non-winning revision body stored outside the document metadata

func (*DatabaseContext) SetOnChangeCallback

func (context *DatabaseContext) SetOnChangeCallback(callback DocChangedFunc)

func (*DatabaseContext) SetUserViewsEnabled

func (context *DatabaseContext) SetUserViewsEnabled(value bool)

func (*DatabaseContext) TakeDbOffline

func (dc *DatabaseContext) TakeDbOffline(reason string) error

func (*DatabaseContext) UpdateCalculatedStats

func (db *DatabaseContext) UpdateCalculatedStats()

Update database-specific stats that are more efficiently calculated at stats collection time

func (*DatabaseContext) UpdatePrincipal

func (dbc *DatabaseContext) UpdatePrincipal(newInfo PrincipalConfig, isUser bool, allowReplace bool) (replaced bool, err error)

Updates or creates a principal from a PrincipalConfig structure.

func (*DatabaseContext) UpdateSyncFun

func (context *DatabaseContext) UpdateSyncFun(syncFun string) (changed bool, err error)

Sets the database context's sync function based on the JS code from config. Returns a boolean indicating whether the function is different from the saved one. If multiple gateway instances try to update the function at the same time (to the same new value) only one of them will get a changed=true result.

func (*DatabaseContext) UseViews

func (context *DatabaseContext) UseViews() bool

func (*DatabaseContext) UseXattrs

func (context *DatabaseContext) UseXattrs() bool

func (*DatabaseContext) ViewQueryWithStats

func (context *DatabaseContext) ViewQueryWithStats(ddoc string, viewName string, params map[string]interface{}) (results sgbucket.QueryResultIterator, err error)

ViewQueryWithStats is a wrapper for view queries that performs additional diagnostic processing (expvars, slow query logging)

func (*DatabaseContext) WaitForCaughtUp

func (db *DatabaseContext) WaitForCaughtUp(targetCount int64) error

func (*DatabaseContext) WaitForPendingChanges

func (dbc *DatabaseContext) WaitForPendingChanges(ctx context.Context) (err error)

WaitForPendingChanges blocks until the change-cache has caught up with the latest writes to the database.

func (*DatabaseContext) WaitForSequence

func (dbc *DatabaseContext) WaitForSequence(ctx context.Context, sequence uint64) (err error)

WaitForSequence blocks until the given sequence has been received or skipped by the change cache.

func (*DatabaseContext) WaitForSequenceNotSkipped

func (dbc *DatabaseContext) WaitForSequenceNotSkipped(ctx context.Context, sequence uint64) (err error)

WaitForSequenceNotSkipped blocks until the given sequence has been received by the change cache without being skipped.

type DatabaseContextOptions

type DatabaseContextOptions struct {
	CacheOptions              *CacheOptions
	RevisionCacheOptions      *RevisionCacheOptions
	OldRevExpirySeconds       uint32
	AdminInterface            *string
	UnsupportedOptions        UnsupportedOptions
	OIDCOptions               *auth.OIDCOptions
	DBOnlineCallback          DBOnlineCallback // Callback function to take the DB back online
	ImportOptions             ImportOptions
	EnableXattr               bool             // Use xattr for _sync
	LocalDocExpirySecs        uint32           // The _local doc expiry time in seconds
	SecureCookieOverride      bool             // Pass-through DBConfig.SecureCookieOverride
	SessionCookieName         string           // Pass-through DbConfig.SessionCookieName
	SessionCookieHttpOnly     bool             // Pass-through DbConfig.SessionCookieHTTPOnly
	AllowConflicts            *bool            // False forbids creating conflicts
	SendWWWAuthenticateHeader *bool            // False disables setting of 'WWW-Authenticate' header
	UseViews                  bool             // Force use of views
	DeltaSyncOptions          DeltaSyncOptions // Delta Sync Options
	CompactInterval           uint32           // Interval in seconds between compaction is automatically ran - 0 means don't run
	SGReplicateOptions        SGReplicateOptions
	SlowQueryWarningThreshold time.Duration
}

type DatabaseStats

type DatabaseStats struct {
	// contains filtered or unexported fields
}

Wrapper around *expvars.Map for database stats that provide:

  • A lazy loading mechanism
  • Initialize all stats in a stat group to their zero values

func NewDatabaseStats

func NewDatabaseStats() *DatabaseStats

func (*DatabaseStats) ExpvarMap

func (d *DatabaseStats) ExpvarMap() *expvar.Map

Convert the entire dbstats structure into an expvar map to embed into a parent expvar map

func (*DatabaseStats) StatsByKey

func (d *DatabaseStats) StatsByKey(key string) (stats *expvar.Map)

func (*DatabaseStats) StatsCache

func (d *DatabaseStats) StatsCache() (stats *expvar.Map)

func (*DatabaseStats) StatsCblReplicationPull

func (d *DatabaseStats) StatsCblReplicationPull() (stats *expvar.Map)

func (*DatabaseStats) StatsCblReplicationPush

func (d *DatabaseStats) StatsCblReplicationPush() (stats *expvar.Map)

func (*DatabaseStats) StatsDatabase

func (d *DatabaseStats) StatsDatabase() (stats *expvar.Map)

func (*DatabaseStats) StatsDeltaSync

func (d *DatabaseStats) StatsDeltaSync() (stats *expvar.Map)

func (*DatabaseStats) StatsGsiViews

func (d *DatabaseStats) StatsGsiViews() (stats *expvar.Map)

func (*DatabaseStats) StatsReplications

func (d *DatabaseStats) StatsReplications() (stats *expvar.Map)

func (*DatabaseStats) StatsSecurity

func (d *DatabaseStats) StatsSecurity() (stats *expvar.Map)

func (*DatabaseStats) StatsSharedBucketImport

func (d *DatabaseStats) StatsSharedBucketImport() (stats *expvar.Map)

type DeltaSyncOptions

type DeltaSyncOptions struct {
	Enabled          bool   // Whether delta sync is enabled (EE only)
	RevMaxAgeSeconds uint32 // The number of seconds deltas for old revs are available for
}

type DocAttachment

type DocAttachment struct {
	ContentType string `json:"content_type,omitempty"`
	Digest      string `json:"digest,omitempty"`
	Length      int    `json:"length,omitempty"`
	Revpos      int    `json:"revpos,omitempty"`
	Stub        bool   `json:"stub,omitempty"`
	Data        []byte `json:"-"` // tell json marshal/unmarshal to ignore this field
}

A struct which models an attachment. Currently only used by test code, however new code or refactoring in the main codebase should try to use where appropriate.

type DocChangedFunc

type DocChangedFunc func(event sgbucket.FeedEvent)

type DocTransformer

type DocTransformer func(docId string, originalCBDoc []byte) (transformedCBDoc []byte, transformed bool, err error)

Given a Couchbase Bucket doc, transform the doc in some way to produce a new doc. Also return a boolean to indicate whether a transformation took place, or any errors occurred.

type Document

type Document struct {
	SyncData // Sync metadata

	ID  string `json:"-"` // Doc id.  (We're already using a custom MarshalJSON for *document that's based on body, so the json:"-" probably isn't needed here)
	Cas uint64 // Document cas

	Deleted        bool
	DocExpiry      uint32
	RevID          string
	DocAttachments AttachmentsMeta
	// contains filtered or unexported fields
}

A document as stored in Couchbase. Contains the body of the current revision plus metadata. In its JSON form, the body's properties are at top-level while the SyncData is in a special "_sync" property. Document doesn't do any locking - document instances aren't intended to be shared across multiple goroutines.

func NewDocument

func NewDocument(docid string) *Document

Returns a new empty document.

func (*Document) Body

func (doc *Document) Body() Body

Accessors for document properties. To support lazy unmarshalling of document contents, all access should be done through accessors

func (*Document) BodyBytes

func (doc *Document) BodyBytes() ([]byte, error)

func (*Document) BodyWithSpecialProperties

func (doc *Document) BodyWithSpecialProperties() ([]byte, error)

func (*Document) GetDeepMutableBody

func (doc *Document) GetDeepMutableBody() Body

Get a deep mutable copy of the body, using _rawBody. Initializes _rawBody based on _body if not already present.

func (*Document) HasBody

func (doc *Document) HasBody() bool

HasBody returns true if the given document has either an unmarshalled body, or raw bytes available.

func (*Document) IsChannelRemoval

func (doc *Document) IsChannelRemoval(revID string) (bodyBytes []byte, history Revisions, channels base.Set, isRemoval bool, isDelete bool, err error)

Determine whether the specified revision was a channel removal, based on doc.Channels. If so, construct the standard document body for a removal notification (_removed=true)

func (*Document) IsDeleted

func (doc *Document) IsDeleted() bool

func (*Document) IsSGWrite

func (doc *Document) IsSGWrite(rawBody []byte) (isSGWrite bool, crc32Match bool)

doc.IsSGWrite - used during on-demand import. Doesn't invoke SyncData.IsSGWrite so that we can complete the inexpensive cas check before the (potential) doc marshalling.

func (*Document) MarshalBodyAndSync

func (doc *Document) MarshalBodyAndSync() (retBytes []byte, err error)

Marshals both the body and sync data for a given document. If there is no raw body already available then we will marshal it all in one go. Otherwise we will reduce marshaling as much as possible by only marshaling the sync data and injecting it into the existing raw body.

func (*Document) MarshalJSON

func (doc *Document) MarshalJSON() (data []byte, err error)

func (*Document) MarshalWithXattr

func (doc *Document) MarshalWithXattr() (data []byte, xdata []byte, err error)

func (*Document) RemoveBody

func (doc *Document) RemoveBody()

func (*Document) UnmarshalJSON

func (doc *Document) UnmarshalJSON(data []byte) error

func (*Document) UnmarshalWithXattr

func (doc *Document) UnmarshalWithXattr(data []byte, xdata []byte, unmarshalLevel DocumentUnmarshalLevel) error

UnmarshalWithXattr unmarshals the provided raw document and xattr bytes. The provided DocumentUnmarshalLevel (unmarshalLevel) specifies how much of the provided document/xattr needs to be initially unmarshalled. If unmarshalLevel is anything less than the full document + metadata, the raw data is retained for subsequent lazy unmarshalling as needed.

func (*Document) UpdateBody

func (doc *Document) UpdateBody(body Body)

func (*Document) UpdateBodyBytes

func (doc *Document) UpdateBodyBytes(bodyBytes []byte)

func (*Document) UpdateExpiry

func (doc *Document) UpdateExpiry(expiry uint32)

Updates the expiry for a document

type DocumentChangeEvent

type DocumentChangeEvent struct {
	AsyncEvent
	DocBytes []byte
	DocID    string
	OldDoc   string
	Channels base.Set
}

DocumentChangeEvent is raised when a document has been successfully written to the backing data store. Event has the document body and channel set as properties.

func (*DocumentChangeEvent) EventType

func (dce *DocumentChangeEvent) EventType() EventType

func (*DocumentChangeEvent) String

func (dce *DocumentChangeEvent) String() string

type DocumentRevision

type DocumentRevision struct {
	DocID string
	RevID string
	// BodyBytes contains the raw document, with no special properties.
	BodyBytes   []byte
	History     Revisions
	Channels    base.Set
	Expiry      *time.Time
	Attachments AttachmentsMeta
	Delta       *RevisionDelta
	Deleted     bool
	// contains filtered or unexported fields
}

DocumentRevision stored and returned by the rev cache

func (*DocumentRevision) As1xBytes

func (rev *DocumentRevision) As1xBytes(db *Database, requestedHistory Revisions, attachmentsSince []string, showExp bool) (b []byte, err error)

As1xBytes returns a byte slice representing the 1.x style body, containing special properties (i.e. _id, _rev, _attachments, etc.)

func (*DocumentRevision) DeepMutableBody

func (rev *DocumentRevision) DeepMutableBody() (b Body, err error)

DeepMutableBody returns a deep copy of the given document revision as a plain body (without any special properties) Callers are free to modify any of this body without affecting the document revision.

func (*DocumentRevision) Mutable1xBody

func (rev *DocumentRevision) Mutable1xBody(db *Database, requestedHistory Revisions, attachmentsSince []string, showExp bool) (b Body, err error)

Mutable1xBody returns a copy of the given document revision as a 1.x style body (with special properties) Callers are free to modify this body without affecting the document revision.

func (*DocumentRevision) MutableBody

func (rev *DocumentRevision) MutableBody() (b Body, err error)

MutableBody returns a shallow copy of the given document revision as a plain body (without any special properties) Callers are only free to modify top-level properties of this body without affecting the document revision.

type DocumentUnmarshalLevel

type DocumentUnmarshalLevel uint8

type EmptyResultIterator

type EmptyResultIterator struct{}

func (*EmptyResultIterator) Close

func (e *EmptyResultIterator) Close() error

func (*EmptyResultIterator) Next

func (e *EmptyResultIterator) Next(valuePtr interface{}) bool

func (*EmptyResultIterator) NextBytes

func (e *EmptyResultIterator) NextBytes() []byte

func (*EmptyResultIterator) One

func (e *EmptyResultIterator) One(valuePtr interface{}) error

type Event

type Event interface {
	Synchronous() bool
	EventType() EventType
	String() string
}

An event that can be raised during SG processing.

type EventHandler

type EventHandler interface {
	HandleEvent(event Event) bool
	String() string
}

EventHandler interface represents an instance of an event handler defined in the database config

type EventManager

type EventManager struct {
	// contains filtered or unexported fields
}

EventManager routes raised events to corresponding event handlers. Incoming events are just dumped in the eventChannel to minimize time spent blocking whatever process is raising the event. The event queue worker goroutine works the event channel and sends events to the appropriate handlers

func NewEventManager

func NewEventManager() *EventManager

Creates a new event manager. Sets up the event channel for async events, and the goroutine to monitor and process that channel.

func (*EventManager) GetEventsProcessedFail

func (em *EventManager) GetEventsProcessedFail() int64

func (*EventManager) GetEventsProcessedSuccess

func (em *EventManager) GetEventsProcessedSuccess() int64

func (*EventManager) HasHandlerForEvent

func (em *EventManager) HasHandlerForEvent(eventType EventType) bool

Checks whether a handler of the given type has been registered to the event manager.

func (*EventManager) IncrementEventsProcessedFail

func (em *EventManager) IncrementEventsProcessedFail(delta int64) int64

func (*EventManager) IncrementEventsProcessedSuccess

func (em *EventManager) IncrementEventsProcessedSuccess(delta int64) int64

func (*EventManager) ProcessEvent

func (em *EventManager) ProcessEvent(event Event)

Concurrent processing of all async event handlers registered for the event type

func (*EventManager) RaiseDBStateChangeEvent

func (em *EventManager) RaiseDBStateChangeEvent(dbName string, state string, reason string, adminInterface string) error

Raises a DB state change event based on the db name, admin interface, new state, reason and local system time. If the event manager doesn't have a listener for this event, ignores.

func (*EventManager) RaiseDocumentChangeEvent

func (em *EventManager) RaiseDocumentChangeEvent(docBytes []byte, docID string, oldBodyJSON string, channels base.Set) error

Raises a document change event based on the document body and channel set. If the event manager doesn't have a listener for this event, ignores.

func (*EventManager) RegisterEventHandler

func (em *EventManager) RegisterEventHandler(handler EventHandler, eventType EventType)

Register a new event handler to the EventManager. The event manager will route events of type eventType to the handler.

func (*EventManager) Start

func (em *EventManager) Start(maxProcesses uint, waitTime int)

Starts the listener queue for the event manager

type EventType

type EventType uint8

Event type

const (
	DocumentChange EventType = iota
	DBStateChange
	UserAdd
)

type ForEachDocIDFunc

type ForEachDocIDFunc func(id IDRevAndSequence, channels []string) (bool, error)

type ForEachDocIDOptions

type ForEachDocIDOptions struct {
	Startkey string
	Endkey   string
	Limit    uint64
}

The ForEachDocID options for limiting query results

type GetSGR2CheckpointRequest

type GetSGR2CheckpointRequest struct {
	Client string // Client is the unique ID of client checkpoint to retrieve
	// contains filtered or unexported fields
}

GetSGR2CheckpointRequest is a strongly typed 'getCheckpoint' request for SG-Replicate 2.

func (*GetSGR2CheckpointRequest) Response

func (rq *GetSGR2CheckpointRequest) Response() (*SGR2Checkpoint, error)

func (*GetSGR2CheckpointRequest) Send

type IDAndRev

type IDAndRev struct {
	DocID string
	RevID string
}

type IDRevAndSequence

type IDRevAndSequence struct {
	DocID    string
	RevID    string
	Sequence uint64
}

type ImportFilterFunction

type ImportFilterFunction struct {
	*sgbucket.JSServer
}

func NewImportFilterFunction

func NewImportFilterFunction(fnSource string) *ImportFilterFunction

func (*ImportFilterFunction) EvaluateFunction

func (i *ImportFilterFunction) EvaluateFunction(doc Body) (bool, error)

Evaluates the import filter function against the given doc body, returning a boolean indicating whether the doc should be imported.

type ImportMode

type ImportMode uint8

type ImportOptions

type ImportOptions struct {
	ImportFilter     *ImportFilterFunction // Opt-in filter for document import
	BackupOldRev     bool                  // Create temporary backup of old revision body when available
	ImportPartitions uint16                // Number of partitions for import
}

Options associated with the import of documents not written by Sync Gateway

type JSEventFunction

type JSEventFunction struct {
	*sgbucket.JSServer
}

A thread-safe wrapper around a jsEventTask, i.e. an event function.

func NewJSEventFunction

func NewJSEventFunction(fnSource string) *JSEventFunction

func (*JSEventFunction) CallFunction

func (ef *JSEventFunction) CallFunction(event Event) (interface{}, error)

Calls a jsEventFunction returning an interface{}

func (*JSEventFunction) CallValidateFunction

func (ef *JSEventFunction) CallValidateFunction(event Event) (bool, error)

Calls a jsEventFunction returning bool.

type LRURevisionCache

type LRURevisionCache struct {
	// contains filtered or unexported fields
}

An LRU cache of document revision bodies, together with their channel access.

func NewLRURevisionCache

func NewLRURevisionCache(capacity uint32, backingStore RevisionCacheBackingStore, cacheHitStat, cacheMissStat *expvar.Int) *LRURevisionCache

Creates a revision cache with the given capacity and an optional loader function.

func (*LRURevisionCache) Get

func (rc *LRURevisionCache) Get(docID, revID string, includeBody bool, includeDelta bool) (DocumentRevision, error)

Looks up a revision from the cache. Returns the body of the revision, its history, and the set of channels it's in. If the cache has a loaderFunction, it will be called if the revision isn't in the cache; any error returned by the loaderFunction will be returned from Get.

func (*LRURevisionCache) GetActive

func (rc *LRURevisionCache) GetActive(docID string, includeBody bool) (DocumentRevision, error)

Attempts to retrieve the active revision for a document from the cache. Requires retrieval of the document from the bucket to guarantee the current active revision, but does minimal unmarshalling of the retrieved document to get the current rev from _sync metadata. If active rev is already in the rev cache, will use it. Otherwise will add to the rev cache using the raw document obtained in the initial retrieval.

func (*LRURevisionCache) Peek

func (rc *LRURevisionCache) Peek(docID, revID string) (docRev DocumentRevision, found bool)

Looks up a revision from the cache only. Will not fall back to loader function if not present in the cache.

func (*LRURevisionCache) Put

func (rc *LRURevisionCache) Put(docRev DocumentRevision)

Adds a revision to the cache.

func (*LRURevisionCache) UpdateDelta

func (rc *LRURevisionCache) UpdateDelta(docID, revID string, toDelta RevisionDelta)

Attempt to update the delta on a revision cache entry. If the entry is no longer resident in the cache, fails silently

type LogEntries

type LogEntries []*LogEntry

type LogEntry

type LogEntry channels.LogEntry

func (*LogEntry) IsActive

func (entry *LogEntry) IsActive() bool

Returns false if the entry is either a removal or a delete

func (*LogEntry) IsDeleted

func (entry *LogEntry) IsDeleted() bool

func (*LogEntry) IsRemoved

func (entry *LogEntry) IsRemoved() bool

func (*LogEntry) SetDeleted

func (entry *LogEntry) SetDeleted()

func (*LogEntry) SetRemoved

func (entry *LogEntry) SetRemoved()

func (LogEntry) String

func (l LogEntry) String() string

type LogPriorityQueue

type LogPriorityQueue []*LogEntry

A priority-queue of LogEntries, kept ordered by increasing sequence #.

func (LogPriorityQueue) Len

func (h LogPriorityQueue) Len() int

func (LogPriorityQueue) Less

func (h LogPriorityQueue) Less(i, j int) bool

func (*LogPriorityQueue) Pop

func (h *LogPriorityQueue) Pop() interface{}

func (*LogPriorityQueue) Push

func (h *LogPriorityQueue) Push(x interface{})

func (LogPriorityQueue) Swap

func (h LogPriorityQueue) Swap(i, j int)

type NodesByReplicationCount

type NodesByReplicationCount []*sortableSGNode

func (NodesByReplicationCount) Len

func (a NodesByReplicationCount) Len() int

func (NodesByReplicationCount) Less

func (a NodesByReplicationCount) Less(i, j int) bool

func (NodesByReplicationCount) Swap

func (a NodesByReplicationCount) Swap(i, j int)

type OidcTestProviderOptions

type OidcTestProviderOptions struct {
	Enabled bool `json:"enabled,omitempty"` // Whether the oidc_test_provider endpoints should be exposed on the public API
}

type OnCompleteFunc

type OnCompleteFunc func(replicationID string)

type PrincipalConfig

type PrincipalConfig struct {
	Name             *string  `json:"name,omitempty"`
	ExplicitChannels base.Set `json:"admin_channels,omitempty"`
	Channels         base.Set `json:"all_channels"`
	// Fields below only apply to Users, not Roles:
	Email             string   `json:"email,omitempty"`
	Disabled          bool     `json:"disabled,omitempty"`
	Password          *string  `json:"password,omitempty"`
	ExplicitRoleNames []string `json:"admin_roles,omitempty"`
	RoleNames         []string `json:"roles,omitempty"`
}

Struct that configures settings of a User/Role, for UpdatePrincipal. Also used in the rest package as a JSON object that defines a User/Role within a DbConfig and structures the request/response body in the admin REST API for /db/_user/*

func (PrincipalConfig) IsPasswordValid

func (p PrincipalConfig) IsPasswordValid(allowEmptyPass bool) (isValid bool, reason string)

Check if the password in this PrincipalConfig is valid. Only allow empty passwords if allowEmptyPass is true.

type ProposedRevStatus

type ProposedRevStatus int

Status code returned by CheckProposedRev

const (
	ProposedRev_OK       ProposedRevStatus = 0   // Rev can be added without conflict
	ProposedRev_Exists   ProposedRevStatus = 304 // Rev already exists locally
	ProposedRev_Conflict ProposedRevStatus = 409 // Rev would cause conflict
	ProposedRev_Error    ProposedRevStatus = 500 // Error occurred reading local doc
)

type QueryAccessRow

type QueryAccessRow struct {
	Value channels.TimedSet
}

QueryAccessRow used for response from both QueryAccess and QueryRoleAccess

type QueryChannelsRow

type QueryChannelsRow struct {
	Id         string `json:"id,omitempty"`
	Rev        string `json:"rev,omitempty"`
	Sequence   uint64 `json:"seq,omitempty"`
	Flags      uint8  `json:"flags,omitempty"`
	RemovalRev string `json:"rRev,omitempty"`
	RemovalDel bool   `json:"rDel,omitempty"`
}

type QueryIdRow

type QueryIdRow struct {
	Id string
}

Used for queries that only return doc id

type RepairBucket

type RepairBucket struct {
	DryRun            bool // If true, will only output what changes it *would* have made, but not make any changes
	RepairedFileTTL   time.Duration
	ViewQueryPageSize int
	Bucket            base.Bucket
	RepairJobs        []DocTransformer
}

A RepairBucket struct is the main API entrypoint to call for repairing documents in buckets

func NewRepairBucket

func NewRepairBucket(bucket base.Bucket) *RepairBucket

func (*RepairBucket) AddRepairJob

func (r *RepairBucket) AddRepairJob(repairJob DocTransformer) *RepairBucket

func (*RepairBucket) InitFrom

func (r *RepairBucket) InitFrom(params RepairBucketParams) *RepairBucket

func (RepairBucket) RepairBucket

func (r RepairBucket) RepairBucket() (results []RepairBucketResult, err error)

This is how the view is iterated:

┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─

┌ ─ ─ ─│─ ─ ─ ─ ─ ─ ─ ─

│ ┌ ─ ─ ─│─ ─ ─ ─ ─ ─ ─ ┐

│      │                ┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┐

│┌────┐ ┌────┐ ┌────┐ ┌────┐ │┌────┐│ ┌────┐ │

│doc1│  │doc2│ ││doc3││ │doc4│  │doc5│ ││doc6│               │

│└────┘ └────┘ └────┘ └────┘ │└────┘│ └────┘ │

│      │                │                     │

└ ─ ─ ─ ─ ─ ▲ ─ ─ ─ ─ ─ │ │ │

        │   └ ─ ─ ─ ─ ─ ▲ ─ ─ ─ ─ ─ │                     │
        │               │   └ ─ ─ ─ ─ ─▲─ ─ ─ ─ ─ ┘
  StartKey: ""          │           └ ─│─ ─ ─ ─ ─ ─ ─ ─ ─ ┘
    Limit: 3            │              │       ▲
NumProcessed: 3         │              │       │
                StartKey: "doc3"       │       │
                    Limit: 3           │       │
                NumProcessed: 2        │       └────────┐
                                       │                │
                               StartKey: "doc5"         │
                                   Limit: 3             │
                               NumProcessed: 1          │
                                                        │
                                                StartKey: "doc6"
                                                    Limit: 3
                                                NumProcessed: 0

* It starts with an empty start key * For the next page, it uses the last key processed as the new start key * Since the start key is inclusive, it will see the start key twice (on first page, and on next page) * If it's iterating a result page and sees a doc with the start key (eg, doc3 in above), it will ignore it so it doesn't process it twice * Stop condition: if NumProcessed is 0, because the only doc in result set had already been processed. *

func (*RepairBucket) SetDryRun

func (r *RepairBucket) SetDryRun(dryRun bool) *RepairBucket

func (RepairBucket) TransformBucketDoc

func (r RepairBucket) TransformBucketDoc(docId string, originalCBDoc []byte) (transformedCBDoc []byte, transformed bool, repairJobs []RepairJobType, err error)

Loops over all repair jobs and applies them

func (RepairBucket) WriteRepairedDocsToBucket

func (r RepairBucket) WriteRepairedDocsToBucket(docId string, originalDoc, updatedDoc []byte) (backupOrDryRunDocId string, err error)

type RepairBucketParams

type RepairBucketParams struct {
	DryRun            bool              `json:"dry_run"`
	ViewQueryPageSize *int              `json:"view_query_page_size"`
	RepairedFileTTL   *int              `json:"repair_output_ttl_seconds"`
	RepairJobs        []RepairJobParams `json:"repair_jobs"`
}

Params suitable for external (eg, HTTP) invocations to describe a RepairBucket operation

type RepairBucketResult

type RepairBucketResult struct {
	DryRun              bool            `json:"dry_run"`
	BackupOrDryRunDocId string          `json:"backup_or_dryrun_doc_id"`
	DocId               string          `json:"id"`
	RepairJobTypes      []RepairJobType `json:"repair_job_type"`
}

Record details about the result of a bucket repair that was made on a doc

type RepairJobParams

type RepairJobParams struct {
	RepairJobType   RepairJobType          `json:"type"`
	RepairJobParams map[string]interface{} `json:"params"`
}

Params suitable for external (eg, HTTP) invocations to describe a specific RepairJob operation

type RepairJobType

type RepairJobType string

Enum for the different repair jobs (eg, repairing rev tree cycles)

type ReplicationCfg

type ReplicationCfg struct {
	ReplicationConfig
	SGR1CheckpointID string `json:"sgr1_checkpoint_id,omitempty"` // can be set to fall back to when SGR2 checkpoints can't be found
	AssignedNode     string `json:"assigned_node"`                // UUID of node assigned to this replication
}

ReplicationCfg represents a replication definition as stored in the cluster config.

type ReplicationConfig

type ReplicationConfig struct {
	ID                     string                    `json:"replication_id"`
	Remote                 string                    `json:"remote"`
	Username               string                    `json:"username,omitempty"`
	Password               string                    `json:"password,omitempty"`
	Direction              ActiveReplicatorDirection `json:"direction"`
	ConflictResolutionType ConflictResolverType      `json:"conflict_resolution_type,omitempty"`
	ConflictResolutionFn   string                    `json:"custom_conflict_resolver,omitempty"`
	PurgeOnRemoval         bool                      `json:"purge_on_removal,omitempty"`
	DeltaSyncEnabled       bool                      `json:"enable_delta_sync,omitempty"`
	MaxBackoff             int                       `json:"max_backoff_time,omitempty"`
	State                  string                    `json:"state,omitempty"`
	Continuous             bool                      `json:"continuous"`
	Filter                 string                    `json:"filter,omitempty"`
	QueryParams            interface{}               `json:"query_params,omitempty"`
	Cancel                 bool                      `json:"cancel,omitempty"`
	Adhoc                  bool                      `json:"adhoc,omitempty"`
	BatchSize              int                       `json:"batch_size,omitempty"`
}

ReplicationConfig is a replication definition as stored in the Sync Gateway config

func DefaultReplicationConfig

func DefaultReplicationConfig() ReplicationConfig

func (*ReplicationConfig) Equals

func (rc *ReplicationConfig) Equals(compareToCfg *ReplicationConfig) (bool, error)

Equals is doing a relatively expensive json-based equality computation, so shouldn't be used in performance-sensitive scenarios

func (*ReplicationConfig) Redacted

func (r *ReplicationConfig) Redacted() *ReplicationConfig

Redacted returns the ReplicationCfg with password of the remote database redacted from both replication config and remote URL, i.e., any password will be replaced with xxxxx.

func (*ReplicationConfig) Upsert

Upsert updates ReplicationConfig with any non-empty properties specified in the incoming replication config. Note that if the intention is to reset the value to default, empty values must be specified.

func (*ReplicationConfig) ValidateReplication

func (rc *ReplicationConfig) ValidateReplication(fromConfig bool) (err error)

type ReplicationHeartbeatListener

type ReplicationHeartbeatListener struct {
	// contains filtered or unexported fields
}

ReplicationHeartbeatListener uses replication cfg to manage node list

func NewReplicationHeartbeatListener

func NewReplicationHeartbeatListener(mgr *sgReplicateManager) (*ReplicationHeartbeatListener, error)

func (*ReplicationHeartbeatListener) GetNodes

func (l *ReplicationHeartbeatListener) GetNodes() ([]string, error)

GetNodes returns a copy of the in-memory node set

func (*ReplicationHeartbeatListener) Name

func (*ReplicationHeartbeatListener) StaleHeartbeatDetected

func (l *ReplicationHeartbeatListener) StaleHeartbeatDetected(nodeUUID string)

When we detect other nodes have stopped pushing heartbeats, use manager to remove from cfg

func (*ReplicationHeartbeatListener) Stop

func (l *ReplicationHeartbeatListener) Stop()

type ReplicationStatus

type ReplicationStatus struct {
	ID               string             `json:"replication_id"`
	DocsRead         int64              `json:"docs_read"`
	DocsWritten      int64              `json:"docs_written"`
	DocsPurged       int64              `json:"docs_purged,omitempty"`
	DocWriteFailures int64              `json:"doc_write_failures"`
	DocWriteConflict int64              `json:"doc_write_conflict"`
	Status           string             `json:"status"`
	RejectedRemote   int64              `json:"rejected_by_remote"`
	RejectedLocal    int64              `json:"rejected_by_local"`
	LastSeqPull      string             `json:"last_seq_pull,omitempty"`
	LastSeqPush      string             `json:"last_seq_push,omitempty"`
	ErrorMessage     string             `json:"error_message,omitempty"`
	DeltasSent       int64              `json:"deltas_sent,omitempty"`
	DeltasRecv       int64              `json:"deltas_recv,omitempty"`
	DeltasRequested  int64              `json:"deltas_requested,omitempty"`
	Config           *ReplicationConfig `json:"config,omitempty"`
}

ReplicationStatus is used by the _replicationStatus REST API endpoints

func LoadReplicationStatus

func LoadReplicationStatus(dbContext *DatabaseContext, replicationID string) (status *ReplicationStatus, err error)

type ReplicationStatusOptions

type ReplicationStatusOptions struct {
	IncludeConfig bool // Include replication config
	LocalOnly     bool // Local replications only
	ActiveOnly    bool // Active replications only
	IncludeError  bool // Include replications in error state
}

func DefaultReplicationStatusOptions

func DefaultReplicationStatusOptions() ReplicationStatusOptions

type ReplicationUpsertConfig

type ReplicationUpsertConfig struct {
	ID                     string      `json:"replication_id"`
	Remote                 *string     `json:"remote"`
	Username               *string     `json:"username,omitempty"`
	Password               *string     `json:"password,omitempty"`
	Direction              *string     `json:"direction"`
	ConflictResolutionType *string     `json:"conflict_resolution_type,omitempty"`
	ConflictResolutionFn   *string     `json:"custom_conflict_resolver,omitempty"`
	PurgeOnRemoval         *bool       `json:"purge_on_removal,omitempty"`
	DeltaSyncEnabled       *bool       `json:"enable_delta_sync,omitempty"`
	MaxBackoff             *int        `json:"max_backoff_time,omitempty"`
	State                  *string     `json:"state,omitempty"`
	Continuous             *bool       `json:"continuous"`
	Filter                 *string     `json:"filter,omitempty"`
	QueryParams            interface{} `json:"query_params,omitempty"`
	Cancel                 *bool       `json:"cancel,omitempty"`
	Adhoc                  *bool       `json:"adhoc,omitempty"`
	BatchSize              *int        `json:"batch_size,omitempty"`
	SGR1CheckpointID       *string     `json:"sgr1_checkpoint_id,omitempty"`
}

ReplicationUpsertConfig is used for operations that support upsert of a subset of replication properties.

type ReplicatorCompleteFunc

type ReplicatorCompleteFunc func()

type ResponseType

type ResponseType uint8
const (
	StringResponse ResponseType = iota
	JSObjectResponse
)

type RevInfo

type RevInfo struct {
	ID       string
	Parent   string
	Deleted  bool
	Body     []byte // Used when revision body stored inline (stores bodies)
	BodyKey  string // Used when revision body stored externally (doc key used for external storage)
	Channels base.Set
	// contains filtered or unexported fields
}

Information about a single revision.

func (RevInfo) IsRoot

func (rev RevInfo) IsRoot() bool

func (RevInfo) ParentGenGTENodeGen

func (node RevInfo) ParentGenGTENodeGen() bool

Detect situations like:

node: &{ID:10-684759c169c75629d02b90fe10b56925 Parent:184-a6b3f72a2bc1f988bfb720fec8db3a1d Deleted:fa...

where the parent generation is *higher* than the node generation, which is never a valid scenario. Likewise, detect situations where the parent generation is equal to the node generation, which is also invalid.

type RevKey

type RevKey string

type RevLoaderFunc

type RevLoaderFunc func(key string) ([]byte, error)

RevLoaderFunc and RevWriterFunc manage persistence of non-winning revision bodies that are stored outside the document.

type RevMessage

type RevMessage struct {
	*blip.Message
}

Rev message

func NewRevMessage

func NewRevMessage() *RevMessage

func (*RevMessage) Deleted

func (rm *RevMessage) Deleted() bool

func (*RevMessage) DeltaSrc

func (rm *RevMessage) DeltaSrc() (deltaSrc string, found bool)

func (*RevMessage) HasDeletedProperty

func (rm *RevMessage) HasDeletedProperty() bool

func (*RevMessage) ID

func (rm *RevMessage) ID() (id string, found bool)

func (*RevMessage) Rev

func (rm *RevMessage) Rev() (rev string, found bool)

func (*RevMessage) Sequence

func (rm *RevMessage) Sequence() (sequence string, found bool)

func (*RevMessage) SetID

func (rm *RevMessage) SetID(id string)

func (*RevMessage) SetNoConflicts

func (rm *RevMessage) SetNoConflicts(noConflicts bool)

func (*RevMessage) SetProperties

func (rm *RevMessage) SetProperties(properties blip.Properties)

SetProperties will add the given properties to the blip message, overwriting any that already exist.

func (*RevMessage) SetRev

func (rm *RevMessage) SetRev(rev string)

func (*RevMessage) String

func (rm *RevMessage) String() string

type RevTree

type RevTree map[string]*RevInfo

A revision tree maps each revision ID to its RevInfo.

func (RevTree) ContainsCycles

func (tree RevTree) ContainsCycles() bool

func (RevTree) DeleteBranch

func (tree RevTree) DeleteBranch(node *RevInfo) (pruned int)

func (RevTree) FindLongestTombstonedBranch

func (tree RevTree) FindLongestTombstonedBranch() (generation int)

Find the generation of the longest deleted branch. For example in this rev tree:

http://cbmobile-bucket.s3.amazonaws.com/diagrams/example-sync-gateway-revtrees/four_branches_two_tombstoned.png

The longest deleted branch has a generation of 10

func (RevTree) FindLongestTombstonedBranchFromLeaves

func (tree RevTree) FindLongestTombstonedBranchFromLeaves(leaves []string) (generation int)

func (RevTree) FindShortestNonTombstonedBranch

func (tree RevTree) FindShortestNonTombstonedBranch() (generation int, found bool)

Find the minimum generation that has a non-deleted leaf. For example in this rev tree:

http://cbmobile-bucket.s3.amazonaws.com/diagrams/example-sync-gateway-revtrees/three_branches.png

The minimum generation that has a non-deleted leaf is "7-non-winning unresolved"

func (RevTree) FindShortestNonTombstonedBranchFromLeaves

func (tree RevTree) FindShortestNonTombstonedBranchFromLeaves(leaves []string) (generation int, found bool)

func (RevTree) GetLeaves

func (tree RevTree) GetLeaves() []string

Returns the leaf revision IDs (those that have no children.)

func (RevTree) GetLeavesFiltered

func (tree RevTree) GetLeavesFiltered(filter func(revId string) bool) []string

func (RevTree) MarshalJSON

func (tree RevTree) MarshalJSON() ([]byte, error)

func (RevTree) RenderGraphvizDot

func (tree RevTree) RenderGraphvizDot() string

Render the RevTree in Graphviz Dot format, which can then be used to generate a PNG diagram like http://cbmobile-bucket.s3.amazonaws.com/diagrams/example-sync-gateway-revtrees/three_branches.png using the command: dot -Tpng revtree.dot > revtree.png or an online tool such as webgraphviz.com

func (RevTree) RepairCycles

func (tree RevTree) RepairCycles() (err error)

Repair rev trees that have cycles introduced by SG Issue #2847

func (RevTree) UnmarshalJSON

func (tree RevTree) UnmarshalJSON(inputjson []byte) (err error)

type RevisionCache

type RevisionCache interface {
	// Get returns the given revision, and stores if not already cached.
	// When includeBody=true, the returned DocumentRevision will include a mutable shallow copy of the marshaled body.
	// When includeDelta=true, the returned DocumentRevision will include delta - requires additional locking during retrieval.
	Get(docID, revID string, includeBody bool, includeDelta bool) (DocumentRevision, error)

	// GetActive returns the current revision for the given doc ID, and stores if not already cached.
	// When includeBody=true, the returned DocumentRevision will include a mutable shallow copy of the marshaled body.
	GetActive(docID string, includeBody bool) (docRev DocumentRevision, err error)

	// Peek returns the given revision if present in the cache
	Peek(docID, revID string) (docRev DocumentRevision, found bool)

	// Put will store the given docRev in the cache
	Put(docRev DocumentRevision)

	// UpdateDelta stores the given toDelta value in the given rev if cached
	UpdateDelta(docID, revID string, toDelta RevisionDelta)
}

RevisionCache is an interface that can be used to fetch a DocumentRevision for a Doc ID and Rev ID pair.

func NewRevisionCache

func NewRevisionCache(cacheOptions *RevisionCacheOptions, backingStore RevisionCacheBackingStore, statsCache *expvar.Map) RevisionCache

NewRevisionCache returns a RevisionCache implementation for the given config options.

type RevisionCacheBackingStore

type RevisionCacheBackingStore interface {
	GetDocument(docid string, unmarshalLevel DocumentUnmarshalLevel) (doc *Document, err error)
	// contains filtered or unexported methods
}

RevisionCacheBackingStore is the interface required to be passed into a RevisionCache constructor to provide a backing store for loading documents.

type RevisionCacheOptions

type RevisionCacheOptions struct {
	Size       uint32
	ShardCount uint16
}

func DefaultRevisionCacheOptions

func DefaultRevisionCacheOptions() *RevisionCacheOptions

type RevisionDelta

type RevisionDelta struct {
	ToRevID           string   // Target revID for the delta
	DeltaBytes        []byte   // The actual delta
	AttachmentDigests []string // Digests for all attachments present on ToRevID
	ToChannels        base.Set // Full list of channels for the to revision
	RevisionHistory   []string // Revision history from parent of ToRevID to source revID, in descending order
	ToDeleted         bool     // Flag if ToRevID is a tombstone
}

RevisionDelta stores data about a delta between a revision and ToRevID.

type Revisions

type Revisions map[string]interface{}

A revisions property found within a Body. Expected to be of the form:

Revisions["start"]: int64, starting generation number
Revisions["ids"]: []string, list of digests

Used as map[string]interface{} instead of Revisions struct because it's unmarshalled along with Body, and we don't need the overhead of allocating a new object

func (Revisions) ParseRevisions

func (revisions Revisions) ParseRevisions() []string

ParseRevisions returns revisions as a slice of revids.

func (Revisions) ShallowCopy

func (revisions Revisions) ShallowCopy() Revisions

type SGIndex

type SGIndex struct {
	// contains filtered or unexported fields
}

SGIndex is used to manage the set of constants associated with each index definition

type SGIndexFlags

type SGIndexFlags uint8

type SGIndexType

type SGIndexType int
const (
	IndexAccess SGIndexType = iota
	IndexRoleAccess
	IndexChannels
	IndexAllDocs
	IndexTombstones
	IndexSyncDocs
)

type SGNode

type SGNode struct {
	UUID string `json:"uuid"` // Node UUID
	Host string `json:"host"` // Host name
}

SGNode represents a single Sync Gateway node in the cluster

func NewSGNode

func NewSGNode(uuid string, host string) *SGNode

type SGQuery

type SGQuery struct {
	// contains filtered or unexported fields
}

type SGR2Checkpoint

type SGR2Checkpoint struct {
	RevID string // The RevID of the checkpoint.
	Body  Body   // The checkpoint body
}

type SGRCluster

type SGRCluster struct {
	Replications map[string]*ReplicationCfg `json:"replications"` // Set of replications defined for the cluster, indexed by replicationID
	Nodes        map[string]*SGNode         `json:"nodes"`        // Set of nodes, indexed by host name
	// contains filtered or unexported fields
}

SGRCluster defines sg-replicate configuration and distribution for a collection of Sync Gateway nodes

func NewSGRCluster

func NewSGRCluster() *SGRCluster

func (*SGRCluster) GetReplicationIDsForNode

func (c *SGRCluster) GetReplicationIDsForNode(nodeUUID string) (replicationIDs []string)

func (*SGRCluster) RebalanceReplications

func (c *SGRCluster) RebalanceReplications()

RebalanceReplications distributes the set of defined replications across the set of available nodes

type SGReplicateOptions

type SGReplicateOptions struct {
	Enabled               bool          // Whether this node can be assigned sg-replicate replications
	WebsocketPingInterval time.Duration // BLIP Websocket Ping interval (for active replicators)
}

type SequenceID

type SequenceID struct {
	TriggeredBy uint64 // Int sequence: The sequence # that triggered this (0 if none)
	LowSeq      uint64 // Int sequence: Lowest contiguous sequence seen on the feed
	Seq         uint64 // Int sequence: The actual internal sequence
}

SequenceID doesn't do any clock hash management - it's expected that hashing has already been done (if required) when the clock is set.

func (SequenceID) Before

func (s SequenceID) Before(s2 SequenceID) bool

The most significant value is TriggeredBy, unless it's zero, in which case use Seq. The tricky part is that "n" sorts after "n:m" for any nonzero m

func (SequenceID) Equals

func (s SequenceID) Equals(s2 SequenceID) bool

Equality of sequences, based on seq, triggered by and low hash

func (SequenceID) IsNonZero

func (s SequenceID) IsNonZero() bool

func (SequenceID) MarshalJSON

func (s SequenceID) MarshalJSON() ([]byte, error)

func (SequenceID) SafeSequence

func (s SequenceID) SafeSequence() uint64

func (SequenceID) String

func (s SequenceID) String() string

Format sequence ID to send to clients. Sequence IDs can be in one of the following formats:

Seq                    - simple sequence
TriggeredBy:Seq        - when TriggeredBy is non-zero, LowSeq is zero
LowSeq:TriggeredBy:Seq - when LowSeq is non-zero.

When LowSeq is non-zero but TriggeredBy is zero, will appear as LowSeq::Seq. When LowSeq is non-zero but is greater than s.Seq (occurs when sending previously skipped sequences), ignore LowSeq.

func (*SequenceID) UnmarshalJSON

func (s *SequenceID) UnmarshalJSON(data []byte) error

type SequenceIDParser

type SequenceIDParser func(since string) (SequenceID, error)

Function signature for something that parses a sequence id from a string

type SetCheckpointMessage

type SetCheckpointMessage struct {
	*blip.Message
}

setCheckpoint message

func NewSetCheckpointMessage

func NewSetCheckpointMessage() *SetCheckpointMessage

func (*SetCheckpointMessage) SetClient

func (scm *SetCheckpointMessage) SetClient(client string)

func (*SetCheckpointMessage) SetRev

func (scm *SetCheckpointMessage) SetRev(rev string)

func (*SetCheckpointMessage) String

func (scm *SetCheckpointMessage) String() string

type SetCheckpointResponse

type SetCheckpointResponse struct {
	*blip.Message
}

func (*SetCheckpointResponse) Rev

func (scr *SetCheckpointResponse) Rev() (rev string)

type SetSGR2CheckpointRequest

type SetSGR2CheckpointRequest struct {
	Client     string  // Client is the unique ID of client checkpoint to retrieve
	RevID      *string // RevID of the previous checkpoint, if known.
	Checkpoint Body    // Checkpoint is the actual checkpoint body we're sending.
	// contains filtered or unexported fields
}

SetSGR2CheckpointRequest is a strongly typed 'setCheckpoint' request for SG-Replicate 2.

func (*SetSGR2CheckpointRequest) Response

func (*SetSGR2CheckpointRequest) Send

type SetSGR2CheckpointResponse

type SetSGR2CheckpointResponse struct {
	RevID string // The RevID of the sent checkpoint.
}

type ShardedLRURevisionCache

type ShardedLRURevisionCache struct {
	// contains filtered or unexported fields
}

func NewShardedLRURevisionCache

func NewShardedLRURevisionCache(shardCount uint16, capacity uint32, backingStore RevisionCacheBackingStore, cacheHitStat, cacheMissStat *expvar.Int) *ShardedLRURevisionCache

Creates a sharded revision cache with the given capacity and an optional loader function.

func (*ShardedLRURevisionCache) Get

func (sc *ShardedLRURevisionCache) Get(docID, revID string, includeBody bool, includeDelta bool) (docRev DocumentRevision, err error)

func (*ShardedLRURevisionCache) GetActive

func (sc *ShardedLRURevisionCache) GetActive(docID string, includeBody bool) (docRev DocumentRevision, err error)

func (*ShardedLRURevisionCache) Peek

func (sc *ShardedLRURevisionCache) Peek(docID, revID string) (docRev DocumentRevision, found bool)

func (*ShardedLRURevisionCache) Put

func (sc *ShardedLRURevisionCache) Put(docRev DocumentRevision)

func (*ShardedLRURevisionCache) UpdateDelta

func (sc *ShardedLRURevisionCache) UpdateDelta(docID, revID string, toDelta RevisionDelta)

type SingleChannelCache

type SingleChannelCache interface {
	GetChanges(options ChangesOptions) ([]*LogEntry, error)
	GetCachedChanges(options ChangesOptions) (validFrom uint64, result []*LogEntry)
	ChannelName() string
	SupportsLateFeed() bool
	LateSequenceUUID() uuid.UUID
	GetLateSequencesSince(sinceSequence uint64) (entries []*LogEntry, lastSequence uint64, err error)
	RegisterLateSequenceClient() (latestLateSeq uint64)
	ReleaseLateSequenceClient(sequence uint64) (success bool)
}

Minimizes need to perform GSI/View queries to respond to the changes feed by keeping a per-channel cache of the most recent changes in the channel. Changes received over DCP won't necessarily be in sequence order, but the changes are buffered and re-ordered before inserting into the cache, and so the cache is always kept in ascending order by sequence. (this is the global sync sequence, not the per-vbucket sequence).

Uniqueness guarantee: a given document should only have _one_ entry in the cache, which represents the most recent revision (the revision with the highest sequence number).

Completeness guarantee: the cache is guaranteed to have _every_ change in a channel from the validFrom sequence up to the current known latest change in that channel. Eg, there are no gaps/holes.

The validFrom state variable tracks the oldest sequence for which the cache is complete. This may be earlier than the oldest entry in the cache. Used to determine whether a GSI/View query is required for a given getChanges request.

Shortly after startup and receiving a few changes from DCP, the cache might look something like this:

┌───────────────────────────────────────────────────────────────────────────────────────┐ │ Changes for Channel X (CBServer + Cache) │ │ ┌─────────────────────────────────────────────────┐ │ │ │ Cache Subset (MaxSize=3) │ │ │ │ │ │ │ │ ┌─────────┐ ┌─────────┐ │ │ │ │ │ Seq: 8 │ │ Seq: 25 │ │ │ │ │ ▲ │ Doc: A │ │ Doc: B │ │ │ │ │ │ │ Rev: 1 │ │ Rev: 4 │ │ │ │ │ │ └─────────┘ └─────────┘ │ │ │ │ │ │ │ │ ValidFrom = Seq 5 │ │ │ │ (_sync:Seq at Startup) │ │ │ │ │ │ │ └─────────────────────────────────────────────────┘ │ │ │ └───────────────────────────────────────────────────────────────────────────────────────┘

All changes for the channel currently fit in the cache. The validFrom is set to the value of the _sync:Seq that was read on startup.

Later after more data has been added that will fit in the cache, it will look more like this:

┌───────────────────────────────────────────────────────────────────────────────────────┐ │ Changes for Channel X (CBServer + Cache) │ │ ┌─────────────────────────────────────────────┐ │ │ │ Cache Subset (MaxSize=3) │ │ │ │ │ │ │ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ ┌─────────┐┌─────────┐ ┌─────────┐ │ │ │ │ Seq: 8 │ │ Seq: 25 │ │Seq: ... │ │ │Seq: 6002││Seq: 7022│ │Seq: 7027│ │ │ │ │ Doc: A │ │ Doc: B │ │Doc: ... │ │ ▲ │ Doc: A ││ Doc: H │ │ Doc: M │ │ │ │ │ Rev: 1 │ │ Rev: 4 │ │Rev: ... │ │ │ │Rev: 345 ││ Rev: 4 │ │ Rev: 47 │ │ │ │ └─────────┘ └─────────┘ └─────────┘ │ │ └─────────┘└─────────┘ └─────────┘ │ │ │ │ │ │ │ │ ValidFrom = Seq 5989 (cache known │ │ │ │ to be complete + valid from seq) │ │ │ │ │ │ │ └─────────────────────────────────────────────┘ │ │ │ └───────────────────────────────────────────────────────────────────────────────────────┘

If a calling function wanted to get all of the changes for the channel since Seq 3000, they would be forced to issue a GSI/View backfill query for changes in the channel between 3000 and 5989, since the cache is only validFrom the sequence 5989. In this case, none of those backfilled values will fit in the cache, which is already full, however if it did have capacity available, then those values would be _prepended_ to the front of the cache and the validFrom sequence would be lowered to account for the fact that it's now valid from a lower sequence value. Entries that violated the guarantees described above would possibly be discarded.

type SkippedSequence

type SkippedSequence struct {
	// contains filtered or unexported fields
}

type SkippedSequenceList

type SkippedSequenceList struct {
	// contains filtered or unexported fields
}

SkippedSequenceList stores the set of skipped sequences as an ordered list of *SkippedSequence with an associated map for sequence-based lookup.

func NewSkippedSequenceList

func NewSkippedSequenceList() *SkippedSequenceList

func (*SkippedSequenceList) Contains

func (l *SkippedSequenceList) Contains(x uint64) bool

Contains does a simple search to detect presence

func (*SkippedSequenceList) Push

func (l *SkippedSequenceList) Push(x *SkippedSequence) (err error)

Push sequence to the end of SkippedSequenceList. Validates sequence ordering in list.

func (*SkippedSequenceList) Remove

func (l *SkippedSequenceList) Remove(x uint64) error

Removes a single entry from the list.

func (*SkippedSequenceList) RemoveSequences

func (l *SkippedSequenceList) RemoveSequences(ctx context.Context, sequences []uint64) (removedCount int64)

type StableSequenceCallbackFunc

type StableSequenceCallbackFunc func() uint64

type StatWaiter

type StatWaiter struct {
	// contains filtered or unexported fields
}

func (*StatWaiter) Add

func (sw *StatWaiter) Add(count int)

func (*StatWaiter) AddAndWait

func (sw *StatWaiter) AddAndWait(count int)

func (*StatWaiter) Wait

func (sw *StatWaiter) Wait()

Wait uses backoff retry for up to ~27s

type SubChangesBody

type SubChangesBody struct {
	DocIDs []string `json:"docIDs"`
}

type SubChangesParams

type SubChangesParams struct {
	// contains filtered or unexported fields
}

Helper for handling BLIP subChanges requests. Supports Stringer() interface to log aspects of the request.

func NewSubChangesParams

func NewSubChangesParams(logCtx context.Context, rq *blip.Message, zeroSeq SequenceID, sequenceIDParser SequenceIDParser) (*SubChangesParams, error)

Create a new subChanges helper

func (*SubChangesParams) Since

func (s *SubChangesParams) Since() SequenceID

func (*SubChangesParams) String

func (s *SubChangesParams) String() string

Satisfy fmt.Stringer interface for dumping attributes of this subChanges request to logs

type SubChangesRequest

type SubChangesRequest struct {
	Continuous     bool     // Continuous can be set to true if the requester wants change notifications to be sent indefinitely (optional)
	Batch          uint16   // Batch controls the maximum number of changes to send in a single change message (optional)
	Since          string   // Since represents the latest sequence ID already known to the requester (optional)
	Filter         string   // Filter is the name of a filter function known to the recipient (optional)
	FilterChannels []string // FilterChannels are a set of channels used with a 'sync_gateway/bychannel' filter (optional)
	DocIDs         []string // DocIDs specifies which doc IDs the recipient should send changes for (optional)
	ActiveOnly     bool     // ActiveOnly is set to `true` if the requester doesn't want to be sent tombstones. (optional)
	// contains filtered or unexported fields
}

SubChangesRequest is a strongly typed 'subChanges' request.

func (*SubChangesRequest) Send

func (rq *SubChangesRequest) Send(s *blip.Sender) error

type SyncData

type SyncData struct {
	CurrentRev      string              `json:"rev"`
	NewestRev       string              `json:"new_rev,omitempty"` // Newest rev, if different from CurrentRev
	Flags           uint8               `json:"flags,omitempty"`
	Sequence        uint64              `json:"sequence,omitempty"`
	UnusedSequences []uint64            `json:"unused_sequences,omitempty"` // unused sequences due to update conflicts/CAS retry
	RecentSequences []uint64            `json:"recent_sequences,omitempty"` // recent sequences for this doc - used in server dedup handling
	History         RevTree             `json:"history"`
	Channels        channels.ChannelMap `json:"channels,omitempty"`
	Access          UserAccessMap       `json:"access,omitempty"`
	RoleAccess      UserAccessMap       `json:"role_access,omitempty"`
	Expiry          *time.Time          `json:"exp,omitempty"`           // Document expiry.  Information only - actual expiry/delete handling is done by bucket storage.  Needs to be pointer for omitempty to work (see https://github.com/golang/go/issues/4357)
	Cas             string              `json:"cas"`                     // String representation of a cas value, populated via macro expansion
	Crc32c          string              `json:"value_crc32c"`            // String representation of crc32c hash of doc body, populated via macro expansion
	TombstonedAt    int64               `json:"tombstoned_at,omitempty"` // Time the document was tombstoned.  Used for view compaction
	Attachments     AttachmentsMeta     `json:"attachments,omitempty"`

	// Only used for performance metrics:
	TimeSaved time.Time `json:"time_saved,omitempty"` // Timestamp of save.

	// Backward compatibility (the "deleted" field was, um, deleted in commit 4194f81, 2/17/14)
	Deleted_OLD bool `json:"deleted,omitempty"`
	// contains filtered or unexported fields
}

The sync-gateway metadata stored in the "_sync" property of a Couchbase document.

func UnmarshalDocumentSyncData

func UnmarshalDocumentSyncData(data []byte, needHistory bool) (*SyncData, error)

Unmarshals just a document's sync metadata from JSON data. (This is somewhat faster, if all you need is the sync data without the doc body.)

func UnmarshalDocumentSyncDataFromFeed

func UnmarshalDocumentSyncDataFromFeed(data []byte, dataType uint8, needHistory bool) (result *SyncData, rawBody []byte, rawXattr []byte, err error)

TODO: Using a pool of unmarshal workers may help prevent memory spikes under load

func (*SyncData) GetSyncCas

func (s *SyncData) GetSyncCas() uint64

Converts the string hex encoding that's stored in the sync metadata to a uint64 cas value

func (*SyncData) HasValidSyncData

func (doc *SyncData) HasValidSyncData() bool

func (*SyncData) HashRedact

func (sd *SyncData) HashRedact(salt string) SyncData

func (*SyncData) IsSGWrite

func (s *SyncData) IsSGWrite(cas uint64, rawBody []byte) (isSGWrite bool, crc32Match bool)

SyncData.IsSGWrite - used during feed-based import

type UnsupportedOptions

type UnsupportedOptions struct {
	UserViews                UserViewsOptions        `json:"user_views,omitempty"`                  // Config settings for user views
	OidcTestProvider         OidcTestProviderOptions `json:"oidc_test_provider,omitempty"`          // Config settings for OIDC Provider
	APIEndpoints             APIEndpoints            `json:"api_endpoints,omitempty"`               // Config settings for API endpoints
	WarningThresholds        WarningThresholds       `json:"warning_thresholds,omitempty"`          // Warning thresholds related to _sync size
	DisableCleanSkippedQuery bool                    `json:"disable_clean_skipped_query,omitempty"` // Clean skipped sequence processing bypasses final check
	OidcTlsSkipVerify        bool                    `json:"oidc_tls_skip_verify"`                  // Config option to enable self-signed certs for OIDC testing.
	SgrTlsSkipVerify         bool                    `json:"sgr_tls_skip_verify"`                   // Config option to enable self-signed certs for SG-Replicate testing.
}

type UserAccessMap

type UserAccessMap map[string]channels.TimedSet

Maps what users have access to what channels or roles, and when they got that access.

type UserViewsOptions

type UserViewsOptions struct {
	Enabled *bool `json:"enabled,omitempty"` // Whether pass-through view query is supported through public API
}

type ViewDoc

type ViewDoc struct {
	Json json.RawMessage // should be type 'document', but that fails to unmarshal correctly
}

type WarningThresholds

type WarningThresholds struct {
	XattrSize      *uint32 `json:"xattr_size_bytes,omitempty"`               // Number of bytes to be used as a threshold for xattr size limit warnings
	ChannelsPerDoc *uint32 `json:"channels_per_doc,omitempty"`               // Number of channels per document to be used as a threshold for channel count warnings
	GrantsPerDoc   *uint32 `json:"access_and_role_grants_per_doc,omitempty"` // Number of access and role grants per document to be used as a threshold for grant count warnings
}

type Webhook

type Webhook struct {
	AsyncEventHandler
	// contains filtered or unexported fields
}

Webhook is an implementation of EventHandler that sends an asynchronous HTTP POST

func NewWebhook

func NewWebhook(url string, filterFnString string, timeout *uint64) (*Webhook, error)

Creates a new webhook handler based on the url and filter function.

func (*Webhook) HandleEvent

func (wh *Webhook) HandleEvent(event Event) bool

Performs an HTTP POST to the url defined for the handler. If a filter function is defined, calls it to determine whether to POST. The payload for the POST depends on the event type.

func (*Webhook) SanitizedUrl

func (wh *Webhook) SanitizedUrl() string

func (*Webhook) String

func (wh *Webhook) String() string

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL