base

package
v0.0.0-...-7d4eda5 (Latest)
Warning

This package is not in the latest version of its module.

Go to latest
Published: Aug 21, 2020 License: Apache-2.0 Imports: 54 Imported by: 0

Documentation

Index

Constants

View Source
// Mutation feed type identifiers for a bucket's change feed.
const (
	TapFeedType = "tap"
	DcpFeedType = "dcp"
)
View Source
// Concurrency limits for bucket operations, plus the checksum marking a deleted document body.
const (
	MaxConcurrentSingleOps = 1000 // Max 1000 concurrent single bucket ops
	MaxConcurrentBulkOps   = 35   // Max 35 concurrent bulk ops
	MaxConcurrentViewOps   = 100  // Max concurrent view ops
	MaxBulkBatchSize       = 100  // Maximum number of ops per bulk call

	// CRC-32 checksum represents the body hash of "Deleted" document.
	DeleteCrc32c = "0x00"
)
View Source
// Defaults, test-environment variable names, and sync metadata document key prefixes.
const (

	// The username of the special "GUEST" user
	GuestUsername = "GUEST"
	// Timestamp layout (Go reference-time format) used for ISO-8601 date formatting.
	ISO8601Format = "2006-01-02T15:04:05.000Z07:00"

	// These settings are used when running unit tests against a live Couchbase Server to create/flush buckets
	DefaultCouchbaseAdministrator = "Administrator"
	DefaultCouchbasePassword      = "password"

	DefaultTestBucketname = "test_data_bucket"
	DefaultTestUsername   = DefaultTestBucketname
	DefaultTestPassword   = "password"

	DefaultTestIndexBucketname = "test_indexbucket"
	DefaultTestIndexUsername   = DefaultTestIndexBucketname
	DefaultTestIndexPassword   = DefaultTestPassword

	// Env variable to enable user to override the Couchbase Server URL used in tests
	TestEnvCouchbaseServerUrl = "SG_TEST_COUCHBASE_SERVER_URL"

	// Walrus by default, but can set to "Couchbase" to have it use http://localhost:8091
	TestEnvSyncGatewayBackingStore = "SG_TEST_BACKING_STORE"
	TestEnvBackingStoreCouchbase   = "Couchbase"

	// Don't use Xattrs by default, but provide the test runner a way to specify Xattr usage
	TestEnvSyncGatewayUseXattrs = "SG_TEST_USE_XATTRS"
	TestEnvSyncGatewayTrue      = "True"

	// Should the tests drop the GSI indexes?
	TestEnvSyncGatewayDropIndexes = "SG_TEST_DROP_INDEXES"

	// Should the tests disable the use of any GSI-related code?
	TestEnvSyncGatewayDisableGSI = "SG_TEST_DISABLE_GSI"

	// Don't use an auth handler by default, but provide a way to override
	TestEnvSyncGatewayUseAuthHandler = "SG_TEST_USE_AUTH_HANDLER"

	DefaultUseXattrs      = false // Whether Sync Gateway uses xattrs for metadata storage, if not specified in the config
	DefaultAllowConflicts = true  // Whether Sync Gateway allows revision conflicts, if not specified in the config

	DefaultDropIndexes = false // Whether Sync Gateway drops GSI indexes before each test while running in integration mode

	// Default expiry, in seconds, applied to old revisions — presumably old revision backups; confirm at call sites.
	DefaultOldRevExpirySeconds = uint32(300)

	// Default value of _local document expiry
	DefaultLocalDocExpirySecs = uint32(60 * 60 * 24 * 90) // 90 days in seconds

	DefaultViewQueryPageSize = 5000 // This must be greater than 1, or the code won't work due to windowing method

	// Until the sporadic integration tests failures in SG #3570 are fixed, should be GTE n1ql query timeout
	// to make it easier to identify root cause of test failures.
	DefaultWaitForSequence = time.Second * 30

	// Default the max number of idle connections per host to a relatively high number to avoid
	// excessive socket churn caused by opening short-lived connections and closing them after, which can cause
	// a high number of connections to end up in the TIME_WAIT state and exhaust system resources.  Since
	// GoCB is only connecting to a fixed set of Couchbase nodes, this number can be set relatively high and
	// still stay within a reasonable value.
	DefaultHttpMaxIdleConnsPerHost = "256"

	// This primarily depends on MaxIdleConnsPerHost as the limiting factor, but sets some upper limit just to avoid
	// being completely unlimited
	DefaultHttpMaxIdleConns = "64000"

	// Keep idle connections around for a maximum of 90 seconds.  This is the same value used by the Go DefaultTransport.
	DefaultHttpIdleConnTimeoutMilliseconds = "90000"

	// Number of kv connections (pipelines) per Couchbase Server node
	DefaultGocbKvPoolSize = "2"

	//==== Sync Prefix Documents & Keys ====
	SyncPrefix = "_sync:"

	AttPrefix              = SyncPrefix + "att:"
	BackfillCompletePrefix = SyncPrefix + "backfill:complete:"
	BackfillPendingPrefix  = SyncPrefix + "backfill:pending:"
	DCPCheckpointPrefix    = SyncPrefix + "dcp_ck:"
	RepairBackup           = SyncPrefix + "repair:backup:"
	RepairDryRun           = SyncPrefix + "repair:dryrun:"
	RevBodyPrefix          = SyncPrefix + "rb:"
	RevPrefix              = SyncPrefix + "rev:"
	RolePrefix             = SyncPrefix + "role:"
	SessionPrefix          = SyncPrefix + "session:"
	SGCfgPrefix            = SyncPrefix + "cfg"
	SyncSeqPrefix          = SyncPrefix + "seq:"
	UserEmailPrefix        = SyncPrefix + "useremail:"
	UserPrefix             = SyncPrefix + "user:"
	UnusedSeqPrefix        = SyncPrefix + "unusedSeq:"
	UnusedSeqRangePrefix   = SyncPrefix + "unusedSeqs:"

	DCPBackfillSeqKey = SyncPrefix + "dcp_backfill"
	SyncDataKey       = SyncPrefix + "syncdata"
	SyncSeqKey        = SyncPrefix + "seq"

	// Property name (in-document) and xattr name used for sync metadata storage.
	SyncPropertyName = "_sync"
	SyncXattrName    = "_sync"

	SGRStatusPrefix = SyncPrefix + "sgrStatus:"

	// Prefix for transaction metadata documents
	TxnPrefix = "_txn:"

	// Replication filter constants
	ByChannelFilter = "sync_gateway/bychannel"
)
View Source
// Error strings thrown by the sync function; surfaced to clients as HTTP 403 errors
// (see SyncFnAccessErrors).
const (
	SyncFnErrorMissingRole          = "sg missing role"
	SyncFnErrorAdminRequired        = "sg admin required"
	SyncFnErrorWrongUser            = "sg wrong user"
	SyncFnErrorMissingChannelAccess = "sg missing channel access"
)
View Source
// destFeedType enumeration: a full (unpartitioned) feed destination vs. a sharded one.
const (
	DestFullFeed destFeedType = iota
	DestShardedFeed
)
View Source
// Memcached binary protocol datatype bit flags, used in MCRequest.DataType.
// See https://github.com/couchbase/memcached/blob/master/docs/BinaryProtocol.md#data-types
const (
	MemcachedDataTypeJSON = 1 << iota
	MemcachedDataTypeSnappy
	MemcachedDataTypeXattr
)

Memcached binary protocol datatype bit flags (https://github.com/couchbase/memcached/blob/master/docs/BinaryProtocol.md#data-types), used in MCRequest.DataType

View Source
// Scope identifiers — presumably used to select between the global, per-database and
// per-replication stat maps (see Stats/GlobalStats/PerDbStats/PerReplicationStats);
// confirm usage at call sites.
const (
	PerDb          = "per_db"
	PerReplication = "per_replication"
	Global         = "global"
)
View Source
// Expvar stat key names, grouped by the stats section each key is published under.
const (

	// StatsResourceUtilization
	StatKeyProcessCpuPercentUtilization   = "process_cpu_percent_utilization"
	StatKeyProcessMemoryResident          = "process_memory_resident"
	StatKeySystemMemoryTotal              = "system_memory_total"
	StatKeyPubNetworkInterfaceBytesSent   = "pub_net_bytes_sent"
	StatKeyPubNetworkInterfaceBytesRecv   = "pub_net_bytes_recv"
	StatKeyAdminNetworkInterfaceBytesSent = "admin_net_bytes_sent"
	StatKeyAdminNetworkInterfaceBytesRecv = "admin_net_bytes_recv"
	StatKeyNumGoroutines                  = "num_goroutines"
	StatKeyGoroutinesHighWatermark        = "goroutines_high_watermark"
	StatKeyGoMemstatsSys                  = "go_memstats_sys"
	StatKeyGoMemstatsHeapAlloc            = "go_memstats_heapalloc"
	StatKeyGoMemstatsHeapIdle             = "go_memstats_heapidle"
	StatKeyGoMemstatsHeapInUse            = "go_memstats_heapinuse"
	StatKeyGoMemstatsHeapReleased         = "go_memstats_heapreleased"
	StatKeyGoMemstatsStackInUse           = "go_memstats_stackinuse"
	StatKeyGoMemstatsStackSys             = "go_memstats_stacksys"
	StatKeyGoMemstatsPauseTotalNs         = "go_memstats_pausetotalns"
	StatKeyErrorCount                     = "error_count"
	StatKeyWarnCount                      = "warn_count"

	// StatsCache
	StatKeyRevisionCacheHits                   = "rev_cache_hits"
	StatKeyRevisionCacheMisses                 = "rev_cache_misses"
	StatKeyRevisionCacheBypass                 = "rev_cache_bypass"
	StatKeyChannelCacheHits                    = "chan_cache_hits"
	StatKeyChannelCacheMisses                  = "chan_cache_misses"
	StatKeyChannelCacheRevsActive              = "chan_cache_active_revs"
	StatKeyChannelCacheRevsTombstone           = "chan_cache_tombstone_revs"
	StatKeyChannelCacheRevsRemoval             = "chan_cache_removal_revs"
	StatKeyChannelCacheNumChannels             = "chan_cache_num_channels"
	StatKeyChannelCacheMaxEntries              = "chan_cache_max_entries"
	StatKeyChannelCachePendingQueries          = "chan_cache_pending_queries"
	StatKeyChannelCacheChannelsAdded           = "chan_cache_channels_added"
	StatKeyChannelCacheChannelsEvictedInactive = "chan_cache_channels_evicted_inactive"
	StatKeyChannelCacheChannelsEvictedNRU      = "chan_cache_channels_evicted_nru"
	StatKeyChannelCacheCompactCount            = "chan_cache_compact_count"
	StatKeyChannelCacheCompactTime             = "chan_cache_compact_time"
	StatKeyChannelCacheBypassCount             = "chan_cache_bypass_count"
	StatKeyActiveChannels                      = "num_active_channels"
	StatKeyNumSkippedSeqs                      = "num_skipped_seqs"
	StatKeyAbandonedSeqs                       = "abandoned_seqs"
	StatKeyHighSeqCached                       = "high_seq_cached"
	StatKeyHighSeqStable                       = "high_seq_stable"
	StatKeySkippedSeqLen                       = "skipped_seq_len"
	StatKeyPendingSeqLen                       = "pending_seq_len"

	// StatsDatabase
	StatKeySequenceGetCount        = "sequence_get_count"
	StatKeySequenceIncrCount       = "sequence_incr_count"
	StatKeySequenceReservedCount   = "sequence_reserved_count"
	StatKeySequenceAssignedCount   = "sequence_assigned_count"
	StatKeySequenceReleasedCount   = "sequence_released_count"
	StatKeyCrc32cMatchCount        = "crc32c_match_count"
	StatKeyNumReplicationsActive   = "num_replications_active"
	StatKeyNumReplicationsTotal    = "num_replications_total"
	StatKeyNumDocWrites            = "num_doc_writes"
	StatKeyNumTombstonesCompacted  = "num_tombstones_compacted"
	StatKeyDocWritesBytes          = "doc_writes_bytes"
	StatKeyDocWritesXattrBytes     = "doc_writes_xattr_bytes"
	StatKeyNumDocReadsRest         = "num_doc_reads_rest"
	StatKeyNumDocReadsBlip         = "num_doc_reads_blip"
	StatKeyDocWritesBytesBlip      = "doc_writes_bytes_blip"
	StatKeyDocReadsBytesBlip       = "doc_reads_bytes_blip"
	StatKeyWarnXattrSizeCount      = "warn_xattr_size_count"
	StatKeyWarnChannelsPerDocCount = "warn_channels_per_doc_count"
	StatKeyWarnGrantsPerDocCount   = "warn_grants_per_doc_count"
	StatKeyDcpReceivedCount        = "dcp_received_count"
	StatKeyHighSeqFeed             = "high_seq_feed"
	StatKeyDcpReceivedTime         = "dcp_received_time"
	StatKeyDcpCachingCount         = "dcp_caching_count"
	StatKeyDcpCachingTime          = "dcp_caching_time"
	StatKeyCachingDcpStats         = "cache_feed"
	StatKeyImportDcpStats          = "import_feed"

	// StatsDeltaSync
	StatKeyDeltasRequested           = "deltas_requested"
	StatKeyDeltasSent                = "deltas_sent"
	StatKeyDeltaPullReplicationCount = "delta_pull_replication_count"
	StatKeyDeltaCacheHits            = "delta_cache_hit"
	StatKeyDeltaCacheMisses          = "delta_cache_miss"
	StatKeyDeltaPushDocCount         = "delta_push_doc_count"

	// StatsSharedBucketImport
	StatKeyImportCount          = "import_count"
	StatKeyImportCancelCAS      = "import_cancel_cas"
	StatKeyImportErrorCount     = "import_error_count"
	StatKeyImportProcessingTime = "import_processing_time"
	StatKeyImportHighSeq        = "import_high_seq"
	StatKeyImportPartitions     = "import_partitions"

	// StatsCBLReplicationPush
	StatKeyDocPushCount        = "doc_push_count"
	StatKeyWriteProcessingTime = "write_processing_time"
	StatKeySyncFunctionTime    = "sync_function_time"
	StatKeySyncFunctionCount   = "sync_function_count"
	StatKeyProposeChangeTime   = "propose_change_time"
	StatKeyProposeChangeCount  = "propose_change_count"
	StatKeyAttachmentPushCount = "attachment_push_count"
	StatKeyAttachmentPushBytes = "attachment_push_bytes"
	StatKeyConflictWriteCount  = "conflict_write_count"

	// StatsCBLReplicationPull
	StatKeyPullReplicationsActiveOneShot    = "num_pull_repl_active_one_shot"
	StatKeyPullReplicationsActiveContinuous = "num_pull_repl_active_continuous"
	StatKeyPullReplicationsTotalOneShot     = "num_pull_repl_total_one_shot"
	StatKeyPullReplicationsTotalContinuous  = "num_pull_repl_total_continuous"
	StatKeyPullReplicationsSinceZero        = "num_pull_repl_since_zero"
	StatKeyPullReplicationsCaughtUp         = "num_pull_repl_caught_up"
	StatKeyRequestChangesCount              = "request_changes_count"
	StatKeyRequestChangesTime               = "request_changes_time"
	StatKeyRevSendCount                     = "rev_send_count"
	StatKeyRevSendLatency                   = "rev_send_latency"
	StatKeyRevProcessingTime                = "rev_processing_time"
	StatKeyMaxPending                       = "max_pending"
	StatKeyAttachmentPullCount              = "attachment_pull_count"
	StatKeyAttachmentPullBytes              = "attachment_pull_bytes"

	// StatsSecurity
	StatKeyNumDocsRejected  = "num_docs_rejected"
	StatKeyNumAccessErrors  = "num_access_errors"
	StatKeyAuthSuccessCount = "auth_success_count"
	StatKeyAuthFailedCount  = "auth_failed_count"
	StatKeyTotalAuthTime    = "total_auth_time"

	// StatsGsiViews
	// Gsi and View stat names are dynamically generated based on the following patterns
	StatKeyN1qlQueryCountExpvarFormat      = "%s_count"          // Query name
	StatKeyN1qlQueryErrorCountExpvarFormat = "%s_error_count"    // Query name
	StatKeyN1qlQueryTimeExpvarFormat       = "%s_time"           // Query name
	StatKeyViewQueryCountExpvarFormat      = "%s.%s_count"       // Design doc, view
	StatKeyViewQueryErrorCountExpvarFormat = "%s.%s_error_count" // Design doc, view
	StatKeyViewQueryTimeExpvarFormat       = "%s.%s_time"        // Design doc, view

	// StatsReplication (1.x)
	StatKeySgrActive                     = "sgr_active"
	StatKeySgrNumAttachmentsTransferred  = "sgr_num_attachments_transferred"
	StatKeySgrAttachmentBytesTransferred = "sgr_num_attachment_bytes_transferred"

	// StatsReplication (SGR 1.x and 2.x)
	StatKeySgrNumDocsPushed       = "sgr_num_docs_pushed"
	StatKeySgrNumDocsFailedToPush = "sgr_num_docs_failed_to_push"
	StatKeySgrDocsCheckedSent     = "sgr_docs_checked_sent"

	// StatsReplication (SGR 2.x)
	StatKeySgrNumAttachmentsPushed     = "sgr_num_attachments_pushed"
	StatKeySgrNumAttachmentBytesPushed = "sgr_num_attachment_bytes_pushed"
	StatKeySgrNumAttachmentsPulled     = "sgr_num_attachments_pulled"
	StatKeySgrNumAttachmentBytesPulled = "sgr_num_attachment_bytes_pulled"
	StatKeySgrPulledCount              = "sgr_num_docs_pulled"
	StatKeySgrPurgedCount              = "sgr_num_docs_purged"
	StatKeySgrFailedToPullCount        = "sgr_num_docs_failed_to_pull"
	StatKeySgrPushConflictCount        = "sgr_push_conflict_count"
	StatKeySgrPushRejectedCount        = "sgr_push_rejected_count"
	StatKeySgrDocsCheckedRecv          = "sgr_docs_checked_recv"
	StatKeySgrDeltaRecvCount           = "sgr_deltas_recv"
	StatKeySgrDeltaRequestedCount      = "sgr_deltas_requested"
	StatKeySgrPushDeltaSentCount       = "sgr_deltas_sent"
	StatKeySgrConflictResolvedLocal    = "sgr_conflict_resolved_local_count"
	StatKeySgrConflictResolvedRemote   = "sgr_conflict_resolved_remote_count"
	StatKeySgrConflictResolvedMerge    = "sgr_conflict_resolved_merge_count"
)
View Source
// Top-level keys identifying the grouped stats sections.
const (
	StatsGroupKeySyncGateway         = "syncgateway"
	StatsGroupKeyResourceUtilization = "resource_utilization"
	StatsGroupKeyCache               = "cache"
	StatsGroupKeyDatabase            = "database"
	StatsGroupKeyDeltaSync           = "delta_sync"
	StatsGroupKeySharedBucketImport  = "shared_bucket_import"
	StatsGroupKeyCblReplicationPush  = "cbl_replication_push"
	StatsGroupKeyCblReplicationPull  = "cbl_replication_pull"
	StatsGroupKeySecurity            = "security"
	StatsGroupKeyGsiViews            = "gsi_views"
	StatsGroupKeyReplications        = "replications"
)
View Source
// HTTP server timeout defaults, applied when the JSON config does not specify values.
const (
	// Default amount of time allowed to read request headers.
	// If ReadHeaderTimeout is not defined in JSON config file,
	// the value of DefaultReadHeaderTimeout is used.
	DefaultReadHeaderTimeout = 5 * time.Second

	// Default maximum amount of time to wait for the next request
	// when keep-alives are enabled. If IdleTimeout is not defined
	// in JSON config file, the value of DefaultIdleTimeout is used.
	DefaultIdleTimeout = 90 * time.Second
)
View Source
// Cluster credentials used by tests, aliasing the default Couchbase admin credentials.
const (
	DefaultTestClusterUsername = DefaultCouchbaseAdministrator

	DefaultTestClusterPassword = DefaultCouchbasePassword
)
View Source
// Couchbase view query parameter names.
const (
	ViewQueryParamStale         = "stale"
	ViewQueryParamReduce        = "reduce"
	ViewQueryParamStartKey      = "startkey"
	ViewQueryParamEndKey        = "endkey"
	ViewQueryParamInclusiveEnd  = "inclusive_end"
	ViewQueryParamLimit         = "limit"
	ViewQueryParamIncludeDocs   = "include_docs" // Ignored -- see https://forums.couchbase.com/t/do-the-viewquery-options-omit-include-docs-on-purpose/12399
	ViewQueryParamDescending    = "descending"
	ViewQueryParamGroup         = "group"
	ViewQueryParamSkip          = "skip"
	ViewQueryParamGroupLevel    = "group_level"
	ViewQueryParamStartKeyDocId = "startkey_docid"
	ViewQueryParamEndKeyDocId   = "endkey_docid"
	ViewQueryParamKey           = "key"
	ViewQueryParamKeys          = "keys"
)
View Source
const BucketQueryToken = "$_bucket" // Token used for bucket name replacement in query statements
View Source
const CBGTIndexTypeSyncGatewayImport = "syncGateway-import-"
View Source
const DCPCachingFeedID = "SG"

DCP Feed IDs are used to build unique DCP identifiers

View Source
const DCPImportFeedID = "SGI"
View Source
// Import behavior default.
const (
	DefaultAutoImport = false // Whether Sync Gateway should auto-import docs, if not specified in the config
)
View Source
const DefaultImportPartitions = 16
View Source
// Shared JSON literal.
const (
	// EmptyDocument denotes an empty document in JSON form.
	EmptyDocument = `{}`
)
View Source
// Debugging switch (compile-time constant).
const (
	// If true, all HTTP request/response bodies will be logged.
	// Use this sparingly as it will probably dump sensitive information into the logs.
	EnableLogHTTPBodies = false
)
View Source
const GitBranch = ""
View Source
const GitCommit = ""
View Source
const GitDirty = ""
View Source
const GitProductName = ""

The Git build metadata (commit, branch, dirty flag, product name) compiled into the binary. These values are filled in at build time.

View Source
const IndexStateDeferred = "deferred" // bucket state value, as returned by SELECT FROM system:indexes.  Index has been created but not built.
View Source
const IndexStateOnline = "online" // bucket state value, as returned by SELECT FROM system:indexes.  Index has been created and built.
View Source
const IndexStatePending = "pending" // bucket state value, as returned by SELECT FROM system:indexes.  Index has been created, build is in progress
View Source
const MaxQueryRetries = 30 // Maximum query retries on indexer error
View Source
const MemcachedDataTypeRaw = 0

Memcached datatype for raw (binary) document (non-flag)

View Source
const PrimaryIndexName = "#primary"
View Source
const ServerName = "@PRODUCT_NAME@" // DO NOT CHANGE; clients check this
View Source
const VersionBuildNumberString = "@PRODUCT_VERSION@" // Real string substituted by Jenkins build
View Source
const VersionCommitSHA = "@COMMIT_SHA@" // Real string substituted by Jenkins build
View Source
const VersionNumber = "2.8" // API/feature level

Variables

View Source
var (
	// SyncFnAccessErrors lists the rendered HTTP 403 error strings produced by the
	// sync function error constants, for matching against returned errors.
	SyncFnAccessErrors = []string{
		HTTPErrorf(403, SyncFnErrorMissingRole).Error(),
		HTTPErrorf(403, SyncFnErrorAdminRequired).Error(),
		HTTPErrorf(403, SyncFnErrorWrongUser).Error(),
		HTTPErrorf(403, SyncFnErrorMissingChannelAccess).Error(),
	}

	// Default warning thresholds
	DefaultWarnThresholdXattrSize      = 0.9 * float64(couchbaseMaxSystemXattrSize)
	DefaultWarnThresholdChannelsPerDoc = uint32(50)
	DefaultWarnThresholdGrantsPerDoc   = uint32(50)

	// ErrUnknownField is marked as the cause of the error when trying to decode a JSON snippet with unknown fields
	ErrUnknownField = errors.New("unrecognized JSON field")
)
View Source
// Sentinel error values used throughout the package.
var (
	ErrRevTreeAddRevFailure  = &sgError{"Failure adding Rev to RevTree"}
	ErrImportCancelled       = &sgError{"Import cancelled"}
	ErrAlreadyImported       = &sgError{"Document already imported"}
	ErrImportCasFailure      = &sgError{"CAS failure during import"}
	ErrViewTimeoutError      = &sgError{"Timeout performing Query"}
	ErrImportCancelledFilter = &sgError{"Import cancelled based on import filter"}
	ErrDocumentMigrated      = &sgError{"Document migrated"}
	ErrFatalBucketConnection = &sgError{"Fatal error connecting to bucket"}
	ErrEmptyMetadata         = &sgError{"Empty Sync Gateway metadata"}
	ErrCasFailureShouldRetry = &sgError{"CAS failure should retry"}
	ErrIndexerError          = &sgError{"Indexer error"}
	ErrAlreadyExists         = &sgError{"Already exists"}
	ErrNotFound              = &sgError{"Not Found"}
	ErrUpdateCancel          = &sgError{"Cancel update"}
	ErrImportCancelledPurged = &sgError{"Import Cancelled Due to Purge"}
	ErrChannelFeed           = &sgError{"Error while building channel feed"}

	// ErrPartialViewErrors is returned if the view call contains any partial errors.
	// This is more of a warning, and inspecting ViewResult.Errors is required for detail.
	ErrPartialViewErrors = &sgError{"Partial errors in view"}

	// ErrEmptyDocument is returned when trying to insert a document with a null body.
	ErrEmptyDocument = &sgError{"Document body is empty"}
)
View Source
// Expvar maps holding runtime stats at the various scopes.
var (

	// Top level stats expvar map
	Stats *expvar.Map

	// Global Stats
	GlobalStats *expvar.Map

	// Per-database stats
	PerDbStats *expvar.Map

	// Per-replication (sg-replicate) stats
	PerReplicationStats *expvar.Map
)
View Source
var ErrCfgCasError = &cbgt.CfgCASError{}
View Source
var ErrDeltasNotSupported = fmt.Errorf("Deltas not supported in CE")

ErrDeltasNotSupported is returned when these functions are called in CE

View Source
var (
	// ErrInvalidLogFilePath is returned when the configured log file path is invalid.
	ErrInvalidLogFilePath = errors.New("invalid log file path")
)
View Source
var ErrUnsetLogFilePath = errors.New("No log_file_path property specified in config, and --defaultLogFilePath command line flag was not set. Log files required for product support are not being generated.")

ErrUnsetLogFilePath is returned when no log_file_path, or --defaultLogFilePath fallback can be used.

View Source
var LongVersionString string

This includes build number; appears in the response of "GET /" and the initial log message

View Source
var ProductName string

Either comes from Gerrit (jenkins builds) or Git (dev builds)

View Source
var RedactMetadata = false

RedactMetadata is a global toggle for metadata redaction.

View Source
var RedactSystemData = false

RedactSystemData is a global toggle for system data redaction.

View Source
var RedactUserData = false

RedactUserData is a global toggle for user data redaction.

View Source
var TestExternalRevStorage = false
View Source
var TimingExpvarsEnabled = false
View Source
var UseStdlibJSON bool

UseStdlibJSON if true, uses the stdlib JSON package. This variable is not thread-safe, and should be set only once on startup.

View Source
var VersionString string

This appears in the "Server:" header of HTTP responses. This should be changed only very cautiously, because Couchbase Lite parses the header value to determine whether it's talking to Sync Gateway (vs. CouchDB) and what version. This in turn determines what replication API features it will use.

Functions

func AddDbPathToCookie

func AddDbPathToCookie(rq *http.Request, cookie *http.Cookie)

Needed due to https://github.com/couchbase/sync_gateway/issues/1345

func BoolPtr

func BoolPtr(b bool) *bool

func CbsExpiryToTime

func CbsExpiryToTime(expiry uint32) time.Time

This function takes a CBS expiry and returns as a time

func ClogCallback

func ClogCallback(level, format string, v ...interface{}) string

************************************************************************** Implementation of callback for github.com/couchbase/clog.SetLoggerCallback

Our main library that uses clog is cbgt, so all logging goes to KeyDCP.
Note that although sg-replicate uses clog's log levels, sgreplicateLogFn
bypasses clog logging, and so won't end up in this callback.

**************************************************************************

func Consolef

func Consolef(logLevel LogLevel, logKey LogKey, format string, args ...interface{})

Consolef logs the given formatted string and args to the given log level and log key, as well as making sure the message is *always* logged to stdout.

func ContainsString

func ContainsString(s []string, e string) bool

func ConvertBackQuotedStrings

func ConvertBackQuotedStrings(data []byte) []byte

ConvertBackQuotedStrings sanitises a string containing `...`-delimited strings. - Converts the backquotes into double-quotes - Escapes literal backslashes, newlines or double-quotes with backslashes.

func ConvertJSONString

func ConvertJSONString(s string) string

Convert a JSON string, which has extra double quotes (eg, `"thing"`) into a normal string with the extra double quotes removed (eg "thing"). Normal strings will be returned as-is.

`"thing"` -> "thing"; "thing" -> "thing" (already-normal strings are returned unchanged)

func ConvertToEmptyInterfaceSlice

func ConvertToEmptyInterfaceSlice(i interface{}) (result []interface{}, err error)

func ConvertToJSONString

func ConvertToJSONString(s string) string

ConvertToJSONString takes a string, and returns a JSON string, with any illegal characters escaped.

func CopyDefaultBucketDatasourceOptions

func CopyDefaultBucketDatasourceOptions() *cbdatasource.BucketDataSourceOptions

CopyDefaultBucketDatasourceOptions makes a copy of cbdatasource.DefaultBucketDataSourceOptions. DeepCopyInefficient can't be used here due to function definitions present on BucketDataSourceOptions (ConnectBucket, etc)

func CouchHTTPErrorName

func CouchHTTPErrorName(status int) string

Returns the standard CouchDB error string for an HTTP error status. These are important for compatibility, as some REST APIs don't show numeric statuses, only these strings.

func CouchbaseURIToHttpURL

func CouchbaseURIToHttpURL(bucket Bucket, couchbaseUri string, connSpec *gocbconnstr.ConnSpec) (httpUrls []string, err error)

Convert a Bucket, or a Couchbase URI (eg, couchbase://host1,host2) to a list of HTTP URLs with ports (eg, ["http://host1:8091", "http://host2:8091"]) connSpec can be optionally passed in if available, to prevent unnecessary double-parsing of connstr Primary use case is for backwards compatibility with go-couchbase, cbdatasource, and CBGT. Supports secure URI's as well (couchbases://). Related CBGT ticket: https://issues.couchbase.com/browse/MB-25522

func CouchbaseUrlWithAuth

func CouchbaseUrlWithAuth(serverUrl, username, password, bucketname string) (string, error)

func Crc32cHash

func Crc32cHash(input []byte) uint32

func Crc32cHashString

func Crc32cHashString(input []byte) string

func CreateProperty

func CreateProperty(size int) (result string)

func Debugf

func Debugf(logKey LogKey, format string, args ...interface{})

Debugf logs the given formatted string and args to the debug log level with an optional log key.

func DebugfCtx

func DebugfCtx(ctx context.Context, logKey LogKey, format string, args ...interface{})

DebugfCtx logs the given formatted string and args to the debug log level with an optional log key.

func DeepCopyInefficient

func DeepCopyInefficient(dst interface{}, src interface{}) error

Make a deep copy from src into dst. Copied from https://github.com/getlantern/deepcopy, commit 7f45deb8130a0acc553242eb0e009e3f6f3d9ce3 (Apache 2 licensed)

func DefaultHTTPTransport

func DefaultHTTPTransport() *http.Transport

DefaultHTTPTransport returns a new HTTP Transport that copies values from http.DefaultTransport

func Diff

func Diff(old, new map[string]interface{}) (delta []byte, err error)

Diff is only implemented in EE, the CE stub always returns an error.

func DirExists

func DirExists(filename string) bool

func DisableTestLogging

func DisableTestLogging() (teardownFn func())

DisableTestLogging is an alias for SetUpTestLogging(LevelNone, KeyNone) This function will panic if called multiple times without running the teardownFn.

func DropAllBucketIndexes

func DropAllBucketIndexes(gocbBucket *CouchbaseBucketGoCB) error

Reset bucket state

func DurationToCbsExpiry

func DurationToCbsExpiry(ttl time.Duration) uint32

This is how Couchbase Server handles document expiration times

The actual value sent may either be Unix time (number of seconds since January 1, 1970, as a 32-bit value), or a number of seconds starting from current time. In the latter case, this number of seconds may not exceed 60*60*24*30 (number of seconds in 30 days); if the number sent by a client is larger than that, the server will consider it to be real Unix time value rather than an offset from current time.

This function takes a ttl as a Duration and returns an int formatted as required by CBS expiry processing

func ErrorAsHTTPStatus

func ErrorAsHTTPStatus(err error) (int, string)

Attempts to map an error to an HTTP status code and message. Defaults to 500 if it doesn't recognize the error. Returns 200 for a nil error.

func Errorf

func Errorf(format string, args ...interface{})

Errorf logs the given formatted string and args to the error log level and given log key.

func ErrorfCtx

func ErrorfCtx(ctx context.Context, format string, args ...interface{})

ErrorfCtx logs the given formatted string and args to the error log level and given log key.

func ExpvarFloatVal

func ExpvarFloatVal(val float64) *expvar.Float

Convert a float into an *expvar.Float

func ExpvarInt64Val

func ExpvarInt64Val(val int64) *expvar.Int

func ExpvarIntVal

func ExpvarIntVal(val int) *expvar.Int

Convert an int into an *expvar.Int

func ExpvarUInt64Val

func ExpvarUInt64Val(val uint64) *expvar.Int

func ExpvarVar2Int

func ExpvarVar2Int(expvarVar expvar.Var) int64

Convert an expvar.Var to an int64. Return 0 if the expvar var is nil.

func ExtractExpiryFromDCPMutation

func ExtractExpiryFromDCPMutation(rq *gomemcached.MCRequest) (expiry uint32)

TODO: temporary workaround until https://issues.couchbase.com/browse/MB-27026 is implemented

func FatalPanicHandler

func FatalPanicHandler()

func Fatalf

func Fatalf(format string, args ...interface{})

Fatalf logs the given formatted string and args to the error log level and given log key and then exits.

func FatalfCtx

func FatalfCtx(ctx context.Context, format string, args ...interface{})

FatalfCtx logs the given formatted string and args to the error log level and given log key and then exits.

func FileExists

func FileExists(filename string) bool

func FindPrimaryAddr

func FindPrimaryAddr() (net.IP, error)

FindPrimaryAddr returns the primary outbound IP of this machine. This is the same as find_primary_addr in sgcollect_info.

func FixJSONNumbers

func FixJSONNumbers(value interface{}) interface{}

This is a workaround for an incompatibility between Go's JSON marshaler and CouchDB. Go parses JSON numbers into float64 type, and then when it marshals float64 to JSON it uses scientific notation if the number is more than six digits long, even if it's an integer. However, CouchDB doesn't seem to like scientific notation and throws an exception. (See <https://issues.apache.org/jira/browse/COUCHDB-1670>) Thus, this function, which walks through a JSON-compatible object and converts float64 values to int64 when possible. NOTE: This function works on generic map[string]interface{}, but *not* on types based on it, like db.Body. Thus, db.Body has a special FixJSONNumbers method -- call that instead. TODO: In Go 1.1 we will be able to use a new option in the JSON parser that converts numbers to a special number type that preserves the exact formatting.

func FlushLogBuffers

func FlushLogBuffers()

FlushLogBuffers will cause all log collation buffers to be flushed to the output before returning.

func FormatBlipContextID

func FormatBlipContextID(contextID string) string

func GenerateDcpStreamName

func GenerateDcpStreamName(feedID string) (string, error)

Create a prefix that will be used to create the dcp stream name, which must be globally unique in order to avoid https://issues.couchbase.com/browse/MB-24237. It's also useful to have the Sync Gateway version number / commit for debugging purposes

func GenerateIndexName

func GenerateIndexName(dbName string) string

Given a dbName, generate a unique and length-constrained index name for CBGT to use as part of their DCP name.

func GenerateRandomID

func GenerateRandomID() string

GenerateRandomID returns a cryptographically-secure 128-bit random number encoded as a hex string.

func GenerateRandomSecret

func GenerateRandomSecret() string

GenerateRandomSecret returns a cryptographically-secure 160-bit random number encoded as a hex string.

func GetCallersName

func GetCallersName(depth int, includeLine bool) string

Returns a string identifying a function on the call stack. Use depth=1 for the caller of the function that calls GetCallersName, etc.

func GetCounter

func GetCounter(bucket Bucket, k string) (result uint64, err error)

GetCounter returns a uint64 result for the given counter key. If the given key is not found in the bucket, this function returns a result of zero.

func GetExpvarAsInt

func GetExpvarAsInt(mapName string, name string) (int, error)

Returns int representation of an expvar, given map name and key name

func GetExpvarAsString

func GetExpvarAsString(mapName string, name string) string

Returns string representation of an expvar, given map name and key name

func GetFeedType

func GetFeedType(bucket Bucket) (feedType string)

Returns mutation feed type for bucket. Will first return the feed type from the spec, when present. If not found, returns default feed type for bucket (DCP for any couchbase bucket, TAP otherwise)

func GetHttpClient

func GetHttpClient(insecureSkipVerify bool) *http.Client

GetHttpClient returns a new HTTP client with TLS certificate verification disabled when insecureSkipVerify is true and enabled otherwise.

func GetLogKeys

func GetLogKeys() map[string]bool

GetLogKeys returns log keys in a map

func GetRestrictedInt

func GetRestrictedInt(rawValue *uint64, defaultValue, minValue, maxValue uint64, allowZero bool) uint64

func GetRestrictedIntFromString

func GetRestrictedIntFromString(rawValue string, defaultValue, minValue, maxValue uint64, allowZero bool) uint64

func GetRestrictedIntQuery

func GetRestrictedIntQuery(values url.Values, query string, defaultValue, minValue, maxValue uint64, allowZero bool) uint64

GetRestrictedIntQuery returns the integer value of a URL query, restricted to a min and max value, but returning 0 if missing or unparseable. If allowZero is true, values coming in as zero will stay zero, instead of being set to the minValue.

func GetStatsVbSeqno

func GetStatsVbSeqno(stats map[string]map[string]string, maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error)

func GoCBBucketMgmtEndpoint

func GoCBBucketMgmtEndpoint(bucket *gocb.Bucket) (url string, err error)

Get one of the management endpoints. It will be a string such as http://couchbase

func HexCasToUint64

func HexCasToUint64(cas string) uint64

func Infof

func Infof(logKey LogKey, format string, args ...interface{})

Infof logs the given formatted string and args to the info log level and given log key.

func InfofCtx

func InfofCtx(ctx context.Context, logKey LogKey, format string, args ...interface{})

InfofCtx logs the given formatted string and args to the info log level and given log key.

func InjectJSONProperties

func InjectJSONProperties(b []byte, kvPairs ...KVPair) (new []byte, err error)

InjectJSONProperties takes the given JSON byte slice, and for each KV pair, marshals the value and inserts into the returned byte slice under the given key, without modifying the given byte slice.

This has the potential to create duplicate keys, which whilst adhering to the spec, are ambiguous with how they get read... usually "last key wins" - although there is no standardized way of handling JSON with non-unique keys.

func InjectJSONPropertiesFromBytes

func InjectJSONPropertiesFromBytes(b []byte, kvPairs ...KVPairBytes) (new []byte, err error)

InjectJSONPropertiesFromBytes takes the given JSON byte slice, and for each KV pair, inserts into b under the given key.

This has the potential to create duplicate keys, which whilst adhering to the spec, are ambiguous with how they get read... usually "last key wins" - although there is no standardized way of handling JSON with non-unique keys.

func IntPtr

func IntPtr(i int) *int

func IsCasMismatch

func IsCasMismatch(err error) bool

func IsConnectionRefusedError

func IsConnectionRefusedError(err error) bool

IsConnectionRefusedError returns true if the given error is due to a connection being actively refused.

func IsDeltaError

func IsDeltaError(_ error) bool

IsDeltaError is only implemented in EE, the CE stub always returns false.

func IsDocNotFoundError

func IsDocNotFoundError(err error) bool

Returns true if an error is a doc-not-found error

func IsEnterpriseEdition

func IsEnterpriseEdition() bool

IsEnterpriseEdition returns true if this Sync Gateway node is enterprise edition. This can be used to restrict config options, etc. at runtime. This should not be used as a conditional around private/EE-only code, as CE builds will fail to compile. Use the build tag for conditional compilation instead.

func IsIndexNotFoundError

func IsIndexNotFoundError(err error) bool

Index not found errors (returned by DropIndex) don't have a specific N1QL error code - they are of the form:

[5000] GSI index testIndex_not_found not found.

Stuck with doing a string compare to differentiate between 'not found' and other errors

func IsIndexerRetryBuildError

func IsIndexerRetryBuildError(err error) bool

func IsIndexerRetryIndexError

func IsIndexerRetryIndexError(err error) bool

'IsIndexerRetry' type errors are of the form: error:[5000] GSI CreateIndex() - cause: Encountered transient error. Index creation will be retried in background. Error: Index testIndex_value will retry building in the background for reason: Bucket test_data_bucket In Recovery. error:[5000] GSI Drop() - cause: Fail to drop index on some indexer nodes. Error=Encountered error when dropping index: Indexer In Recovery. Drop index will be retried in background. error:[5000] BuildIndexes - cause: Build index fails. %vIndex testIndexDeferred will retry building in the background for reason: Build Already In Progress. Bucket test_data_bucket.

https://issues.couchbase.com/browse/MB-19358 is filed to request improved indexer error codes for these scenarios (and others)

func IsKeyNotFoundError

func IsKeyNotFoundError(bucket Bucket, err error) bool

func IsPowerOfTwo

func IsPowerOfTwo(n uint16) bool

func JSONMarshal

func JSONMarshal(v interface{}) ([]byte, error)

JSONMarshal returns the JSON encoding of v.

func JSONMarshalCanonical

func JSONMarshalCanonical(v interface{}) ([]byte, error)

JSONMarshalCanonical returns the canonical JSON encoding of v. Most notably: Ordered properties, in order to generate deterministic Rev IDs.

func JSONUnmarshal

func JSONUnmarshal(data []byte, v interface{}) error

JSONUnmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.

func ListenAndServeHTTP

func ListenAndServeHTTP(addr string, connLimit int, certFile *string, keyFile *string, handler http.Handler,
	readTimeout *int, writeTimeout *int, readHeaderTimeout *int, idleTimeout *int, http2Enabled bool,
	tlsMinVersion uint16) error

This is like a combination of http.ListenAndServe and http.ListenAndServeTLS, which also uses ThrottledListen to limit the number of open HTTP connections.

func LogDebugEnabled

func LogDebugEnabled(logKey LogKey) bool

LogDebugEnabled returns true if either the console should log at debug level, or if the debugLogger is enabled.

func LogInfoEnabled

func LogInfoEnabled(logKey LogKey) bool

LogInfoEnabled returns true if either the console should log at info level, or if the infoLogger is enabled.

func LogSyncGatewayVersion

func LogSyncGatewayVersion()

LogSyncGatewayVersion will print the startup indicator and version number to ALL log outputs.

func LogTraceEnabled

func LogTraceEnabled(logKey LogKey) bool

LogTraceEnabled returns true if either the console should log at trace level, or if the traceLogger is enabled.

func MergeStringArrays

func MergeStringArrays(arrays ...[]string) (merged []string)

Concatenates and merges multiple string arrays into one, discarding all duplicates (including duplicates within a single array.) Ordering is preserved.

func MinInt

func MinInt(x, y int) int

func NewConsoleLogger

func NewConsoleLogger(config *ConsoleLoggerConfig) (*ConsoleLogger, []DeferredLogFn, error)

NewConsoleLogger returns a new ConsoleLogger from a config.

func NewCouchbaseHeartbeater

func NewCouchbaseHeartbeater(bucket Bucket, keyPrefix, nodeUUID string) (heartbeater *couchbaseHeartBeater, err error)

Create a new CouchbaseHeartbeater, passing in an authenticated bucket connection, the keyPrefix which will be prepended to the heartbeat doc keys, and the nodeUUID, which is an opaque identifier for the "thing" that is using this library. nodeUUID will be passed to listeners on stale node detection.

func NewDCPReceiver

func NewDCPReceiver(callback sgbucket.FeedEventCallbackFunc, bucket Bucket, maxVbNo uint16, persistCheckpoints bool, dbStats *expvar.Map, feedID string) (cbdatasource.Receiver, context.Context)

func NewDocumentBackedListener

func NewDocumentBackedListener(bucket Bucket, keyPrefix string) (*documentBackedListener, error)

func NewImportHeartbeatListener

func NewImportHeartbeatListener(cfg cbgt.Cfg, mgrVersion string) (*importHeartbeatListener, error)

func NewStatsResourceUtilization

func NewStatsResourceUtilization() *expvar.Map

func NewTaskID

func NewTaskID(contextID string, taskName string) string

func Panicf

func Panicf(format string, args ...interface{})

Panicf logs the given formatted string and args to the error log level and given log key and then panics.

func PanicfCtx

func PanicfCtx(ctx context.Context, format string, args ...interface{})

PanicfCtx logs the given formatted string and args to the error log level and given log key and then panics.

func Patch

func Patch(old *map[string]interface{}, delta map[string]interface{}) (err error)

Patch is only implemented in EE, the CE stub always returns an error.

func PrependContextID

func PrependContextID(contextID, format string, params ...interface{}) (newFormat string, newParams []interface{})

Prepend a context ID to each blip logging message. The contextID uniquely identifies the blip context, and is useful for grouping the blip connections in the log output.

func RecordStats

func RecordStats(statsJson string)

RecordStats writes the given stats JSON content to a stats log file, if enabled. The content passed in is expected to be a JSON dictionary.

func RedactBasicAuthURL

func RedactBasicAuthURL(url string) string

RedactBasicAuthURL returns the given string, with a redacted HTTP basic auth component.

func RedactBasicAuthURLPassword

func RedactBasicAuthURLPassword(url string) string

RedactBasicAuthURLPassword returns the given string, with a redacted HTTP basic auth password component.

func ReflectExpiry

func ReflectExpiry(rawExpiry interface{}) (*uint32, error)

ReflectExpiry attempts to convert expiry from one of the following formats to a Couchbase Server expiry value:

  1. Numeric JSON values are converted to uint32 and returned as-is
  2. JSON numbers are converted to uint32 and returned as-is
  3. String JSON values that are numbers are converted to int32 and returned as-is
  4. String JSON values that are ISO-8601 dates are converted to UNIX time and returned
  5. Null JSON values return 0

func RemovePerDbStats

func RemovePerDbStats(dbName string)

Removes the per-database stats for this database by generating a new expvar map without that particular dbname

func ReplaceAll

func ReplaceAll(s, chars, new string) string

ReplaceAll returns a string with all of the given chars replaced by new

func ReplicationStats

func ReplicationStats(replicationID string) (stats *sgreplicate.ReplicationStats)

ReplicationStats returns replication stats for the given replication ID, or a new set if they do not already exist.

func ReplicationStatsMap

func ReplicationStatsMap(s *sgreplicate.ReplicationStats) *expvar.Map

ReplicationStatsMap returns an expvar.Map that contains references to stats in the given ReplicationStats

func RetryLoop

func RetryLoop(description string, worker RetryWorker, sleeper RetrySleeper) (error, interface{})

func RetryLoopCas

func RetryLoopCas(description string, worker RetryCasWorker, sleeper RetrySleeper) (error, uint64)

A version of RetryLoop that returns a strongly typed cas as uint64, to avoid interface conversion overhead for high throughput operations.

func RotateLogfiles

func RotateLogfiles() map[*FileLogger]error

RotateLogfiles rotates all active log files.

func SafeSlice

func SafeSlice(data []byte, from int, to int) ([]byte, error)

Retrieves a slice from a byte, but returns error (instead of panic) if range isn't contained by the slice

func SanitizeRequestURL

func SanitizeRequestURL(req *http.Request, cachedQueryValues *url.Values) string

SanitizeRequestURL will return a sanitised string of the URL by: - Tagging mux path variables. - Tagging query parameters. - Replacing sensitive data from the URL query string with ******. Have to use string replacement instead of writing directly to the Values URL object, as only the URL's raw query is mutable.

func SecondsToCbsExpiry

func SecondsToCbsExpiry(ttl int) uint32

This function takes a ttl in seconds and returns an int formatted as required by CBS expiry processing

func ServerUrlsWithAuth

func ServerUrlsWithAuth(urls []string, spec BucketSpec) (urlsWithAuth []string, err error)

Add auth credentials to the given urls, since CBGT cannot take auth handlers in certain API calls yet

func SetIfMax

func SetIfMax(expvarMap *expvar.Map, key string, val int64)

func SetRedaction

func SetRedaction(redactionLevel RedactionLevel)

func SetUpBenchmarkLogging

func SetUpBenchmarkLogging(logLevel LogLevel, logKeys ...LogKey) (teardownFn func())

SetUpBenchmarkLogging will set the given log level and key, and do log processing for that configuration, but discards the output, instead of writing it to console.

func SetUpTestLogging

func SetUpTestLogging(logLevel LogLevel, logKeys ...LogKey) (teardownFn func())

SetUpTestLogging will set the given log level and log keys, and return a function that can be deferred for teardown.

This function will panic if called multiple times without running the teardownFn.

To set multiple log keys, append as variadic arguments E.g. KeyCache,KeyDCP,KeySync

Usage:

teardownFn := SetUpTestLogging(LevelDebug, KeyCache,KeyDCP,KeySync)
defer teardownFn()

Shorthand style:

defer SetUpTestLogging(LevelDebug, KeyCache,KeyDCP,KeySync)()

func Sha1HashString

func Sha1HashString(str string, salt string) string

func SingleHostCouchbaseURIToHttpURL

func SingleHostCouchbaseURIToHttpURL(couchbaseUri string) (httpUrl string)

Special case for couchbaseUri strings that contain a single host with http:// or https:// schemes, possibly containing embedded basic auth. Needed since gocbconnstr.Parse() will remove embedded basic auth from URLS.

func SlowQueryLog

func SlowQueryLog(startTime time.Time, threshold time.Duration, messageFormat string, args ...interface{})

func SplitHostPort

func SplitHostPort(hostport string) (string, string, error)

func StartCbgtCbdatasourceFeed

func StartCbgtCbdatasourceFeed(bucket Bucket, spec BucketSpec, args sgbucket.FeedArguments, callback sgbucket.FeedEventCallbackFunc, dbStats *expvar.Map) error

This starts a cbdatasource powered DCP Feed using an entirely separate connection to Couchbase Server than anything the existing bucket is using, and it uses the go-couchbase cbdatasource DCP abstraction layer

func StartCbgtDCPFeed

func StartCbgtDCPFeed(bucket Bucket, spec BucketSpec, args sgbucket.FeedArguments, callback sgbucket.FeedEventCallbackFunc, dbStats *expvar.Map) error

This starts a cbdatasource powered DCP Feed using an entirely separate connection to Couchbase Server than anything the existing bucket is using, and it uses the go-couchbase cbdatasource DCP abstraction layer

func StartCbgtGocbFeed

func StartCbgtGocbFeed(bucket Bucket, spec BucketSpec, args sgbucket.FeedArguments, callback sgbucket.FeedEventCallbackFunc, dbStats *expvar.Map) error

This starts a cbdatasource powered DCP Feed using an entirely separate connection to Couchbase Server than anything the existing bucket is using, and it uses the go-couchbase cbdatasource DCP abstraction layer

func StartDCPFeed

func StartDCPFeed(bucket Bucket, spec BucketSpec, args sgbucket.FeedArguments, callback sgbucket.FeedEventCallbackFunc, dbStats *expvar.Map) error

This starts a cbdatasource powered DCP Feed using an entirely separate connection to Couchbase Server than anything the existing bucket is using, and it uses the go-couchbase cbdatasource DCP abstraction layer

func StatsResourceUtilization

func StatsResourceUtilization() *expvar.Map

func StringPrefix

func StringPrefix(s string, desiredSize int) string

Slice a string to be less than or equal to desiredSize

func StringPtr

func StringPtr(value string) *string

func StringSliceContains

func StringSliceContains(set []string, target string) bool

func StringSliceToN1QLArray

func StringSliceToN1QLArray(values []string, quote string) string

Converts to a format like `value1`,`value2` when quote=`

func SyncSourceFromURL

func SyncSourceFromURL(u *url.URL) string

Returns a URL formatted string which excludes the path, query and fragment. This is used by _replicate to split the single URL passed in a CouchDB style request into a source URL and a database name as used in sg_replicate.

func TLSConfigForX509

func TLSConfigForX509(certpath, keypath, cacertpath string) (*tls.Config, error)

Returns a TLSConfig based on the specified certificate paths. If none are provided, returns tlsConfig with InsecureSkipVerify:true.

func TestClusterPassword

func TestClusterPassword() string

TestClusterPassword returns the configured cluster password.

func TestClusterUsername

func TestClusterUsername() string

TestClusterUsername returns the configured cluster username.

func TestUseCouchbaseServer

func TestUseCouchbaseServer() bool

Check the whether tests are being run with SG_TEST_BACKING_STORE=Couchbase

func TestUseXattrs

func TestUseXattrs() bool

Should Sync Gateway use XATTRS functionality when running unit tests?

func TestsDisableGSI

func TestsDisableGSI() bool

TestsDisableGSI returns true if tests should be forced to avoid any GSI-specific code.

func TestsShouldDropIndexes

func TestsShouldDropIndexes() bool

Should tests try to drop GSI indexes before flushing buckets? See SG #3422

func ThrottledListen

func ThrottledListen(protocol string, addr string, limit int) (net.Listener, error)

Equivalent to net.Listen except that the returned listener allows only a limited number of open connections at a time. When the limit is reached it will block until some are closed before accepting any more. If the 'limit' parameter is 0, there is no limit and the behavior is identical to net.Listen.

func ToArrayOfInterface

func ToArrayOfInterface(arrayOfString []string) []interface{}

func ToInt64

func ToInt64(value interface{}) (int64, bool)

func ToLogKey

func ToLogKey(keysStr []string) (logKeys LogKeyMask, warnings []DeferredLogFn)

ToLogKey takes a slice of case-sensitive log key names and will return a LogKeyMask bitfield and a slice of deferred log functions for any warnings that may occur.

func Tracef

func Tracef(logKey LogKey, format string, args ...interface{})

Tracef logs the given formatted string and args to the trace log level with an optional log key.

func TracefCtx

func TracefCtx(ctx context.Context, logKey LogKey, format string, args ...interface{})

TracefCtx logs the given formatted string and args to the trace log level with an optional log key.

func TransformBucketCredentials

func TransformBucketCredentials(inputUsername, inputPassword, inputBucketname string) (username, password, bucketname string)

This transforms raw input bucket credentials (for example, from config), to input credentials expected by Couchbase server, based on a few rules

func Uint32Ptr

func Uint32Ptr(u uint32) *uint32

func UintPtr

func UintPtr(u uint) *uint

func UnitTestUrl

func UnitTestUrl() string

UnitTestUrl returns the configured test URL.

func UnitTestUrlIsWalrus

func UnitTestUrlIsWalrus() bool

UnitTestUrlIsWalrus returns true if we're running with a Walrus test URL.

func UpdateLogKeys

func UpdateLogKeys(keys map[string]bool, replace bool)

UpdateLogKeys updates the console's log keys from a map

func VBHash

func VBHash(key string, numVb int) uint32

VBHash finds the vbucket for the given key.

func ValidateUint32Expiry

func ValidateUint32Expiry(expiry int64) (*uint32, error)

func ValueToStringArray

func ValueToStringArray(value interface{}) ([]string, []interface{})

Convert a string or array into a string array, otherwise return nil. If the input slice contains entries of mixed type, all string entries are collected and returned as one slice, and the non-string entries as another.

func WaitForStat

func WaitForStat(getStatFunc func() int64, expected int64) (int64, bool)

WaitForStat will retry for up to 20 seconds until the result of getStatFunc is equal to the expected value.

func Warnf

func Warnf(format string, args ...interface{})

Warnf logs the given formatted string and args to the warn log level and given log key.

func WarnfCtx

func WarnfCtx(ctx context.Context, format string, args ...interface{})

WarnfCtx logs the given formatted string and args to the warn log level and given log key.

func WrapJSONUnknownFieldErr

func WrapJSONUnknownFieldErr(err error) error

WrapJSONUnknownFieldErr wraps JSON unknown field errors with ErrUnknownField for later checking via errors.Cause

func WriteHistogram

func WriteHistogram(expvarMap *expvar.Map, since time.Time, prefix string)

func WriteHistogramForDuration

func WriteHistogramForDuration(expvarMap *expvar.Map, duration time.Duration, prefix string)

Types

type AppendOnlyList

type AppendOnlyList struct {
	// contains filtered or unexported fields
}

AppendOnlyList is a doubly-linked list that only supports adding elements to the list using PushBack. Modelled on container/list, but modified to support concurrent iteration and PushBack calls.

func (*AppendOnlyList) Back

Back returns the last element of list l or nil if the list is empty.

func (*AppendOnlyList) Front

Front returns the first element of list l or nil if the list is empty.

func (*AppendOnlyList) Init

func (l *AppendOnlyList) Init() *AppendOnlyList

Init initializes or clears list l.

func (*AppendOnlyList) PushBack

func (l *AppendOnlyList) PushBack(key string, v interface{}) *AppendOnlyListElement

PushBack inserts a new element e with value v at the back of list l and returns e.

func (*AppendOnlyList) Remove

func (l *AppendOnlyList) Remove(e *AppendOnlyListElement) interface{}

Remove removes e from l if e is an element of list l. It returns the element value e.Value. The element must not be nil.

type AppendOnlyListElement

type AppendOnlyListElement struct {

	// The value stored with this element.
	Value interface{}
	// contains filtered or unexported fields
}

AppendOnlyListElement is an element of an AppendOnlyList.

func (*AppendOnlyListElement) Next

Next returns the next list element or nil.

func (*AppendOnlyListElement) Prev

Prev returns the previous list element or nil.

type AtomicBool

type AtomicBool struct {
	// contains filtered or unexported fields
}

AtomicBool is a bool that can be set or read atomically

func (*AtomicBool) CompareAndSwap

func (ab *AtomicBool) CompareAndSwap(old bool, new bool) bool

func (*AtomicBool) IsTrue

func (ab *AtomicBool) IsTrue() bool

func (*AtomicBool) Set

func (ab *AtomicBool) Set(flag bool)

type AuthHandler

type AuthHandler couchbase.AuthHandler

type BackfillSequences

type BackfillSequences struct {
	Seqs      []uint64
	SnapStart []uint64
	SnapEnd   []uint64
}

BackfillSequences defines the format used to persist snapshot information to the _sync:dcp_backfill document to support mid-snapshot restart

type BinaryDocument

type BinaryDocument []byte

BinaryDocument is a type alias that allows SGTranscoder to differentiate between documents that are intended to be written as binary docs, versus json documents that are being sent as raw bytes. Some additional context here: https://play.golang.org/p/p4fkKiZD59

type Bucket

type Bucket sgbucket.Bucket

TODO: unalias these and just pass around sgbucket.X everywhere

func GetBucket

func GetBucket(spec BucketSpec) (bucket Bucket, err error)

type BucketSpec

type BucketSpec struct {
	Server, PoolName, BucketName, FeedType string
	Auth                                   AuthHandler
	CouchbaseDriver                        CouchbaseDriver
	Certpath, Keypath, CACertPath          string         // X.509 auth parameters
	KvTLSPort                              int            // Port to use for memcached over TLS.  Required for cbdatasource auth when using TLS
	MaxNumRetries                          int            // max number of retries before giving up
	InitialRetrySleepTimeMS                int            // the initial time to sleep in between retry attempts (in millisecond), which will double each retry
	UseXattrs                              bool           // Whether to use xattrs to store _sync metadata.  Used during view initialization
	ViewQueryTimeoutSecs                   *uint32        // the view query timeout in seconds (default: 75 seconds)
	BucketOpTimeout                        *time.Duration // How long bucket ops should block returning "operation timed out". If nil, uses GoCB default.  GoCB buckets only.
	KvPoolSize                             int            // gocb kv_pool_size - number of pipelines per node. Initialized on GetGoCBConnString
}

Full specification of how to connect to a bucket

func (*BucketSpec) GetGoCBConnString

func (spec *BucketSpec) GetGoCBConnString() (string, error)

Builds a gocb connection string based on BucketSpec.Server. Adds idle connection configuration, and X.509 auth settings when certpath/keypath/cacertpath specified.

func (BucketSpec) GetPoolName

func (spec BucketSpec) GetPoolName() string

func (BucketSpec) GetViewQueryTimeout

func (b BucketSpec) GetViewQueryTimeout() time.Duration

func (BucketSpec) GetViewQueryTimeoutMs

func (b BucketSpec) GetViewQueryTimeoutMs() uint64

func (BucketSpec) IsTLS

func (spec BucketSpec) IsTLS() bool

func (BucketSpec) IsWalrusBucket

func (spec BucketSpec) IsWalrusBucket() bool

func (BucketSpec) MaxRetrySleeper

func (spec BucketSpec) MaxRetrySleeper(maxSleepMs int) RetrySleeper

func (BucketSpec) RetrySleeper

func (spec BucketSpec) RetrySleeper() RetrySleeper

Create a RetrySleeper based on the bucket spec properties. Used to retry bucket operations after transient errors.

func (BucketSpec) TLSConfig

func (b BucketSpec) TLSConfig() *tls.Config

func (BucketSpec) UseClientCert

func (spec BucketSpec) UseClientCert() bool

type CBGoUtilsLogger

type CBGoUtilsLogger struct{}

Implementation for github.com/couchbase/goutils/logging.Logger

func (CBGoUtilsLogger) Debugf

func (CBGoUtilsLogger) Debugf(fmt string, args ...interface{})

func (CBGoUtilsLogger) Debugm

func (CBGoUtilsLogger) Debugm(msg string, kv logging.Map)

func (CBGoUtilsLogger) Debugp

func (CBGoUtilsLogger) Debugp(msg string, kv ...logging.Pair)

func (CBGoUtilsLogger) Errorf

func (CBGoUtilsLogger) Errorf(fmt string, args ...interface{})

func (CBGoUtilsLogger) Errorm

func (CBGoUtilsLogger) Errorm(msg string, kv logging.Map)

func (CBGoUtilsLogger) Errorp

func (CBGoUtilsLogger) Errorp(msg string, kv ...logging.Pair)

func (CBGoUtilsLogger) Fatalf

func (CBGoUtilsLogger) Fatalf(fmt string, args ...interface{})

func (CBGoUtilsLogger) Fatalm

func (CBGoUtilsLogger) Fatalm(msg string, kv logging.Map)

go-couchbase/gomemcached don't use Pair/Map logs, so these are all stubs

func (CBGoUtilsLogger) Fatalp

func (CBGoUtilsLogger) Fatalp(msg string, kv ...logging.Pair)

func (CBGoUtilsLogger) Infof

func (CBGoUtilsLogger) Infof(fmt string, args ...interface{})

func (CBGoUtilsLogger) Infom

func (CBGoUtilsLogger) Infom(msg string, kv logging.Map)

func (CBGoUtilsLogger) Infop

func (CBGoUtilsLogger) Infop(msg string, kv ...logging.Pair)

func (CBGoUtilsLogger) Level

func (CBGoUtilsLogger) Level() logging.Level

func (CBGoUtilsLogger) Logf

func (CBGoUtilsLogger) Logf(level logging.Level, fmt string, args ...interface{})

func (CBGoUtilsLogger) Logm

func (CBGoUtilsLogger) Logm(level logging.Level, msg string, kv logging.Map)

func (CBGoUtilsLogger) Logp

func (CBGoUtilsLogger) Logp(level logging.Level, msg string, kv ...logging.Pair)

func (CBGoUtilsLogger) Requestf

func (CBGoUtilsLogger) Requestf(rlevel logging.Level, fmt string, args ...interface{})

func (CBGoUtilsLogger) Requestm

func (CBGoUtilsLogger) Requestm(rlevel logging.Level, msg string, kv logging.Map)

func (CBGoUtilsLogger) Requestp

func (CBGoUtilsLogger) Requestp(rlevel logging.Level, msg string, kv ...logging.Pair)

func (CBGoUtilsLogger) SetLevel

func (CBGoUtilsLogger) SetLevel(l logging.Level)

func (CBGoUtilsLogger) Severef

func (CBGoUtilsLogger) Severef(fmt string, args ...interface{})

func (CBGoUtilsLogger) Severem

func (CBGoUtilsLogger) Severem(msg string, kv logging.Map)

func (CBGoUtilsLogger) Severep

func (CBGoUtilsLogger) Severep(msg string, kv ...logging.Pair)

func (CBGoUtilsLogger) Tracef

func (CBGoUtilsLogger) Tracef(fmt string, args ...interface{})

func (CBGoUtilsLogger) Tracem

func (CBGoUtilsLogger) Tracem(msg string, kv logging.Map)

func (CBGoUtilsLogger) Tracep

func (CBGoUtilsLogger) Tracep(msg string, kv ...logging.Pair)

func (CBGoUtilsLogger) Warnf

func (CBGoUtilsLogger) Warnf(fmt string, args ...interface{})

func (CBGoUtilsLogger) Warnm

func (CBGoUtilsLogger) Warnm(msg string, kv logging.Map)

func (CBGoUtilsLogger) Warnp

func (CBGoUtilsLogger) Warnp(msg string, kv ...logging.Pair)

type CbgtContext

type CbgtContext struct {
	Manager *cbgt.Manager // Manager is main entry point for initialization, registering indexes
	Cfg     *CfgSG        // Cfg manages storage of the current pindex set and node assignment
	// contains filtered or unexported fields
}

CbgtContext holds the two handles we have for CBGT-related functionality.

func StartShardedDCPFeed

func StartShardedDCPFeed(dbName string, uuid string, heartbeater Heartbeater, bucket *CouchbaseBucketGoCB, numPartitions uint16, cfg *CfgSG) (*CbgtContext, error)

StartShardedDCPFeed initializes and starts a CBGT Manager targeting the provided bucket. dbName is used to define a unique path name for local file storage of pindex files

func (*CbgtContext) StartManager

func (c *CbgtContext) StartManager(dbName string, bucket Bucket, spec BucketSpec, numPartitions uint16) (err error)

StartManager registers this node with cbgt, and the janitor will start feeds on this node.

func (*CbgtContext) StopHeartbeatListener

func (c *CbgtContext) StopHeartbeatListener()

StopHeartbeatListener unregisters the listener from the heartbeater, and stops it.

type CfgSG

type CfgSG struct {
	// contains filtered or unexported fields
}

CfgSG is used to manage shared information between Sync Gateway nodes. It implements cbgt.Cfg for use with cbgt, but can be used to manage any shared data. It uses Sync Gateway's existing bucket as a keystore, and existing caching feed for change notifications.

func NewCfgSG

func NewCfgSG(bucket Bucket) (*CfgSG, error)

NewCfgSG returns a Cfg implementation that reads/writes its entries from/to a couchbase bucket, using DCP streams to subscribe to changes.

urlStr: single URL or multiple URLs delimited by ';' bucket: couchbase bucket name

func (*CfgSG) Del

func (c *CfgSG) Del(cfgKey string, cas uint64) error

func (*CfgSG) FireEvent

func (c *CfgSG) FireEvent(docID string, cas uint64, err error)

func (*CfgSG) Get

func (c *CfgSG) Get(cfgKey string, cas uint64) (
	[]byte, uint64, error)

func (*CfgSG) Refresh

func (c *CfgSG) Refresh() error

func (*CfgSG) Set

func (c *CfgSG) Set(cfgKey string, val []byte, cas uint64) (uint64, error)

func (*CfgSG) Subscribe

func (c *CfgSG) Subscribe(cfgKey string, ch chan cbgt.CfgEvent) error

type ConsoleLogger

type ConsoleLogger struct {
	FileLogger

	LogLevel     *LogLevel
	LogKeyMask   *LogKeyMask
	ColorEnabled bool
	// contains filtered or unexported fields
}

ConsoleLogger is a file logger with a default output of stderr, and tunable log level/keys.

type ConsoleLoggerConfig

type ConsoleLoggerConfig struct {
	FileLoggerConfig

	LogLevel     *LogLevel `json:"log_level,omitempty"`     // Log Level for the console output
	LogKeys      []string  `json:"log_keys,omitempty"`      // Log Keys for the console output
	ColorEnabled *bool     `json:"color_enabled,omitempty"` // Log with color for the console output

	// FileOutput can be used to override the default stderr output, and write to the file specified instead.
	FileOutput string `json:"file_output,omitempty"`
}

type CouchbaseBucketGoCB

type CouchbaseBucketGoCB struct {
	*gocb.Bucket            // the underlying gocb bucket
	Spec         BucketSpec // keep a copy of the BucketSpec for DCP usage
	// contains filtered or unexported fields
}

Implementation of sgbucket.Bucket that talks to a Couchbase server and uses gocb

func AsGoCBBucket

func AsGoCBBucket(bucket Bucket) (*CouchbaseBucketGoCB, bool)

AsGoCBBucket tries to return the given bucket as a GoCBBucket.

func GetCouchbaseBucketGoCB

func GetCouchbaseBucketGoCB(spec BucketSpec) (bucket *CouchbaseBucketGoCB, err error)

Creates a Bucket that talks to a real live Couchbase server.

func GetCouchbaseBucketGoCBFromAuthenticatedCluster

func GetCouchbaseBucketGoCBFromAuthenticatedCluster(cluster *gocb.Cluster, spec BucketSpec, bucketPassword string) (bucket *CouchbaseBucketGoCB, err error)

func GetGoCBBucketFromBaseBucket

func GetGoCBBucketFromBaseBucket(baseBucket Bucket) (bucket CouchbaseBucketGoCB, err error)

func (*CouchbaseBucketGoCB) APIBucketItemCount

func (bucket *CouchbaseBucketGoCB) APIBucketItemCount() (itemCount int, err error)

Get the number of items in the bucket. GOCB doesn't currently offer a way to do this, and so this is a workaround to go directly to Couchbase Server REST API.

func (*CouchbaseBucketGoCB) Add

func (bucket *CouchbaseBucketGoCB) Add(k string, exp uint32, v interface{}) (added bool, err error)

func (*CouchbaseBucketGoCB) AddRaw

func (bucket *CouchbaseBucketGoCB) AddRaw(k string, exp uint32, v []byte) (added bool, err error)

GoCB AddRaw writes as BinaryDocument, which results in the document having the binary doc common flag set. Callers that want to write JSON documents as raw bytes should pass v as []byte to the standard bucket.Add

func (*CouchbaseBucketGoCB) Append

func (bucket *CouchbaseBucketGoCB) Append(k string, data []byte) error

func (*CouchbaseBucketGoCB) BucketItemCount

func (bucket *CouchbaseBucketGoCB) BucketItemCount() (itemCount int, err error)

BucketItemCount first tries to retrieve an accurate bucket count via N1QL, but falls back to the REST API if that cannot be done (when there's no index to count all items in a bucket)

func (*CouchbaseBucketGoCB) BuildDeferredIndexes

func (bucket *CouchbaseBucketGoCB) BuildDeferredIndexes(indexSet []string) error

Issues a build command for any deferred sync gateway indexes associated with the bucket.

func (*CouchbaseBucketGoCB) Close

func (bucket *CouchbaseBucketGoCB) Close()

func (*CouchbaseBucketGoCB) CouchbaseServerVersion

func (bucket *CouchbaseBucketGoCB) CouchbaseServerVersion() (major uint64, minor uint64, micro string)

func (*CouchbaseBucketGoCB) CreateIndex

func (bucket *CouchbaseBucketGoCB) CreateIndex(indexName string, expression string, filterExpression string, options *N1qlIndexOptions) error

CreateIndex issues a CREATE INDEX query in the current bucket, using the form:

CREATE INDEX indexName ON bucket.Name(expression) WHERE filterExpression WITH options

Sample usage with resulting statement:

  CreateIndex("myIndex", "field1, field2, nested.field", "field1 > 0", N1qlIndexOptions{numReplica:1})
CREATE INDEX myIndex on myBucket(field1, field2, nested.field) WHERE field1 > 0 WITH {"numReplica":1}

func (*CouchbaseBucketGoCB) CreatePrimaryIndex

func (bucket *CouchbaseBucketGoCB) CreatePrimaryIndex(indexName string, options *N1qlIndexOptions) error

CreatePrimaryIndex creates a primary index in the current bucket with the specified index name and options.

func (*CouchbaseBucketGoCB) Delete

func (bucket *CouchbaseBucketGoCB) Delete(k string) error

func (*CouchbaseBucketGoCB) DeleteDDoc

func (bucket *CouchbaseBucketGoCB) DeleteDDoc(docname string) error

func (*CouchbaseBucketGoCB) DeleteWithXattr

func (bucket *CouchbaseBucketGoCB) DeleteWithXattr(k string, xattrKey string) error

Delete a document and its associated named xattr. Couchbase server will preserve system xattrs as part of the (CBS) tombstone when a document is deleted. To remove the system xattr as well, an explicit subdoc delete operation is required. This is currently called only for Purge operations.

The existing doc is expected to be in one of the following states:

  • DocExists and XattrExists
  • DocExists but NoXattr
  • XattrExists but NoDoc
  • NoDoc and NoXattr

In all cases, the end state will be NoDoc and NoXattr. Expected errors:

  • Temporary server overloaded errors, in which case the caller should retry
  • If the doc is in the NoDoc and NoXattr state, it will return a KeyNotFound error

func (*CouchbaseBucketGoCB) DropIndex

func (bucket *CouchbaseBucketGoCB) DropIndex(indexName string) error

DropIndex drops the specified index from the current bucket.

func (*CouchbaseBucketGoCB) Dump

func (bucket *CouchbaseBucketGoCB) Dump()

func (*CouchbaseBucketGoCB) ExplainQuery

func (bucket *CouchbaseBucketGoCB) ExplainQuery(statement string, params interface{}) (plan map[string]interface{}, err error)

func (*CouchbaseBucketGoCB) Flush

func (bucket *CouchbaseBucketGoCB) Flush() error

This flushes the bucket.

func (*CouchbaseBucketGoCB) FormatBinaryDocument

func (bucket *CouchbaseBucketGoCB) FormatBinaryDocument(input []byte) interface{}

Formats binary document to the style expected by the transcoder. GoCBCustomSGTranscoder expects binary documents to be wrapped in BinaryDocument (this supports writing JSON as raw bytes). The default goCB transcoder doesn't require additional formatting (assumes all incoming []byte should be stored as binary docs.)

func (*CouchbaseBucketGoCB) Get

func (bucket *CouchbaseBucketGoCB) Get(k string, rv interface{}) (cas uint64, err error)

func (*CouchbaseBucketGoCB) GetAndTouchRaw

func (bucket *CouchbaseBucketGoCB) GetAndTouchRaw(k string, exp uint32) (rv []byte, cas uint64, err error)

func (*CouchbaseBucketGoCB) GetBucketCredentials

func (bucket *CouchbaseBucketGoCB) GetBucketCredentials() (username, password string)

func (*CouchbaseBucketGoCB) GetBulkCounters

func (bucket *CouchbaseBucketGoCB) GetBulkCounters(keys []string) (map[string]uint64, error)

Retrieve keys in bulk for increased efficiency. If any keys are not found, they will not be returned, and so the size of the map may be less than the size of the keys slice, and no error will be returned in that case since it's an expected situation.

If there is an "overall error" calling the underlying GoCB bulk operation, then that error will be returned.

If there are errors on individual keys -- aside from "not found" errors -- such as QueueOverflow errors that can be retried successfully, they will be retried with a backoff loop.

func (*CouchbaseBucketGoCB) GetBulkRaw

func (bucket *CouchbaseBucketGoCB) GetBulkRaw(keys []string) (map[string][]byte, error)

Retrieve keys in bulk for increased efficiency. If any keys are not found, they will not be returned, and so the size of the map may be less than the size of the keys slice, and no error will be returned in that case since it's an expected situation.

If there is an "overall error" calling the underlying GoCB bulk operation, then that error will be returned.

If there are errors on individual keys -- aside from "not found" errors -- such as QueueOverflow errors that can be retried successfully, they will be retried with a backoff loop.

func (*CouchbaseBucketGoCB) GetDDoc

func (bucket *CouchbaseBucketGoCB) GetDDoc(docname string, into interface{}) error

func (*CouchbaseBucketGoCB) GetDDocs

func (bucket *CouchbaseBucketGoCB) GetDDocs(into interface{}) error

func (*CouchbaseBucketGoCB) GetExpiry

func (bucket *CouchbaseBucketGoCB) GetExpiry(k string) (expiry uint32, getMetaError error)

func (*CouchbaseBucketGoCB) GetIndexMeta

func (bucket *CouchbaseBucketGoCB) GetIndexMeta(indexName string) (exists bool, meta *gocb.IndexInfo, err error)

func (*CouchbaseBucketGoCB) GetMaxTTL

func (bucket *CouchbaseBucketGoCB) GetMaxTTL() (int, error)

Gets the bucket max TTL, or 0 if no TTL was set. Sync gateway should fail to bring the DB online if this is non-zero, since it's not meant to operate against buckets that auto-delete data.

func (*CouchbaseBucketGoCB) GetMaxVbno

func (bucket *CouchbaseBucketGoCB) GetMaxVbno() (uint16, error)

func (*CouchbaseBucketGoCB) GetMetadataPurgeInterval

func (bucket *CouchbaseBucketGoCB) GetMetadataPurgeInterval() (int, error)

Gets the metadata purge interval for the bucket. First checks for a bucket-specific value. If not found, retrieves the cluster-wide value.

func (*CouchbaseBucketGoCB) GetName

func (bucket *CouchbaseBucketGoCB) GetName() string

func (*CouchbaseBucketGoCB) GetRaw

func (bucket *CouchbaseBucketGoCB) GetRaw(k string) (rv []byte, cas uint64, err error)

func (*CouchbaseBucketGoCB) GetServerUUID

func (bucket *CouchbaseBucketGoCB) GetServerUUID() (uuid string, err error)

Get the Server UUID of the bucket, this is also known as the Cluster UUID

func (*CouchbaseBucketGoCB) GetStatsVbSeqno

func (bucket *CouchbaseBucketGoCB) GetStatsVbSeqno(maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error)

func (*CouchbaseBucketGoCB) GetWithXattr

func (bucket *CouchbaseBucketGoCB) GetWithXattr(k string, xattrKey string, rv interface{}, xv interface{}) (cas uint64, err error)

Retrieve a document and its associated named xattr

func (*CouchbaseBucketGoCB) GetXattr

func (bucket *CouchbaseBucketGoCB) GetXattr(k string, xattrKey string, xv interface{}) (casOut uint64, err error)

func (*CouchbaseBucketGoCB) Incr

func (bucket *CouchbaseBucketGoCB) Incr(k string, amt, def uint64, exp uint32) (uint64, error)

Increment the atomic counter k by amt, where amt must be a non-zero delta for the counter.

func (*CouchbaseBucketGoCB) IsKeyNotFoundError

func (bucket *CouchbaseBucketGoCB) IsKeyNotFoundError(err error) bool

func (*CouchbaseBucketGoCB) IsSubDocPathNotFound

func (bucket *CouchbaseBucketGoCB) IsSubDocPathNotFound(err error) bool

Check if this is a SubDocPathNotFound error Pending question to see if there is an easier way: https://forums.couchbase.com/t/checking-for-errsubdocpathnotfound-errors/13492

func (*CouchbaseBucketGoCB) IsSupported

func (bucket *CouchbaseBucketGoCB) IsSupported(feature sgbucket.BucketFeature) bool

func (*CouchbaseBucketGoCB) PutDDoc

func (bucket *CouchbaseBucketGoCB) PutDDoc(docname string, value interface{}) error

func (*CouchbaseBucketGoCB) Query

func (bucket *CouchbaseBucketGoCB) Query(statement string, params interface{}, consistency gocb.ConsistencyMode, adhoc bool) (results gocb.QueryResults, err error)

Query accepts a parameterized statement, optional list of params, and an optional flag to force adhoc query execution. Params specified using the $param notation in the statement are intended to be used w/ N1QL prepared statements, and will be passed through as params to n1ql. e.g.:

SELECT _sync.sequence FROM $_bucket WHERE _sync.sequence > $minSeq

https://developer.couchbase.com/documentation/server/current/sdk/go/n1ql-queries-with-sdk.html for additional details. Will additionally replace all instances of BucketQueryToken($_bucket) in the statement with the bucket name. 'bucket' should not be included in params.

If adhoc=true, prepared statement handling will be disabled. Should only be set to true for queries that can't be prepared, e.g.:

SELECT _sync.channels.ABC.seq from $bucket

Query retries on Indexer Errors, as these are normally transient

func (*CouchbaseBucketGoCB) QueryBucketItemCount

func (bucket *CouchbaseBucketGoCB) QueryBucketItemCount() (itemCount int, err error)

QueryBucketItemCount uses a request plus query to get the number of items in a bucket, as the REST API can be slow to update its value.

func (*CouchbaseBucketGoCB) Refresh

func (bucket *CouchbaseBucketGoCB) Refresh() error

This is a "better-than-nothing" version of Refresh(). See https://forums.couchbase.com/t/equivalent-of-go-couchbase-bucket-refresh/12498/2

func (*CouchbaseBucketGoCB) Remove

func (bucket *CouchbaseBucketGoCB) Remove(k string, cas uint64) (casOut uint64, err error)

func (*CouchbaseBucketGoCB) Set

func (bucket *CouchbaseBucketGoCB) Set(k string, exp uint32, v interface{}) error

func (*CouchbaseBucketGoCB) SetBulk

func (bucket *CouchbaseBucketGoCB) SetBulk(entries []*sgbucket.BulkSetEntry) (err error)

Retry up to the retry limit, then return. Does not retry items if they had CAS failures, and it's up to the caller to handle those.

func (*CouchbaseBucketGoCB) SetRaw

func (bucket *CouchbaseBucketGoCB) SetRaw(k string, exp uint32, v []byte) error

func (*CouchbaseBucketGoCB) StartDCPFeed

func (bucket *CouchbaseBucketGoCB) StartDCPFeed(args sgbucket.FeedArguments, callback sgbucket.FeedEventCallbackFunc, dbStats *expvar.Map) error

func (*CouchbaseBucketGoCB) StartTapFeed

func (bucket *CouchbaseBucketGoCB) StartTapFeed(args sgbucket.FeedArguments, dbStats *expvar.Map) (sgbucket.MutationFeed, error)

GoCB (and Server 5.0.0) don't support the TapFeed. For legacy support, start a DCP feed and stream over a single channel

func (*CouchbaseBucketGoCB) Touch

func (bucket *CouchbaseBucketGoCB) Touch(k string, exp uint32) (cas uint64, err error)

func (*CouchbaseBucketGoCB) UUID

func (bucket *CouchbaseBucketGoCB) UUID() (string, error)

func (*CouchbaseBucketGoCB) Update

func (bucket *CouchbaseBucketGoCB) Update(k string, exp uint32, callback sgbucket.UpdateFunc) (casOut uint64, err error)

func (*CouchbaseBucketGoCB) UpdateXattr

func (bucket *CouchbaseBucketGoCB) UpdateXattr(k string, xattrKey string, exp uint32, cas uint64, xv interface{}, deleteBody, isDelete bool) (casOut uint64, err error)

CAS-safe update of a document's xattr (only). Deletes the document body if deleteBody is true.

func (*CouchbaseBucketGoCB) VBHash

func (bucket *CouchbaseBucketGoCB) VBHash(docID string) uint32

func (*CouchbaseBucketGoCB) View

func (bucket *CouchbaseBucketGoCB) View(ddoc, name string, params map[string]interface{}) (sgbucket.ViewResult, error)

func (*CouchbaseBucketGoCB) ViewCustom

func (bucket *CouchbaseBucketGoCB) ViewCustom(ddoc, name string, params map[string]interface{}, vres interface{}) error

func (CouchbaseBucketGoCB) ViewQuery

func (bucket CouchbaseBucketGoCB) ViewQuery(ddoc, name string, params map[string]interface{}) (sgbucket.QueryResultIterator, error)

func (*CouchbaseBucketGoCB) WaitForIndexOnline

func (bucket *CouchbaseBucketGoCB) WaitForIndexOnline(indexName string) error

Waits for index state to be online. Waits no longer than provided timeout

func (*CouchbaseBucketGoCB) Write

func (bucket *CouchbaseBucketGoCB) Write(k string, flags int, exp uint32, v interface{}, opt sgbucket.WriteOptions) error

func (*CouchbaseBucketGoCB) WriteCas

func (bucket *CouchbaseBucketGoCB) WriteCas(k string, flags int, exp uint32, cas uint64, v interface{}, opt sgbucket.WriteOptions) (casOut uint64, err error)

func (*CouchbaseBucketGoCB) WriteCasWithXattr

func (bucket *CouchbaseBucketGoCB) WriteCasWithXattr(k string, xattrKey string, exp uint32, cas uint64, v interface{}, xv interface{}) (casOut uint64, err error)

CAS-safe write of a document and its associated named xattr

func (*CouchbaseBucketGoCB) WriteUpdate

func (bucket *CouchbaseBucketGoCB) WriteUpdate(k string, exp uint32, callback sgbucket.WriteUpdateFunc) (casOut uint64, err error)

func (*CouchbaseBucketGoCB) WriteUpdateWithXattr

func (bucket *CouchbaseBucketGoCB) WriteUpdateWithXattr(k string, xattrKey string, exp uint32, previous *sgbucket.BucketDocument, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error)

WriteUpdateWithXattr retrieves the existing doc from the bucket, invokes the callback to update the document, then writes the new document to the bucket. Will repeat this process on cas failure. If previousValue/xattr/cas are provided, will use those on the first iteration instead of retrieving from the bucket.

func (*CouchbaseBucketGoCB) WriteWithXattr

func (bucket *CouchbaseBucketGoCB) WriteWithXattr(k string, xattrKey string, exp uint32, cas uint64, value []byte, xattrValue []byte, isDelete bool, deleteBody bool) (casOut uint64, err error)

Single attempt to update a document and xattr. Setting isDelete=true and value=nil will delete the document body. Both update types (UpdateXattr, WriteCasWithXattr) include recoverable error retry.

type CouchbaseBucketType

type CouchbaseBucketType int
const (
	DataBucket CouchbaseBucketType = iota
	IndexBucket
)

type CouchbaseDriver

type CouchbaseDriver int
const (
	GoCB                   CouchbaseDriver = iota // Use GoCB driver with default Transcoder
	GoCBCustomSGTranscoder                        // Use GoCB driver with a custom Transcoder
)

func ChooseCouchbaseDriver

func ChooseCouchbaseDriver(bucketType CouchbaseBucketType) CouchbaseDriver

func (CouchbaseDriver) String

func (couchbaseDriver CouchbaseDriver) String() string

type DCPCommon

type DCPCommon struct {
	// contains filtered or unexported fields
}

func NewDCPCommon

func NewDCPCommon(callback sgbucket.FeedEventCallbackFunc, bucket Bucket, maxVbNo uint16, persistCheckpoints bool, dbStats *expvar.Map, feedID string) *DCPCommon

func (*DCPCommon) InitVbMeta

func (c *DCPCommon) InitVbMeta(vbNo uint16)

type DCPDest

type DCPDest struct {
	*DCPCommon
	// contains filtered or unexported fields
}

DCPDest implements SGDest (superset of cbgt.Dest) interface to manage updates coming from a cbgt-based DCP feed. Embeds DCPCommon for underlying feed event processing. Metadata initialization is done on-demand per vbucket, as a given Dest isn't expected to manage the full set of vbuckets for a bucket.

func (*DCPDest) Close

func (d *DCPDest) Close() error

func (*DCPDest) ConsistencyWait

func (d *DCPDest) ConsistencyWait(partition, partitionUUID string,
	consistencyLevel string, consistencySeq uint64, cancelCh <-chan bool) error

TODO: Not implemented, review potential usage

func (*DCPDest) Count

func (d *DCPDest) Count(pindex *cbgt.PIndex, cancelCh <-chan bool) (uint64, error)

func (*DCPDest) DataDelete

func (d *DCPDest) DataDelete(partition string, key []byte, seq uint64,
	cas uint64,
	extrasType cbgt.DestExtrasType, extras []byte) error

func (*DCPDest) DataDeleteEx

func (d *DCPDest) DataDeleteEx(partition string, key []byte, seq uint64,
	cas uint64, extrasType cbgt.DestExtrasType, req interface{}) error

func (*DCPDest) DataUpdate

func (d *DCPDest) DataUpdate(partition string, key []byte, seq uint64,
	val []byte, cas uint64, extrasType cbgt.DestExtrasType, extras []byte) error

func (*DCPDest) DataUpdateEx

func (d *DCPDest) DataUpdateEx(partition string, key []byte, seq uint64, val []byte,
	cas uint64, extrasType cbgt.DestExtrasType, req interface{}) error

func (*DCPDest) OpaqueGet

func (d *DCPDest) OpaqueGet(partition string) (value []byte, lastSeq uint64, err error)

func (*DCPDest) OpaqueSet

func (d *DCPDest) OpaqueSet(partition string, value []byte) error

func (*DCPDest) Query

func (d *DCPDest) Query(pindex *cbgt.PIndex, req []byte, w io.Writer,
	cancelCh <-chan bool) error

func (*DCPDest) Rollback

func (d *DCPDest) Rollback(partition string, rollbackSeq uint64) error

func (*DCPDest) RollbackEx

func (d *DCPDest) RollbackEx(partition string, vbucketUUID uint64, rollbackSeq uint64) error

func (*DCPDest) SnapshotStart

func (d *DCPDest) SnapshotStart(partition string,
	snapStart, snapEnd uint64) error

func (*DCPDest) Stats

func (d *DCPDest) Stats(io.Writer) error

Stats would allow SG to return SG-specific stats to cbgt's stats reporting - not currently used.

type DCPLoggingDest

type DCPLoggingDest struct {
	// contains filtered or unexported fields
}

DCPLoggingDest wraps DCPDest to provide per-callback logging

func (*DCPLoggingDest) Close

func (d *DCPLoggingDest) Close() error

func (*DCPLoggingDest) ConsistencyWait

func (d *DCPLoggingDest) ConsistencyWait(partition, partitionUUID string,
	consistencyLevel string, consistencySeq uint64, cancelCh <-chan bool) error

func (*DCPLoggingDest) Count

func (d *DCPLoggingDest) Count(pindex *cbgt.PIndex, cancelCh <-chan bool) (uint64, error)

func (*DCPLoggingDest) DataDelete

func (d *DCPLoggingDest) DataDelete(partition string, key []byte, seq uint64,
	cas uint64, extrasType cbgt.DestExtrasType, extras []byte) error

func (*DCPLoggingDest) DataDeleteEx

func (d *DCPLoggingDest) DataDeleteEx(partition string, key []byte, seq uint64,
	cas uint64, extrasType cbgt.DestExtrasType, req interface{}) error

func (*DCPLoggingDest) DataUpdate

func (d *DCPLoggingDest) DataUpdate(partition string, key []byte, seq uint64,
	val []byte, cas uint64, extrasType cbgt.DestExtrasType, extras []byte) error

func (*DCPLoggingDest) DataUpdateEx

func (d *DCPLoggingDest) DataUpdateEx(partition string, key []byte, seq uint64, val []byte,
	cas uint64, extrasType cbgt.DestExtrasType, req interface{}) error

func (*DCPLoggingDest) OpaqueGet

func (d *DCPLoggingDest) OpaqueGet(partition string) (value []byte, lastSeq uint64, err error)

func (*DCPLoggingDest) OpaqueSet

func (d *DCPLoggingDest) OpaqueSet(partition string, value []byte) error

func (*DCPLoggingDest) Query

func (d *DCPLoggingDest) Query(pindex *cbgt.PIndex, req []byte, w io.Writer,
	cancelCh <-chan bool) error

func (*DCPLoggingDest) Rollback

func (d *DCPLoggingDest) Rollback(partition string, rollbackSeq uint64) error

func (*DCPLoggingDest) RollbackEx

func (d *DCPLoggingDest) RollbackEx(partition string, vbucketUUID uint64, rollbackSeq uint64) error

func (*DCPLoggingDest) SnapshotStart

func (d *DCPLoggingDest) SnapshotStart(partition string,
	snapStart, snapEnd uint64) error

func (*DCPLoggingDest) Stats

func (d *DCPLoggingDest) Stats(w io.Writer) error

type DCPLoggingReceiver

type DCPLoggingReceiver struct {
	// contains filtered or unexported fields
}

DCPLoggingReceiver wraps DCPReceiver to provide per-callback logging

func (*DCPLoggingReceiver) DataDelete

func (r *DCPLoggingReceiver) DataDelete(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*DCPLoggingReceiver) DataUpdate

func (r *DCPLoggingReceiver) DataUpdate(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*DCPLoggingReceiver) GetMetaData

func (r *DCPLoggingReceiver) GetMetaData(vbucketId uint16) (
	value []byte, lastSeq uint64, err error)

func (*DCPLoggingReceiver) OnError

func (r *DCPLoggingReceiver) OnError(err error)

func (*DCPLoggingReceiver) Rollback

func (r *DCPLoggingReceiver) Rollback(vbucketId uint16, rollbackSeq uint64) error

func (*DCPLoggingReceiver) SetMetaData

func (r *DCPLoggingReceiver) SetMetaData(vbucketId uint16, value []byte) error

func (*DCPLoggingReceiver) SnapshotStart

func (r *DCPLoggingReceiver) SnapshotStart(vbucketId uint16,
	snapStart, snapEnd uint64, snapType uint32) error

type DCPReceiver

type DCPReceiver struct {
	*DCPCommon
}

DCPReceiver implements cbdatasource.Receiver to manage updates coming from a cbdatasource BucketDataSource. See go-couchbase/cbdatasource for additional details

func (*DCPReceiver) DataDelete

func (r *DCPReceiver) DataDelete(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*DCPReceiver) DataUpdate

func (r *DCPReceiver) DataUpdate(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*DCPReceiver) GetMetaData

func (r *DCPReceiver) GetMetaData(vbNo uint16) (
	value []byte, lastSeq uint64, err error)

func (*DCPReceiver) OnError

func (r *DCPReceiver) OnError(err error)

func (*DCPReceiver) Rollback

func (r *DCPReceiver) Rollback(vbucketId uint16, rollbackSeq uint64) error

RollbackEx should be called by cbdatasource - Rollback required to maintain the interface. In the event it's called, logs warning and does a hard reset on metadata for the vbucket

func (*DCPReceiver) RollbackEx

func (r *DCPReceiver) RollbackEx(vbucketId uint16, vbucketUUID uint64, rollbackSeq uint64) error

RollbackEx includes the vbucketUUID needed to reset the metadata correctly

func (*DCPReceiver) SetMetaData

func (r *DCPReceiver) SetMetaData(vbucketId uint16, value []byte) error

func (*DCPReceiver) SnapshotStart

func (r *DCPReceiver) SnapshotStart(vbNo uint16,
	snapStart, snapEnd uint64, snapType uint32) error

type DebugIntMeanVar

type DebugIntMeanVar struct {
	// contains filtered or unexported fields
}

func (*DebugIntMeanVar) AddSince

func (d *DebugIntMeanVar) AddSince(start time.Time)

func (*DebugIntMeanVar) AddValue

func (d *DebugIntMeanVar) AddValue(value int64)

func (*DebugIntMeanVar) String

func (d *DebugIntMeanVar) String() string

type DeferredLogFn

type DeferredLogFn func()

DeferredLogFn is an anonymous function that can be executed at a later date to log something.

type EventUpdateFunc

type EventUpdateFunc func(event *sgbucket.FeedEvent) bool

type FeedArguments

type FeedArguments sgbucket.FeedArguments

type FileLogger

type FileLogger struct {
	Enabled bool
	// contains filtered or unexported fields
}

func NewFileLogger

func NewFileLogger(config FileLoggerConfig, level LogLevel, name string, logFilePath string, minAge int) (*FileLogger, error)

NewFileLogger returns a new FileLogger from a config.

func (*FileLogger) Rotate

func (l *FileLogger) Rotate() error

Rotate will rotate the active log file.

func (*FileLogger) String

func (l *FileLogger) String() string

type FileLoggerConfig

type FileLoggerConfig struct {
	Enabled  *bool             `json:"enabled,omitempty"`  // Toggle for this log output
	Rotation logRotationConfig `json:"rotation,omitempty"` // Log rotation settings

	CollationBufferSize *int      `json:"collation_buffer_size,omitempty"` // The size of the log collation buffer.
	Output              io.Writer `json:"-"`                               // Logger output. Defaults to os.Stderr. Can be overridden for testing purposes.
}

type GoCBCoreLogger

type GoCBCoreLogger struct{}

****************************************************** Implementation of github.com/couchbase/gocbcore.Logger ******************************************************

func (GoCBCoreLogger) Log

func (GoCBCoreLogger) Log(level gocbcore.LogLevel, _ int, format string, v ...interface{}) error

Log wraps the levelled SG logs for gocbcore to use. Log levels are mapped as follows:

Error  -> SG Error
Warn   -> SG Warn
Info   -> SG Debug
Debug  -> SG Trace
Trace  -> SG Trace
Others -> no-op

type GoCBLogger

type GoCBLogger struct{}

************************************************** Implementation of github.com/couchbase/gocb.Logger **************************************************

func (GoCBLogger) Log

func (GoCBLogger) Log(level gocb.LogLevel, _ int, format string, v ...interface{}) error

Log wraps the levelled SG logs for gocb to use. Log levels are mapped as follows:

Error  -> SG Error
Warn   -> SG Warn
Info   -> SG Debug
Debug  -> SG Trace
Trace  -> SG Trace
Others -> no-op

type HTTPError

type HTTPError struct {
	Status  int
	Message string
}

Simple error implementation wrapping an HTTP response status.

func HTTPErrorf

func HTTPErrorf(status int, format string, args ...interface{}) *HTTPError

func (*HTTPError) Error

func (err *HTTPError) Error() string

type HeartbeatListener

type HeartbeatListener interface {
	Name() string
	GetNodes() (nodeUUIDs []string, err error)
	StaleHeartbeatDetected(nodeUUID string)
	Stop()
}

A HeartbeatListener defines the set of nodes it wants to monitor, and a callback when one of those nodes stops sending heartbeats.

type Heartbeater

type Heartbeater interface {
	RegisterListener(listener HeartbeatListener) error
	UnregisterListener(name string)
	Start() error
	Stop()
}

Heartbeater defines the interface for heartbeat management

type IntMax

type IntMax struct {
	// contains filtered or unexported fields
}

IntMax is an expvar.Value that tracks the maximum value it's given.

func (*IntMax) SetIfMax

func (v *IntMax) SetIfMax(value int64)

func (*IntMax) String

func (v *IntMax) String() string

type IntMeanVar

type IntMeanVar struct {
	// contains filtered or unexported fields
}

IntMeanVar is an expvar.Value that returns the mean of all values that are sent via AddValue or AddSince.

func (*IntMeanVar) AddSince

func (v *IntMeanVar) AddSince(start time.Time)

func (*IntMeanVar) AddValue

func (v *IntMeanVar) AddValue(value int64)

Adds value. Calculates new mean as iterative mean (avoids int overflow)

func (*IntMeanVar) String

func (v *IntMeanVar) String() string

type IntRollingMeanVar

type IntRollingMeanVar struct {
	// contains filtered or unexported fields
}

IntRollingMeanVar is an expvar.Value that returns the mean of the [size] latest values sent via AddValue. Uses a slice to track values, so setting a large size has memory implications

func NewIntRollingMeanVar

func NewIntRollingMeanVar(capacity int) IntRollingMeanVar

func (*IntRollingMeanVar) AddSince

func (v *IntRollingMeanVar) AddSince(start time.Time)

func (*IntRollingMeanVar) AddSincePerItem

func (v *IntRollingMeanVar) AddSincePerItem(start time.Time, numItems int)

func (*IntRollingMeanVar) AddValue

func (v *IntRollingMeanVar) AddValue(value int64)

Adds value

func (*IntRollingMeanVar) String

func (v *IntRollingMeanVar) String() string

type JSONDecoderI

type JSONDecoderI interface {
	UseNumber()
	DisallowUnknownFields()
	Decode(v interface{}) error
	Buffered() io.Reader
	//Token() (json.Token, error) // Not implemented by jsoniter
	More() bool
}

JSONDecoderI is the common interface between json.Decoder and jsoniter.Decoder

func JSONDecoder

func JSONDecoder(r io.Reader) JSONDecoderI

JSONDecoder returns a new JSON decoder implementing the JSONDecoderI interface

type JSONEncoderI

type JSONEncoderI interface {
	Encode(v interface{}) error
	SetIndent(prefix, indent string)
	SetEscapeHTML(on bool)
}

JSONEncoderI is the common interface between json.Encoder and jsoniter.Encoder

func JSONEncoder

func JSONEncoder(w io.Writer) JSONEncoderI

JSONEncoder returns a new JSON encoder implementing the JSONEncoderI interface

func JSONEncoderCanonical

func JSONEncoderCanonical(w io.Writer) JSONEncoderI

JSONEncoderCanonical returns a new canonical JSON encoder implementing the JSONEncoderI interface

type JSONIterError

type JSONIterError struct {
	E error
}

JSONIterError is returned by the JSON wrapper functions, whenever jsoniter returns a non-nil error.

func (*JSONIterError) Error

func (iterErr *JSONIterError) Error() string

type KVPair

type KVPair struct {
	Key string
	Val interface{}
}

KVPair represents a single KV pair to be used in InjectJSONProperties

type KVPairBytes

type KVPairBytes struct {
	Key string
	Val []byte
}

KVPairBytes represents a single KV pair to be used in InjectJSONPropertiesFromBytes

type LRUCache

type LRUCache struct {
	// contains filtered or unexported fields
}

An LRU cache of document revision bodies, together with their channel access.

func NewLRUCache

func NewLRUCache(capacity int) (*LRUCache, error)

Creates an LRU cache with the given capacity.

func (*LRUCache) Count

func (lc *LRUCache) Count() int

func (*LRUCache) Get

func (lc *LRUCache) Get(key string) (result interface{}, found bool)

Looks up an entry from the cache.

func (*LRUCache) Put

func (lc *LRUCache) Put(key string, value interface{})

Adds an entry to the cache if it's the first time seen, otherwise just updates the position in the LRU cache but ignores the new value, since the entries in the cache are treated as immutable.

type LRUCacheLoaderFunc

type LRUCacheLoaderFunc func(key string) (value interface{}, err error)

type LeakyBucket

type LeakyBucket struct {
	// contains filtered or unexported fields
}

A wrapper around a Bucket to support forced errors. For testing use only.

func NewLeakyBucket

func NewLeakyBucket(bucket Bucket, config LeakyBucketConfig) *LeakyBucket

func (*LeakyBucket) Add

func (b *LeakyBucket) Add(k string, exp uint32, v interface{}) (added bool, err error)

func (*LeakyBucket) AddRaw

func (b *LeakyBucket) AddRaw(k string, exp uint32, v []byte) (added bool, err error)

func (*LeakyBucket) Append

func (b *LeakyBucket) Append(k string, data []byte) error

func (*LeakyBucket) BuildDeferredIndexes

func (b *LeakyBucket) BuildDeferredIndexes(indexSet []string) error

func (*LeakyBucket) Close

func (b *LeakyBucket) Close()

func (*LeakyBucket) CloseAndDelete

func (b *LeakyBucket) CloseAndDelete() error

func (*LeakyBucket) CouchbaseServerVersion

func (b *LeakyBucket) CouchbaseServerVersion() (major uint64, minor uint64, micro string)

func (*LeakyBucket) CreateIndex

func (b *LeakyBucket) CreateIndex(indexName string, expression string, filterExpression string, options *N1qlIndexOptions) error

func (*LeakyBucket) CreatePrimaryIndex

func (b *LeakyBucket) CreatePrimaryIndex(indexName string, options *N1qlIndexOptions) error

func (*LeakyBucket) Delete

func (b *LeakyBucket) Delete(k string) error

func (*LeakyBucket) DeleteDDoc

func (b *LeakyBucket) DeleteDDoc(docname string) error

func (*LeakyBucket) DeleteWithXattr

func (b *LeakyBucket) DeleteWithXattr(k string, xattr string) error

func (*LeakyBucket) DropIndex

func (b *LeakyBucket) DropIndex(indexName string) error

func (*LeakyBucket) Dump

func (b *LeakyBucket) Dump()

func (*LeakyBucket) ExplainQuery

func (b *LeakyBucket) ExplainQuery(statement string, params interface{}) (plain map[string]interface{}, err error)

func (*LeakyBucket) Get

func (b *LeakyBucket) Get(k string, rv interface{}) (cas uint64, err error)

func (*LeakyBucket) GetAndTouchRaw

func (b *LeakyBucket) GetAndTouchRaw(k string, exp uint32) (v []byte, cas uint64, err error)

func (*LeakyBucket) GetBulkRaw

func (b *LeakyBucket) GetBulkRaw(keys []string) (map[string][]byte, error)

func (*LeakyBucket) GetDDoc

func (b *LeakyBucket) GetDDoc(docname string, value interface{}) error

func (*LeakyBucket) GetDDocs

func (b *LeakyBucket) GetDDocs(value interface{}) error

func (*LeakyBucket) GetIndexMeta

func (b *LeakyBucket) GetIndexMeta(indexName string) (exists bool, meta *gocb.IndexInfo, err error)

func (*LeakyBucket) GetMaxVbno

func (b *LeakyBucket) GetMaxVbno() (uint16, error)

func (*LeakyBucket) GetName

func (b *LeakyBucket) GetName() string

func (*LeakyBucket) GetRaw

func (b *LeakyBucket) GetRaw(k string) (v []byte, cas uint64, err error)

func (*LeakyBucket) GetStatsVbSeqno

func (b *LeakyBucket) GetStatsVbSeqno(maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error)

func (*LeakyBucket) GetUnderlyingBucket

func (b *LeakyBucket) GetUnderlyingBucket() Bucket

func (*LeakyBucket) GetWithXattr

func (b *LeakyBucket) GetWithXattr(k string, xattr string, rv interface{}, xv interface{}) (cas uint64, err error)

func (*LeakyBucket) GetXattr

func (b *LeakyBucket) GetXattr(k string, xattr string, xv interface{}) (cas uint64, err error)

func (*LeakyBucket) Incr

func (b *LeakyBucket) Incr(k string, amt, def uint64, exp uint32) (uint64, error)

func (*LeakyBucket) IsSupported

func (b *LeakyBucket) IsSupported(feature sgbucket.BucketFeature) bool

func (*LeakyBucket) PutDDoc

func (b *LeakyBucket) PutDDoc(docname string, value interface{}) error

func (*LeakyBucket) Query

func (b *LeakyBucket) Query(statement string, params interface{}, consistency gocb.ConsistencyMode, adhoc bool) (results gocb.QueryResults, err error)

func (*LeakyBucket) Refresh

func (b *LeakyBucket) Refresh() error

func (*LeakyBucket) Remove

func (b *LeakyBucket) Remove(k string, cas uint64) (casOut uint64, err error)

func (*LeakyBucket) Set

func (b *LeakyBucket) Set(k string, exp uint32, v interface{}) error

func (*LeakyBucket) SetBulk

func (b *LeakyBucket) SetBulk(entries []*sgbucket.BulkSetEntry) (err error)

func (*LeakyBucket) SetFirstTimeViewCustomPartialError

func (b *LeakyBucket) SetFirstTimeViewCustomPartialError(val bool)

Accessors to set leaky bucket config for a running bucket. Used to tune properties on a walrus bucket created as part of rest tester - it will be a leaky bucket (due to DCP support), but there's no mechanism to pass in a leaky bucket config to a RestTester bucket at bucket creation time.

func (*LeakyBucket) SetIgnoreClose

func (b *LeakyBucket) SetIgnoreClose(value bool)

For walrus handling, ignore close needs to be set after the bucket is initialized

func (*LeakyBucket) SetPostQueryCallback

func (b *LeakyBucket) SetPostQueryCallback(callback func(ddoc, viewName string, params map[string]interface{}))

func (*LeakyBucket) SetRaw

func (b *LeakyBucket) SetRaw(k string, exp uint32, v []byte) error

func (*LeakyBucket) StartDCPFeed

func (b *LeakyBucket) StartDCPFeed(args sgbucket.FeedArguments, callback sgbucket.FeedEventCallbackFunc, dbStats *expvar.Map) error

func (*LeakyBucket) StartTapFeed

func (b *LeakyBucket) StartTapFeed(args sgbucket.FeedArguments, dbStats *expvar.Map) (sgbucket.MutationFeed, error)

func (*LeakyBucket) Touch

func (b *LeakyBucket) Touch(k string, exp uint32) (cas uint64, err error)

func (*LeakyBucket) UUID

func (b *LeakyBucket) UUID() (string, error)

func (*LeakyBucket) Update

func (b *LeakyBucket) Update(k string, exp uint32, callback sgbucket.UpdateFunc) (casOut uint64, err error)

func (*LeakyBucket) VBHash

func (b *LeakyBucket) VBHash(docID string) uint32

func (*LeakyBucket) View

func (b *LeakyBucket) View(ddoc, name string, params map[string]interface{}) (sgbucket.ViewResult, error)

func (*LeakyBucket) ViewCustom

func (b *LeakyBucket) ViewCustom(ddoc, name string, params map[string]interface{}, vres interface{}) error

func (*LeakyBucket) ViewQuery

func (b *LeakyBucket) ViewQuery(ddoc, name string, params map[string]interface{}) (sgbucket.QueryResultIterator, error)

func (*LeakyBucket) WaitForIndexOnline

func (b *LeakyBucket) WaitForIndexOnline(indexName string) error

func (*LeakyBucket) Write

func (b *LeakyBucket) Write(k string, flags int, exp uint32, v interface{}, opt sgbucket.WriteOptions) error

func (*LeakyBucket) WriteCas

func (b *LeakyBucket) WriteCas(k string, flags int, exp uint32, cas uint64, v interface{}, opt sgbucket.WriteOptions) (uint64, error)

func (*LeakyBucket) WriteCasWithXattr

func (b *LeakyBucket) WriteCasWithXattr(k string, xattr string, exp uint32, cas uint64, v interface{}, xv interface{}) (casOut uint64, err error)

func (*LeakyBucket) WriteUpdate

func (b *LeakyBucket) WriteUpdate(k string, exp uint32, callback sgbucket.WriteUpdateFunc) (casOut uint64, err error)

func (*LeakyBucket) WriteUpdateWithXattr

func (b *LeakyBucket) WriteUpdateWithXattr(k string, xattr string, exp uint32, previous *sgbucket.BucketDocument, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error)

func (*LeakyBucket) WriteWithXattr

func (b *LeakyBucket) WriteWithXattr(k string, xattrKey string, exp uint32, cas uint64, value []byte, xattrValue []byte, isDelete bool, deleteBody bool) (casOut uint64, err error)

type LeakyBucketConfig

type LeakyBucketConfig struct {
	// Incr() fails N times before finally succeeding
	IncrTemporaryFailCount uint16

	// Allows us to force a number of failed executions of GetDDoc, DeleteDDoc and DropIndex. It will fail the
	// number of times specified in these values and then succeed.
	DDocDeleteErrorCount int
	DDocGetErrorCount    int

	// Allows us to force a specific index to be deleted. We will always error when attempting to delete an index if its
	// name is specified in this slice
	DropIndexErrorNames []string

	// Emulate TAP/DCP feed de-duplication behavior, such that within a
	// window of # of mutations or a timeout, mutations for a given document
	// will be filtered such that only the _latest_ mutation will make it through.
	TapFeedDeDuplication bool
	TapFeedVbuckets      bool     // Emulate vbucket numbers on feed
	TapFeedMissingDocs   []string // Emulate entry not appearing on tap feed

	ForceErrorSetRawKeys []string // Issuing a SetRaw call with a specified key will return an error

	// Returns a partial error the first time ViewCustom is called
	FirstTimeViewCustomPartialError bool
	PostQueryCallback               func(ddoc, viewName string, params map[string]interface{}) // Issues callback after issuing query when bucket.ViewQuery is called

	// WriteUpdateCallback issues additional callback in WriteUpdate after standard callback completes, but prior to document write.  Allows
	// tests to trigger CAS retry handling by modifying the underlying document in a WriteUpdateCallback implementation.
	WriteUpdateCallback func(key string)

	// WriteWithXattrCallback is run before WriteWithXattr is called. This can be used to trigger a CAS retry
	WriteWithXattrCallback func(key string)

	// IncrCallback issues a callback during incr.  Used for sequence allocation race tests
	IncrCallback func()

	// When IgnoreClose is set to true, bucket.Close() is a no-op.  Used when multiple references to a bucket are active.
	IgnoreClose bool
}

The config object that controls the LeakyBucket behavior

type Level

type Level int32
const (
	// DebugLevel logs are typically voluminous, and are usually disabled in
	// production.
	DebugLevel Level = iota - 1
	// InfoLevel is the default logging priority.
	InfoLevel
	// WarnLevel logs are more important than Info, but don't need individual
	// human review.
	WarnLevel
	// ErrorLevel logs are high-priority. If an application is running smoothly,
	// it shouldn't generate any error-level logs.
	ErrorLevel
	// PanicLevel logs a message, then panics.
	PanicLevel
	// FatalLevel logs a message, then calls os.Exit(1).
	FatalLevel
)

By setting DebugLevel to -1, if LogLevel is not set in the logging config it will default to the zero value for int32 (0) which will disable debug logging, InfoLevel logging will be the default output.

func ToDeprecatedLogLevel

func ToDeprecatedLogLevel(logLevel LogLevel) *Level

For transforming a new log level to the old type.

func (*Level) MarshalText

func (l *Level) MarshalText() ([]byte, error)

Implementation of the Go encoding.TextMarshaler interface for the Level type. This method will also be called by the JSON Marshaller.

MarshalText marshals the Level to text. Note that the text representation drops the -Level suffix (see example).

func (Level) String

func (l Level) String() string

String returns a lower-case ASCII representation of the log level.

func (*Level) UnmarshalText

func (l *Level) UnmarshalText(text []byte) error

Implementation of the Go encoding.TextUnmarshaler interface for the Level type. This method will also be called by the JSON Unmarshaller, e.g. when loading from logging configuration.

UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText expects the text representation of a Level to drop the -Level suffix (see example).

In particular, this makes it easy to configure logging levels using YAML, TOML, or JSON files.

type ListenerMap

type ListenerMap map[string][]HeartbeatListener

func (ListenerMap) String

func (l ListenerMap) String() string

Custom string format for ListenerMap logging to only print map keys as slice when logging ListenerMap contents

type LogAppenderConfig

type LogAppenderConfig struct {
	// Filename is the file to write logs to.  Backup log files will be retained
	// in the same directory.  It uses <processname>-lumberjack.log in
	// os.TempDir() if empty.
	LogFilePath    *string            `json:",omitempty"`
	LogKeys        []string           `json:",omitempty"` // Log keywords to enable
	LogLevel       Level              `json:",omitempty"`
	Rotation       *LogRotationConfig `json:",omitempty"`
	RedactionLevel RedactionLevel     `json:",omitempty"`
}

type LogContext

type LogContext struct {
	// CorrelationID is a pre-formatted identifier used to correlate logs.
	// E.g: Either blip context ID or HTTP Serial number.
	CorrelationID string

	// TestName can be a unit test name (from t.Name())
	TestName string

	// TestBucketName is the name of a bucket used during a test
	TestBucketName string
}

LogContext stores values which may be useful to include in logs

type LogContextKey

type LogContextKey struct{}

LogContextKey is used to key a LogContext value

type LogKey

type LogKey uint8

LogKey is a slice index for log keys. Entries in LogKeyMask are stored at 2^LogKey.

const (
	// KeyNone is shorthand for no log keys.
	KeyNone LogKey = iota

	// KeyAll is a wildcard for all log keys.
	KeyAll

	KeyAdmin
	KeyAccess
	KeyAuth
	KeyBucket
	KeyCache
	KeyChanges
	KeyCluster
	KeyCRUD
	KeyDCP
	KeyEvents
	KeyGoCB
	KeyHTTP
	KeyHTTPResp
	KeyImport
	KeyJavascript
	KeyMigrate
	KeyQuery
	KeyReplicate
	KeySync
	KeySyncMsg
	KeyWebSocket
	KeyWebSocketFrame
	KeySGTest

	LogKeyCount // Count for logKeyNames init
)

Values for log keys.

func (LogKey) KeyMaskValue

func (i LogKey) KeyMaskValue() uint64

KeyMaskValue converts a log key index to the bitfield position. e.g. 0->0, 1->1, 2->2, 3->4, 4->8, 5->16...

func (LogKey) String

func (logKey LogKey) String() string

String returns the string representation of one log key.

type LogKeyMask

type LogKeyMask uint64

LogKeyMask is a bitfield of log keys.

func ConsoleLogKey

func ConsoleLogKey() *LogKeyMask

ConsoleLogKey returns the console log key.

func (*LogKeyMask) Disable

func (keyMask *LogKeyMask) Disable(logKey LogKey)

Disable will disable the given logKey in keyMask.

func (*LogKeyMask) Enable

func (keyMask *LogKeyMask) Enable(logKey LogKey)

Enable will enable the given logKey in keyMask.

func (*LogKeyMask) Enabled

func (keyMask *LogKeyMask) Enabled(logKey LogKey) bool

Enabled returns true if the given logKey is enabled in keyMask. Always returns true if KeyAll is enabled in keyMask.

func (*LogKeyMask) EnabledExcludingWildcard

func (keyMask *LogKeyMask) EnabledExcludingWildcard(logKey LogKey) bool

EnabledExcludingWildcard returns true if the given logKey is enabled in keyMask, without treating KeyAll as a wildcard match.

func (*LogKeyMask) EnabledLogKeys

func (keyMask *LogKeyMask) EnabledLogKeys() []string

EnabledLogKeys returns a slice of enabled log key names.

func (*LogKeyMask) Set

func (keyMask *LogKeyMask) Set(logKeyMask *LogKeyMask)

Set will override the keyMask with the given logKeyMask.

func (LogKeyMask) String

func (logKeyMask LogKeyMask) String() string

String returns the string representation of one or more log keys in a LogKeyMask

type LogLevel

type LogLevel uint32

LogLevel is used to represent a log level.

const (
	// LevelNone disables all logging
	LevelNone LogLevel = iota
	// LevelError enables only error logging.
	LevelError
	// LevelWarn enables warn, and error logging.
	LevelWarn
	// LevelInfo enables info, warn, and error logging.
	LevelInfo
	// LevelDebug enables debug, info, warn, and error logging.
	LevelDebug
	// LevelTrace enables trace, debug, info, warn, and error logging.
	LevelTrace
)

func ConsoleLogLevel

func ConsoleLogLevel() *LogLevel

ConsoleLogLevel returns the console log level.

func ToLogLevel

func ToLogLevel(deprecatedLogLevel Level) *LogLevel

For transforming an old log level to the new type.

func (*LogLevel) Enabled

func (l *LogLevel) Enabled(logLevel LogLevel) bool

Enabled returns true if the log level is enabled.

func (*LogLevel) MarshalText

func (l *LogLevel) MarshalText() (text []byte, err error)

MarshalText implements the TextMarshaler interface.

func (*LogLevel) Set

func (l *LogLevel) Set(newLevel LogLevel)

Set will override the log level with the given log level.

func (LogLevel) String

func (l LogLevel) String() string

String returns the string representation of a log level (e.g. "debug" or "warn")

func (LogLevel) StringShort

func (l LogLevel) StringShort() string

StringShort returns the short string representation of a log level (e.g. "DBG" or "WRN")

func (*LogLevel) UnmarshalText

func (l *LogLevel) UnmarshalText(text []byte) error

UnmarshalText implements the TextUnmarshaler interface.

type LogRotationConfig

type LogRotationConfig struct {
	// MaxSize is the maximum size in megabytes of the log file before it gets
	// rotated. It defaults to 100 megabytes.
	MaxSize int `json:",omitempty"`

	// MaxAge is the maximum number of days to retain old log files based on the
	// timestamp encoded in their filename.  Note that a day is defined as 24
	// hours and may not exactly correspond to calendar days due to daylight
	// savings, leap seconds, etc. The default is not to remove old log files
	// based on age.
	MaxAge int `json:",omitempty"`

	// MaxBackups is the maximum number of old log files to retain.  The default
	// is to retain all old log files (though MaxAge may still cause them to get
	// deleted.)
	MaxBackups int `json:",omitempty"`

	// LocalTime determines if the time used for formatting the timestamps in
	// backup files is the computer's local time.  The default is to use UTC
	// time.
	LocalTime bool `json:",omitempty"`
}

type LoggerWriter

type LoggerWriter struct {
	LogKey                LogKey        // The log key to log to, eg, KeyHTTP
	FormattedSerialNumber string        // The request ID
	Request               *http.Request // The request
	QueryValues           url.Values    // A cached copy of the URL query values
}

This provides an io.Writer interface around the base.Infof API

func NewLoggerWriter

func NewLoggerWriter(logKey LogKey, formattedSerialNumber string, req *http.Request, queryValues url.Values) *LoggerWriter

Create a new LoggerWriter

func (*LoggerWriter) Write

func (lw *LoggerWriter) Write(p []byte) (n int, err error)

Write() method to satisfy the io.Writer interface

type LoggingBucket

type LoggingBucket struct {
	// contains filtered or unexported fields
}

A wrapper around a Bucket that transparently adds logging of all the API calls.

func (*LoggingBucket) Add

func (b *LoggingBucket) Add(k string, exp uint32, v interface{}) (added bool, err error)

func (*LoggingBucket) AddRaw

func (b *LoggingBucket) AddRaw(k string, exp uint32, v []byte) (added bool, err error)

func (*LoggingBucket) Append

func (b *LoggingBucket) Append(k string, data []byte) error

func (*LoggingBucket) Close

func (b *LoggingBucket) Close()

func (*LoggingBucket) CouchbaseServerVersion

func (b *LoggingBucket) CouchbaseServerVersion() (major uint64, minor uint64, micro string)

func (*LoggingBucket) Delete

func (b *LoggingBucket) Delete(k string) error

func (*LoggingBucket) DeleteDDoc

func (b *LoggingBucket) DeleteDDoc(docname string) error

func (*LoggingBucket) DeleteWithXattr

func (b *LoggingBucket) DeleteWithXattr(k string, xattr string) error

func (*LoggingBucket) Dump

func (b *LoggingBucket) Dump()

func (*LoggingBucket) Get

func (b *LoggingBucket) Get(k string, rv interface{}) (uint64, error)

func (*LoggingBucket) GetAndTouchRaw

func (b *LoggingBucket) GetAndTouchRaw(k string, exp uint32) (v []byte, cas uint64, err error)

func (*LoggingBucket) GetBulkRaw

func (b *LoggingBucket) GetBulkRaw(keys []string) (map[string][]byte, error)

func (*LoggingBucket) GetDDoc

func (b *LoggingBucket) GetDDoc(docname string, value interface{}) error

func (*LoggingBucket) GetDDocs

func (b *LoggingBucket) GetDDocs(value interface{}) error

func (*LoggingBucket) GetMaxVbno

func (b *LoggingBucket) GetMaxVbno() (uint16, error)

func (*LoggingBucket) GetName

func (b *LoggingBucket) GetName() string

func (*LoggingBucket) GetRaw

func (b *LoggingBucket) GetRaw(k string) (v []byte, cas uint64, err error)

func (*LoggingBucket) GetStatsVbSeqno

func (b *LoggingBucket) GetStatsVbSeqno(maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error)

func (*LoggingBucket) GetUnderlyingBucket

func (b *LoggingBucket) GetUnderlyingBucket() Bucket

GetUnderlyingBucket returns the underlying bucket for the LoggingBucket.

func (*LoggingBucket) GetWithXattr

func (b *LoggingBucket) GetWithXattr(k string, xattr string, rv interface{}, xv interface{}) (cas uint64, err error)

func (*LoggingBucket) GetXattr

func (b *LoggingBucket) GetXattr(k string, xattr string, xv interface{}) (cas uint64, err error)

func (*LoggingBucket) Incr

func (b *LoggingBucket) Incr(k string, amt, def uint64, exp uint32) (uint64, error)

func (*LoggingBucket) IsSupported

func (b *LoggingBucket) IsSupported(feature sgbucket.BucketFeature) bool

func (*LoggingBucket) PutDDoc

func (b *LoggingBucket) PutDDoc(docname string, value interface{}) error

func (*LoggingBucket) Refresh

func (b *LoggingBucket) Refresh() error

func (*LoggingBucket) Remove

func (b *LoggingBucket) Remove(k string, cas uint64) (casOut uint64, err error)

func (*LoggingBucket) Set

func (b *LoggingBucket) Set(k string, exp uint32, v interface{}) error

func (*LoggingBucket) SetBulk

func (b *LoggingBucket) SetBulk(entries []*sgbucket.BulkSetEntry) (err error)

func (*LoggingBucket) SetRaw

func (b *LoggingBucket) SetRaw(k string, exp uint32, v []byte) error

func (*LoggingBucket) StartDCPFeed

func (b *LoggingBucket) StartDCPFeed(args sgbucket.FeedArguments, callback sgbucket.FeedEventCallbackFunc, dbStats *expvar.Map) error

func (*LoggingBucket) StartTapFeed

func (b *LoggingBucket) StartTapFeed(args sgbucket.FeedArguments, dbStats *expvar.Map) (sgbucket.MutationFeed, error)

func (*LoggingBucket) Touch

func (b *LoggingBucket) Touch(k string, exp uint32) (cas uint64, err error)

func (*LoggingBucket) UUID

func (b *LoggingBucket) UUID() (string, error)

func (*LoggingBucket) Update

func (b *LoggingBucket) Update(k string, exp uint32, callback sgbucket.UpdateFunc) (casOut uint64, err error)

func (*LoggingBucket) VBHash

func (b *LoggingBucket) VBHash(docID string) uint32

func (*LoggingBucket) View

func (b *LoggingBucket) View(ddoc, name string, params map[string]interface{}) (sgbucket.ViewResult, error)

func (*LoggingBucket) ViewCustom

func (b *LoggingBucket) ViewCustom(ddoc, name string, params map[string]interface{}, vres interface{}) error

func (*LoggingBucket) ViewQuery

func (b *LoggingBucket) ViewQuery(ddoc, name string, params map[string]interface{}) (sgbucket.QueryResultIterator, error)

func (*LoggingBucket) Write

func (b *LoggingBucket) Write(k string, flags int, exp uint32, v interface{}, opt sgbucket.WriteOptions) error

func (*LoggingBucket) WriteCas

func (b *LoggingBucket) WriteCas(k string, flags int, exp uint32, cas uint64, v interface{}, opt sgbucket.WriteOptions) (uint64, error)

func (*LoggingBucket) WriteCasWithXattr

func (b *LoggingBucket) WriteCasWithXattr(k string, xattr string, exp uint32, cas uint64, v interface{}, xv interface{}) (casOut uint64, err error)

func (*LoggingBucket) WriteUpdate

func (b *LoggingBucket) WriteUpdate(k string, exp uint32, callback sgbucket.WriteUpdateFunc) (casOut uint64, err error)

func (*LoggingBucket) WriteUpdateWithXattr

func (b *LoggingBucket) WriteUpdateWithXattr(k string, xattr string, exp uint32, previous *sgbucket.BucketDocument, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error)

func (*LoggingBucket) WriteWithXattr

func (b *LoggingBucket) WriteWithXattr(k string, xattrKey string, exp uint32, cas uint64, value []byte, xattrValue []byte, isDelete bool, deleteBody bool) (casOut uint64, err error)

type LoggingConfig

type LoggingConfig struct {
	LogFilePath          string              `json:"log_file_path,omitempty"`   // Absolute or relative path on the filesystem to the log file directory. A relative path is from the directory that contains the Sync Gateway executable file.
	RedactionLevel       RedactionLevel      `json:"redaction_level,omitempty"` // Redaction level to apply to log output.
	Console              ConsoleLoggerConfig `json:"console,omitempty"`         // Console output
	Error                FileLoggerConfig    `json:"error,omitempty"`           // Error log file output
	Warn                 FileLoggerConfig    `json:"warn,omitempty"`            // Warn log file output
	Info                 FileLoggerConfig    `json:"info,omitempty"`            // Info log file output
	Debug                FileLoggerConfig    `json:"debug,omitempty"`           // Debug log file output
	Trace                FileLoggerConfig    `json:"trace,omitempty"`           // Trace log file output
	Stats                FileLoggerConfig    `json:"stats,omitempty"`           // Stats log file output
	DeprecatedDefaultLog *LogAppenderConfig  `json:"default,omitempty"`         // Deprecated "default" logging option.
}

func (*LoggingConfig) Init

func (c *LoggingConfig) Init(defaultLogFilePath string) (warnings []DeferredLogFn, err error)

Init will initialize logging, return any warnings that need to be logged at a later time.

type Metadata

type Metadata string

Metadata is a type which implements the Redactor interface for logging purposes of metadata.

Metadata is logical data needed by Couchbase to store and process User data. - Cluster name - Bucket names - DDoc/view names - View code - Index names - Mapreduce Design Doc Name and Definition (IP) - XDCR Replication Stream Names - And other couchbase resource specific meta data

func (Metadata) Redact

func (md Metadata) Redact() string

Redact tags the string with Metadata tags for post-processing.

func (Metadata) String

func (md Metadata) String() string

type N1QLBucket

type N1QLBucket interface {
	Bucket
	Query(statement string, params interface{}, consistency gocb.ConsistencyMode, adhoc bool) (results gocb.QueryResults, err error)
	ExplainQuery(statement string, params interface{}) (plain map[string]interface{}, err error)
	CreateIndex(indexName string, expression string, filterExpression string, options *N1qlIndexOptions) error
	BuildDeferredIndexes(indexSet []string) error
	CreatePrimaryIndex(indexName string, options *N1qlIndexOptions) error
	WaitForIndexOnline(indexName string) error
	GetIndexMeta(indexName string) (exists bool, meta *gocb.IndexInfo, err error)
	DropIndex(indexName string) error
}

type N1qlIndexOptions

type N1qlIndexOptions struct {
	NumReplica      uint `json:"num_replica,omitempty"`          // Number of replicas
	IndexTombstones bool `json:"retain_deleted_xattr,omitempty"` // Whether system xattrs on tombstones should be indexed
	DeferBuild      bool `json:"defer_build,omitempty"`          // Whether to defer initial build of index (requires a subsequent BUILD INDEX invocation)
}

IndexOptions used to build the 'with' clause

type NoPasswordAuthHandler

type NoPasswordAuthHandler struct {
	Handler AuthHandler
}

NoPasswordAuthHandler is used for client cert-based auth by cbdatasource

func (NoPasswordAuthHandler) GetCredentials

func (nph NoPasswordAuthHandler) GetCredentials() (username string, password string, bucketname string)

type RangeSafeCollection

type RangeSafeCollection struct {
	// contains filtered or unexported fields
}

RangeSafeCollection is a concurrency-safe collection comprising an AppendOnlyList and a map for key-based retrieval of list elements. It has the following characteristics

  • concurrency-safe
  • snapshot-based iteration, Range doesn't block Append
  • key-based access to entries

func NewRangeSafeCollection

func NewRangeSafeCollection() *RangeSafeCollection

NewRangeSafeCollection creates and initializes a new RangeSafeCollection

func (*RangeSafeCollection) Get

func (r *RangeSafeCollection) Get(key string) (value interface{}, ok bool)

Get performs map-style retrieval from the collection.

func (*RangeSafeCollection) GetOrInsert

func (r *RangeSafeCollection) GetOrInsert(key string, value interface{}) (actual interface{}, created bool, length int)

GetOrInsert returns the value of the specified key if already present in the collection - otherwise will append the element to the collection. Does not block on active Range operations.

func (*RangeSafeCollection) Init

func (r *RangeSafeCollection) Init()

Init initializes or resets a RangeSafeCollection

func (*RangeSafeCollection) Length

func (r *RangeSafeCollection) Length() int

Length returns the length of the collection.

func (*RangeSafeCollection) Range

func (r *RangeSafeCollection) Range(f func(value interface{}) bool)

Range iterates over the set up to the last element at the time range was called, invoking the callback function with the element value.

func (*RangeSafeCollection) RangeElements

func (r *RangeSafeCollection) RangeElements(f func(item *AppendOnlyListElement) bool)

RangeElements iterates over the set up to the last element at the time range was called, invoking the callback function with the element. Exposed to facilitate subsequent invocation of RemoveElements. Range function invocations (f) occur while holding a read lock for r.iterateLock - calls to r.Remove/r.RemoveElements or nested range invocations within f will deadlock.

func (*RangeSafeCollection) Remove

func (r *RangeSafeCollection) Remove(key string) (length int)

Remove removes an entry from the collection, and returns the new length of the collection. Obtains both the iterate and value lock, so will block on active Range operations.

func (*RangeSafeCollection) RemoveElements

func (r *RangeSafeCollection) RemoveElements(elements []*AppendOnlyListElement) (length int)

RemoveElements removes a set of list elements from the collection, and returns the new length of the collection. Obtains both the iterate and value lock, so will block on active Range operations.

type RedactableError

type RedactableError struct {
	// contains filtered or unexported fields
}

A redactable error can be used as a drop-in replacement for a base error (as would have been created via fmt.Errorf), which has the ability to redact any sensitive user data by calling redact() on all of its stored args.

func RedactErrorf

func RedactErrorf(fmt string, args ...interface{}) *RedactableError

Create a new redactable error. Same signature as fmt.Errorf() for easy drop-in replacement.

func (*RedactableError) Error

func (re *RedactableError) Error() string

Satisfy error interface

func (*RedactableError) Redact

func (re *RedactableError) Redact() string

Satisfy redact interface

type RedactionLevel

type RedactionLevel int
const (
	RedactNone RedactionLevel = iota
	RedactPartial
	RedactFull
)

func (*RedactionLevel) MarshalText

func (l *RedactionLevel) MarshalText() ([]byte, error)

MarshalText marshals the RedactionLevel to text.

func (RedactionLevel) String

func (l RedactionLevel) String() string

String returns a lower-case ASCII representation of the log redaction level.

func (*RedactionLevel) UnmarshalText

func (l *RedactionLevel) UnmarshalText(text []byte) error

UnmarshalText unmarshals text to a RedactionLevel.

type Redactor

type Redactor interface {
	// Redact returns the given string in a redacted form. This may be tagged,
	// changed, hashed, or removed completely depending on desired behaviour.
	Redact() string
	// String returns the non-redacted form of the given string.
	String() string
}

Redactor provides an interface for log redaction.

type RedactorFunc

type RedactorFunc func() Redactor

This allows for lazy evaluation for a Redactor. Means that we don't have to process redaction unless we are definitely performing a redaction

func MD

func MD(i interface{}) RedactorFunc

MD returns a Metadata type for any given value.

func SD

func SD(i interface{}) RedactorFunc

SD returns a SystemData type for any given value.

func UD

func UD(i interface{}) RedactorFunc

UD returns a UserData type for any given value.

func (RedactorFunc) Redact

func (redactorFunc RedactorFunc) Redact() string

func (RedactorFunc) String

func (redactorFunc RedactorFunc) String() string

type RedactorSlice

type RedactorSlice []Redactor

func (RedactorSlice) Redact

func (redactorSlice RedactorSlice) Redact() string

func (RedactorSlice) String

func (redactorSlice RedactorSlice) String() string

type Replicator

type Replicator struct {
	// contains filtered or unexported fields
}

Replication manager

func NewReplicator

func NewReplicator() *Replicator

func (*Replicator) ActiveTasks

func (r *Replicator) ActiveTasks() []Task

ActiveTasks returns the tasks for active replications.

func (*Replicator) Replicate

func (r *Replicator) Replicate(params sgreplicate.ReplicationParameters, isCancel bool) (*Task, error)

Replicate starts or stops the replication for the given parameters.

func (*Replicator) StopReplications

func (r *Replicator) StopReplications() error

StopReplications stops all active replications.

type RetryCasWorker

type RetryCasWorker func() (shouldRetry bool, err error, value uint64)

type RetrySleeper

type RetrySleeper func(retryCount int) (shouldContinue bool, timeTosleepMs int)

A retry sleeper is called back by the retry loop and passed the current retryCount; it should return whether the loop should continue, along with the number of milliseconds the retry should sleep before the next attempt.

func CreateDoublingSleeperFunc

func CreateDoublingSleeperFunc(maxNumAttempts, initialTimeToSleepMs int) RetrySleeper

Create a RetrySleeper that will double the retry time on every iteration and use the given parameters. The longest single wait time can be calculated with: initialTimeToSleepMs * 2^maxNumAttempts. The total wait time can be calculated with: initialTimeToSleepMs * (2^(maxNumAttempts+1) - 1).

func CreateIndefiniteMaxDoublingSleeperFunc

func CreateIndefiniteMaxDoublingSleeperFunc(initialTimeToSleepMs int, maxSleepPerRetryMs int) RetrySleeper

CreateIndefiniteMaxDoublingSleeperFunc is similar to CreateMaxDoublingSleeperFunc, with the exception that there is no number of maximum retries.

func CreateMaxDoublingSleeperFunc

func CreateMaxDoublingSleeperFunc(maxNumAttempts, initialTimeToSleepMs int, maxSleepPerRetryMs int) RetrySleeper

Create a RetrySleeper that will double the retry time on every iteration, with each sleep not exceeding maxSleepPerRetryMs.

func CreateSleeperFunc

func CreateSleeperFunc(maxNumAttempts, timeToSleepMs int) RetrySleeper

Create a sleeper function that sleeps up to maxNumAttempts, sleeping timeToSleepMs each attempt

func SleeperFuncCtx

func SleeperFuncCtx(sleeperFunc RetrySleeper, ctx context.Context) RetrySleeper

SleeperFuncCtx wraps the given RetrySleeper with a context, so it can be cancelled, or have a deadline.

type RetryTimeoutError

type RetryTimeoutError struct {
	// contains filtered or unexported fields
}

func NewRetryTimeoutError

func NewRetryTimeoutError(description string, attempts int) *RetryTimeoutError

func (*RetryTimeoutError) Error

func (r *RetryTimeoutError) Error() string

type RetryUntilTrueFunc

type RetryUntilTrueFunc func() bool

testRetryUntilTrue performs a short sleep-based retry loop until the timeout is reached or the criteria in RetryUntilTrueFunc is met. Intended to avoid arbitrarily long sleeps in tests that don't have any alternative to polling. Default sleep time is 50ms, timeout is 10s. Can be customized with testRetryUntilTrueCustom

type RetryWorker

type RetryWorker func() (shouldRetry bool, err error, value interface{})

A RetryWorker encapsulates the work being done in a Retry Loop. The shouldRetry return value determines whether the worker will retry, regardless of the err value. If the worker has exceeded its retry attempts, then it will not be called again even if it returns shouldRetry = true.

type SGDest

type SGDest interface {
	cbgt.Dest
	// contains filtered or unexported methods
}

func NewDCPDest

func NewDCPDest(callback sgbucket.FeedEventCallbackFunc, bucket Bucket, maxVbNo uint16, persistCheckpoints bool, dcpStats *expvar.Map, feedID string, importPartitionStat *expvar.Int) (SGDest, context.Context)

type SGTranscoder

type SGTranscoder struct {
}

func (SGTranscoder) Decode

func (t SGTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error

Decode applies the default Couchbase transcoding behaviour to decode into a Go type.

func (SGTranscoder) Encode

func (t SGTranscoder) Encode(value interface{}) ([]byte, uint32, error)

Encode applies the default Couchbase transcoding behaviour to encode a Go type. Figures out how to convert the given struct into bytes and then sets the json flag.

type SequenceTimingExpvar

type SequenceTimingExpvar struct {
	// contains filtered or unexported fields
}

SequenceTimingExpvar attempts to track timing information for targeted sequences as they move through the system. Creates a map that looks like the following, where Indexed, Polled, Changes are the incoming stages, the values are nanosecond timestamps, and the sequences are the target sequences, based on the specified vb and frequency (in the example frequency=1000). Since we won't necessarily see every vb sequence, we track the first sequence we see higher than the target frequency. (e.g. if our last sequence was 1000 and frequency is 1000, it will track the first sequence seen higher than 2000). Note: Frequency needs to be high enough that a sequence can move through the system before the next sequence is seen, otherwise earlier stages could be updating current before the later stages have processed it.

{
	"timingMap": {
		"seq1000.Indexed" :  4738432432,
		"seq1000.Polled" : 5743785947,
		"seq1000.Changes" :
		"seq2002.Indexed" :  4738432432,
		"seq2002.Polled" : 5743785947,
		"seq2002.Changes" :
	}
}

func NewSequenceTimingExpvar

func NewSequenceTimingExpvar(frequency uint64, targetVbNo uint16, name string) SequenceTimingExpvar

func (*SequenceTimingExpvar) String

func (s *SequenceTimingExpvar) String() string

func (*SequenceTimingExpvar) UpdateBySequence

func (s *SequenceTimingExpvar) UpdateBySequence(stage string, vbNo uint16, seq uint64)

func (*SequenceTimingExpvar) UpdateBySequenceAt

func (s *SequenceTimingExpvar) UpdateBySequenceAt(stage string, vbNo uint16, seq uint64, time time.Time)

func (*SequenceTimingExpvar) UpdateBySequenceRange

func (s *SequenceTimingExpvar) UpdateBySequenceRange(stage string, vbNo uint16, startSeq uint64, endSeq uint64)

Update by sequence range is used for events (like clock polling) that don't see every sequence. Writes when current target sequence is in range. Assumes callers don't report overlapping ranges

type Set

type Set map[string]present

A set of strings, represented as a map.

func SetFromArray

func SetFromArray(names []string) Set

Creates a new Set from an array of strings.

func SetOf

func SetOf(names ...string) Set

Creates a new Set from zero or more inline string arguments.

func (Set) Add

func (set Set) Add(value string) Set

Adds a value to a set

func (Set) Contains

func (set Set) Contains(ch string) bool

Returns true if the set includes the channel.

func (Set) Equals

func (set Set) Equals(other Set) bool

func (Set) MarshalJSON

func (set Set) MarshalJSON() ([]byte, error)

func (Set) Removing

func (set Set) Removing(str string) Set

Returns a set with any instance of 'str' removed

func (Set) String

func (set Set) String() string

func (Set) ToArray

func (set Set) ToArray() []string

Converts a Set to an array of strings (ordering is undefined).

func (Set) Union

func (set Set) Union(other Set) Set

Returns the union of two sets as a new set.

func (*Set) UnmarshalJSON

func (setPtr *Set) UnmarshalJSON(data []byte) error

func (Set) Update

func (set Set) Update(other Set) Set

Updates the set based on the contents of another set

func (Set) UpdateWithSlice

func (set Set) UpdateWithSlice(slice []string) Set

type SimpleFeed

type SimpleFeed struct {
	// contains filtered or unexported fields
}

func (*SimpleFeed) Close

func (s *SimpleFeed) Close() error

func (*SimpleFeed) Events

func (s *SimpleFeed) Events() <-chan sgbucket.FeedEvent

func (*SimpleFeed) WriteEvents

func (s *SimpleFeed) WriteEvents() chan<- sgbucket.FeedEvent

type SortedUint64Slice

type SortedUint64Slice []uint64

SortedUint64Slice attaches the methods of sort.Interface to []uint64, sorting in increasing order.

func (SortedUint64Slice) Len

func (s SortedUint64Slice) Len() int

func (SortedUint64Slice) Less

func (s SortedUint64Slice) Less(i, j int) bool

func (SortedUint64Slice) Sort

func (s SortedUint64Slice) Sort()

Sort is a convenience method.

func (SortedUint64Slice) Swap

func (s SortedUint64Slice) Swap(i, j int)

type SystemData

type SystemData string

SystemData is a type which implements the Redactor interface for logging purposes of system data.

System data is data from other parts of the system Couchbase interacts with over the network - IP addresses - IP tables - Host names - Ports - DNS topology

func (SystemData) Redact

func (sd SystemData) Redact() string

Redact tags the string with SystemData tags for post-processing.

func (SystemData) String

func (sd SystemData) String() string

type TBPBucketInitFunc

type TBPBucketInitFunc func(ctx context.Context, b Bucket, tbp *TestBucketPool) error

TBPBucketInitFunc is a function that is run once (synchronously) when creating/opening a bucket.

var NoopInitFunc TBPBucketInitFunc = func(ctx context.Context, b Bucket, tbp *TestBucketPool) error {
	return nil
}

NoopInitFunc does nothing to init a bucket. This can be used in conjunction with FlushBucketReadier when there's no requirement for views/GSI.

var PrimaryIndexInitFunc TBPBucketInitFunc = func(ctx context.Context, b Bucket, tbp *TestBucketPool) error {
	gocbBucket, ok := AsGoCBBucket(b)
	if !ok {
		tbp.Logf(ctx, "skipping primary index creation for non-gocb bucket")
		return nil
	}

	if hasPrimary, _, err := gocbBucket.getIndexMetaWithoutRetry(PrimaryIndexName); err != nil {
		return err
	} else if !hasPrimary {
		err := gocbBucket.CreatePrimaryIndex(PrimaryIndexName, nil)
		if err != nil {
			return err
		}
	}
	return nil
}

PrimaryIndexInitFunc creates a primary index on the given bucket. This can then be used with N1QLBucketEmptierFunc, for improved compatibility with GSI. Will be used when GSI is re-enabled (CBG-813)

type TBPBucketReadierFunc

type TBPBucketReadierFunc func(ctx context.Context, b *CouchbaseBucketGoCB, tbp *TestBucketPool) error

TBPBucketReadierFunc is a function that runs once a test is finished with a bucket. This runs asynchronously.

var FlushBucketEmptierFunc TBPBucketReadierFunc = func(ctx context.Context, b *CouchbaseBucketGoCB, tbp *TestBucketPool) error {
	return b.Flush()
}

FlushBucketEmptierFunc ensures the bucket is empty by flushing. It is not recommended to use with GSI.

var N1QLBucketEmptierFunc TBPBucketReadierFunc = func(ctx context.Context, b *CouchbaseBucketGoCB, tbp *TestBucketPool) error {
	if hasPrimary, _, err := b.getIndexMetaWithoutRetry(PrimaryIndexName); err != nil {
		return err
	} else if !hasPrimary {
		return fmt.Errorf("bucket does not have primary index, so can't empty bucket using N1QL")
	}

	if itemCount, err := b.QueryBucketItemCount(); err != nil {
		return err
	} else if itemCount == 0 {
		tbp.Logf(ctx, "Bucket already empty - skipping")
	} else {
		tbp.Logf(ctx, "Bucket not empty (%d items), emptying bucket via N1QL", itemCount)

		res, err := b.Query(`DELETE FROM $_bucket`, nil, gocb.RequestPlus, false)
		if err != nil {
			return err
		}
		_ = res.Close()
	}

	return nil
}

N1QLBucketEmptierFunc ensures the bucket is empty by using N1QL deletes. This is the preferred approach when using GSI. Will be used when GSI is re-enabled (CBG-813)

type TapFeed

type TapFeed sgbucket.MutationFeed

type Task

type Task struct {
	TaskType         string      `json:"type"`
	ReplicationID    string      `json:"replication_id"`
	Continuous       bool        `json:"continuous"`
	Source           string      `json:"source"`
	Target           string      `json:"target"`
	DocsRead         int64       `json:"docs_read"`
	DocsWritten      int64       `json:"docs_written"`
	DocWriteFailures int64       `json:"doc_write_failures"`
	StartLastSeq     int64       `json:"start_last_seq"`
	EndLastSeq       interface{} `json:"end_last_seq"`
}

type TestAuthenticator

type TestAuthenticator struct {
	Username   string
	Password   string
	BucketName string
}

func (TestAuthenticator) GetCredentials

func (t TestAuthenticator) GetCredentials() (username, password, bucketname string)

type TestBucket

type TestBucket struct {
	Bucket
	BucketSpec BucketSpec
	// contains filtered or unexported fields
}

func GetTestBucket

func GetTestBucket(t testing.TB) *TestBucket

func (TestBucket) Close

func (tb TestBucket) Close()

func (*TestBucket) NoCloseClone

func (tb *TestBucket) NoCloseClone() *TestBucket

NoCloseClone returns a new test bucket referencing the same underlying bucket and bucketspec, but with an IgnoreClose leaky bucket, and a no-op close function. Used when multiple references to the same bucket are needed.

type TestBucketPool

type TestBucketPool struct {
	// contains filtered or unexported fields
}

TestBucketPool is used to manage a pool of gocb buckets on a Couchbase Server for testing purposes. The zero-value/uninitialized version of this struct is safe to use as Walrus buckets are returned.

var GTestBucketPool *TestBucketPool

GTestBucketPool is a global instance of a TestBucketPool used to manage a pool of buckets for integration testing.

func NewTestBucketPool

func NewTestBucketPool(bucketReadierFunc TBPBucketReadierFunc, bucketInitFunc TBPBucketInitFunc) *TestBucketPool

NewTestBucketPool initializes a new TestBucketPool. To be called from TestMain for packages requiring test buckets.

func (*TestBucketPool) Close

func (tbp *TestBucketPool) Close()

Close waits for any buckets to be cleaned, and closes the pool.

func (*TestBucketPool) GetTestBucketAndSpec

func (tbp *TestBucketPool) GetTestBucketAndSpec(t testing.TB) (b Bucket, s BucketSpec, teardownFn func())

GetTestBucketAndSpec returns a bucket to be used during a test. The returned teardownFn MUST be called once the test is done, which closes the bucket, readies it for a new test, and releases back into the pool.

func (*TestBucketPool) Logf

func (tbp *TestBucketPool) Logf(ctx context.Context, format string, args ...interface{})

Logf formats the given test bucket logging and logs to stderr.

func (*TestBucketPool) NumUsableBuckets

func (tbp *TestBucketPool) NumUsableBuckets() int

NumUsableBuckets returns the total number of buckets in the pool that can be used by a test.

type TimingStatus

type TimingStatus int
const (
	TimingStatusCurrent TimingStatus = iota
	TimingStatusNext
	TimingStatusNone
	TimingStatusInit
)

type UserData

type UserData string

UserData is a type which implements the Redactor interface for logging purposes of user data.

User data is data that is stored into Couchbase by the application user account:
- Key and value pairs in JSON documents, or the key exclusively
- Application/Admin usernames that identify the human person
- Query statements included in the log file collected by support that leak the document fields (Select floor_price from stock).
- Names and email addresses asked during product registration and alerting
- Usernames
- Document xattrs

func (UserData) Redact

func (ud UserData) Redact() string

Redact tags the string with UserData tags for post-processing.

func (UserData) String

func (ud UserData) String() string

type XattrEnabledDesignDoc

type XattrEnabledDesignDoc struct {
	*gocb.DesignDocument
	IndexXattrOnTombstones bool `json:"index_xattr_on_deleted_docs,omitempty"`
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL