cbft

package module
v0.0.0-...-c37f13c
Published: Dec 9, 2014 License: Apache-2.0 Imports: 28 Imported by: 0

README

cbft

Couchbase Full Text engine

This project integrates the bleve full-text search engine and Couchbase Server.

LICENSE: Apache 2.0

Usage

Getting

go get -u github.com/couchbaselabs/cbft

Running against local Couchbase Server

./cbft -server http://localhost:8091

Create a new index (for the default bucket)

curl -XPUT http://localhost:8095/api/index/default

Check how many documents are indexed

curl http://localhost:8095/api/index/default/count

Submit search query

curl -XPOST -d '{"query":{"size":10,"query":{"query":"your-search-term"}}}' --header Content-Type:text/json http://localhost:8095/api/index/default/query
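
The same query can be issued programmatically. A minimal Go sketch, assuming the default 8095 REST port used above and the BleveQueryParams request shape documented further below (the index name and search term are placeholders):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// The request body follows the BleveQueryParams shape: a bleve
	// SearchRequest under the "query" key.
	body := []byte(`{"query":{"size":10,"query":{"query":"your-search-term"}}}`)

	resp, err := http.Post("http://localhost:8095/api/index/default/query",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	result, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(result))
}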

Delete index

curl -XDELETE http://localhost:8095/api/index/default

For cbft developers

Getting

mkdir -p $GOPATH/src/github.com/couchbaselabs
cd $GOPATH/src/github.com/couchbaselabs
git clone git://github.com/couchbaselabs/cbft.git

Building

make

Unit tests

make test

To get local coverage reports with heatmaps...

make coverage

To get more coverage reports that include dependencies like the bleve library...

go test -coverpkg github.com/couchbaselabs/cbft,github.com/blevesearch/bleve,github.com/blevesearch/bleve/index -coverprofile=coverage.out -covermode=count && go tool cover -html=coverage.out

Documentation

Index

Constants

const BLEVE_DEST_APPLY_BUF_SIZE_BYTES = 200000
const BLEVE_DEST_INITIAL_BUF_SIZE_BYTES = 20000
const FEED_BACKOFF_FACTOR = 1.5
const FEED_SLEEP_INIT_MS = 100
const FEED_SLEEP_MAX_MS = 10000

Default values for feed parameters.

const INDEX_DEFS_KEY = "indexDefs"
const JANITOR_CLOSE_PINDEX = "janitor_close_pindex"
const JANITOR_REMOVE_PINDEX = "janitor_remove_pindex"
const NODE_DEFS_KEY = "nodeDefs"
const NODE_DEFS_KNOWN = "known"
const NODE_DEFS_WANTED = "wanted"
const PINDEX_META_FILENAME string = "PINDEX_META"
const PLAN_PINDEXES_KEY = "planPIndexes"
const VERSION = "2.1.0"

NOTE: You *must* update VERSION if you change what's stored in the Cfg (such as the JSON/struct definitions or planning algorithms).

const VERSION_KEY = "version"
const WORK_KICK = "kick"
const WORK_NOOP = ""

Variables

This section is empty.

Functions

func CalcPIndexesDelta

func CalcPIndexesDelta(mgrUUID string,
	currPIndexes map[string]*PIndex,
	wantedPlanPIndexes *PlanPIndexes) (
	addPlanPIndexes []*PlanPIndex,
	removePIndexes []*PIndex)

Functionally determine the delta of which pindexes need creation and which should be shut down on our local node (mgrUUID).

func CfgNodeDefsKey

func CfgNodeDefsKey(kind string) string

func CfgSetIndexDefs

func CfgSetIndexDefs(cfg Cfg, indexDefs *IndexDefs, cas uint64) (uint64, error)

func CfgSetNodeDefs

func CfgSetNodeDefs(cfg Cfg, kind string, nodeDefs *NodeDefs,
	cas uint64) (uint64, error)

func CfgSetPlanPIndexes

func CfgSetPlanPIndexes(cfg Cfg, planPIndexes *PlanPIndexes, cas uint64) (uint64, error)

func CheckVersion

func CheckVersion(cfg Cfg, myVersion string) (bool, error)

Returns true if the given version is modern enough to modify the Cfg. Older versions (running with older JSON/struct definitions or planning algorithms) will see false from their CheckVersion() calls.

func CouchbasePartitions

func CouchbasePartitions(sourceType, sourceName, sourceUUID, sourceParams,
	server string) ([]string, error)

func CountAlias

func CountAlias(mgr *Manager, indexName, indexUUID string) (uint64, error)

func CountBlevePIndexImpl

func CountBlevePIndexImpl(mgr *Manager, indexName, indexUUID string) (uint64, error)

func DataSourcePartitions

func DataSourcePartitions(sourceType, sourceName, sourceUUID, sourceParams,
	server string) ([]string, error)

func DestFeedPartitions

func DestFeedPartitions(sourceType, sourceName, sourceUUID, sourceParams,
	server string) ([]string, error)

func ExponentialBackoffLoop

func ExponentialBackoffLoop(name string,
	f func() int,
	startSleepMS int,
	backoffFactor float32,
	maxSleepMS int)

Calls f() in a loop, sleeping with an exponential backoff when needed. The provided f() function should return < 0 to stop the loop and >= 0 to continue the loop, where > 0 means there was progress, which allows an immediate retry of f() with no sleeping. A return of < 0 is useful when f() will never make any future progress.
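
A usage sketch: the attemptConnect helper and the loop name are hypothetical, and the documented feed defaults are reused for the sleep parameters.

package main

import (
	"errors"
	"log"

	"github.com/couchbaselabs/cbft"
)

// attemptConnect is a hypothetical operation to be retried with backoff.
func attemptConnect() error { return errors.New("not ready yet") }

func main() {
	attempts := 0
	cbft.ExponentialBackoffLoop("example-connect",
		func() int {
			attempts++
			if attemptConnect() == nil {
				return -1 // Success; stop the loop.
			}
			if attempts >= 5 {
				return -1 // Give up; no future progress expected.
			}
			return 0 // No progress; sleep with backoff, then retry.
		},
		cbft.FEED_SLEEP_INIT_MS,  // 100ms initial sleep.
		cbft.FEED_BACKOFF_FACTOR, // 1.5x backoff factor.
		cbft.FEED_SLEEP_MAX_MS)   // 10s maximum sleep between retries.
	log.Printf("stopped after %d attempts", attempts)
}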

func FeedName

func FeedName(pindex *PIndex) string

func NewBlackHolePIndexImpl

func NewBlackHolePIndexImpl(indexType, indexParams, path string, restart func()) (
	PIndexImpl, Dest, error)

func NewBlevePIndexImpl

func NewBlevePIndexImpl(indexType, indexParams, path string, restart func()) (
	PIndexImpl, Dest, error)

func NewManagerRESTRouter

func NewManagerRESTRouter(mgr *Manager, staticDir, staticETag string, mr *MsgRing) (
	*mux.Router, error)

func NewPIndexImpl

func NewPIndexImpl(indexType, indexParams, path string, restart func()) (
	PIndexImpl, Dest, error)

func NewUUID

func NewUUID() string

func OpenBlackHolePIndexImpl

func OpenBlackHolePIndexImpl(indexType, path string, restart func()) (
	PIndexImpl, Dest, error)

func OpenBlevePIndexImpl

func OpenBlevePIndexImpl(indexType, path string, restart func()) (PIndexImpl, Dest, error)

func OpenPIndexImpl

func OpenPIndexImpl(indexType, path string, restart func()) (PIndexImpl, Dest, error)

func PIndexMatchesPlan

func PIndexMatchesPlan(pindex *PIndex, planPIndex *PlanPIndex) bool

Returns true if the PIndex matches the PlanPIndex, ignoring UUID.

func PIndexPath

func PIndexPath(dataDir, pindexName string) string

func ParsePIndexPath

func ParsePIndexPath(dataDir, pindexPath string) (string, bool)

func ParsePartitionsToVBucketIds

func ParsePartitionsToVBucketIds(dests map[string]Dest) ([]uint16, error)

func PlanPIndexName

func PlanPIndexName(indexDef *IndexDef, sourcePartitions string) string

NOTE: PlanPIndex.Name must be unique across the cluster and ideally functionally derived from the indexDef so that the SamePlanPIndex() comparison works even if concurrent planners are racing to calculate plans.

NOTE: We can't use sourcePartitions directly as part of a PlanPIndex.Name suffix because in vbucket/hash partitioning the string would be too long -- since PIndexes might use PlanPIndex.Name for filesystem paths.

func PlanPIndexNodeCanRead

func PlanPIndexNodeCanRead(p *PlanPIndexNode) bool

func PlanPIndexNodeCanWrite

func PlanPIndexNodeCanWrite(p *PlanPIndexNode) bool

func PlannerCheckVersion

func PlannerCheckVersion(cfg Cfg, version string) error

func QueryAlias

func QueryAlias(mgr *Manager, indexName, indexUUID string,
	req []byte, res io.Writer) error

func QueryBlevePIndexImpl

func QueryBlevePIndexImpl(mgr *Manager, indexName, indexUUID string,
	req []byte, res io.Writer) error

func RegisterFeedType

func RegisterFeedType(sourceType string, f *FeedType)

func RegisterPIndexImplType

func RegisterPIndexImplType(indexType string, t *PIndexImplType)

func RewriteURL

func RewriteURL(to string, h http.Handler) http.Handler

func SamePlanPIndex

func SamePlanPIndex(a, b *PlanPIndex) bool

Returns true if both PlanPIndexes are the same, ignoring PlanPIndex.UUID.

func SamePlanPIndexes

func SamePlanPIndexes(a, b *PlanPIndexes) bool

Returns true if both PlanPIndexes are the same, where we ignore any differences in UUID or ImplVersion.

func StartDCPFeed

func StartDCPFeed(mgr *Manager, feedName, indexName, indexUUID,
	sourceType, bucketName, bucketUUID, params string, dests map[string]Dest) error

func StartTAPFeed

func StartTAPFeed(mgr *Manager, feedName, indexName, indexUUID,
	sourceType, bucketName, bucketUUID, params string, dests map[string]Dest) error

func StringsIntersectStrings

func StringsIntersectStrings(a, b []string) []string

StringsIntersectStrings returns a newly allocated slice containing the intersection of a and b.

func StringsRemoveStrings

func StringsRemoveStrings(stringArr, removeArr []string) []string

StringsRemoveStrings returns a copy of stringArr, but with some strings removed, keeping the same order as stringArr.

func StringsToMap

func StringsToMap(strsArr []string) map[string]bool

func SubsetPlanPIndexes

func SubsetPlanPIndexes(a, b *PlanPIndexes) bool

Returns true if the PlanPIndex children in a are a subset of those in b, using SamePlanPIndex() for the sameness comparison.

func SyncWorkReq

func SyncWorkReq(ch chan *WorkReq, op, msg string, obj interface{}) error

func ValidateAlias

func ValidateAlias(indexType, indexName, indexParams string) error

func ValidateBlevePIndexImpl

func ValidateBlevePIndexImpl(indexType, indexName, indexParams string) error

func VersionGTE

func VersionGTE(x, y string) bool

Compares two dotted version strings, like "1.0.1" and "1.2.3". Returns true when x >= y.

Types

type AliasParams

type AliasParams struct {
	Targets map[string]*AliasParamsTarget `json:"targets"` // Keyed by indexName.
}

AliasParams holds the definition for a user-defined index alias. A user-defined index alias can be used as a level of indirection (the "LastQuartersSales" alias currently points to the "2014-Q3-Sales" index, but the administrator might repoint it in the future without changing the application) or to scatter-gather or fan-out a query across multiple real indexes (e.g., to query across customer records, the product catalog, call-center records, etc., in one shot).
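
For illustration, a sketch that builds alias params for two hypothetical target indexes (the names echo the example above); the resulting JSON could serve as the params of an index whose type is "alias".

package main

import (
	"encoding/json"
	"fmt"

	"github.com/couchbaselabs/cbft"
)

func main() {
	// An alias that fans out over two hypothetical indexes; the
	// optional IndexUUID pin is left empty here.
	params := cbft.AliasParams{
		Targets: map[string]*cbft.AliasParamsTarget{
			"2014-Q3-Sales": &cbft.AliasParamsTarget{},
			"2014-Q4-Sales": &cbft.AliasParamsTarget{},
		},
	}

	b, _ := json.Marshal(&params)
	fmt.Println(string(b)) // Candidate indexParams for an index of type "alias".
}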

type AliasParamsTarget

type AliasParamsTarget struct {
	IndexUUID string `json:"indexUUID"` // Optional.
}

type BlackHole

type BlackHole struct {
	// contains filtered or unexported fields
}

Implements both the Dest and PIndexImpl interfaces.

func (*BlackHole) Close

func (t *BlackHole) Close() error

func (*BlackHole) ConsistencyWait

func (t *BlackHole) ConsistencyWait(partition string,
	consistencyLevel string,
	consistencySeq uint64,
	cancelCh chan struct{}) error

func (*BlackHole) Count

func (t *BlackHole) Count(pindex *PIndex,
	cancelCh chan struct{}) (uint64, error)

func (*BlackHole) GetOpaque

func (t *BlackHole) GetOpaque(partition string) (
	value []byte, lastSeq uint64, err error)

func (*BlackHole) OnDataDelete

func (t *BlackHole) OnDataDelete(partition string,
	key []byte, seq uint64) error

func (*BlackHole) OnDataUpdate

func (t *BlackHole) OnDataUpdate(partition string,
	key []byte, seq uint64, val []byte) error

func (*BlackHole) OnSnapshotStart

func (t *BlackHole) OnSnapshotStart(partition string,
	snapStart, snapEnd uint64) error

func (*BlackHole) Query

func (t *BlackHole) Query(pindex *PIndex, req []byte, w io.Writer,
	cancelCh chan struct{}) error

func (*BlackHole) Rollback

func (t *BlackHole) Rollback(partition string, rollbackSeq uint64) error

func (*BlackHole) SetOpaque

func (t *BlackHole) SetOpaque(partition string, value []byte) error

type BleveClient

type BleveClient struct {
	QueryURL    string
	CountURL    string
	Consistency *ConsistencyParams
}

BleveClient implements the Search() and DocCount() subset of the bleve.Index interface by accessing a remote cbft server via its REST protocol. This allows callers to add a BleveClient as a target of a bleve.IndexAlias, and it supports cbft protocol features like query consistency and auth.

TODO: Implement propagating auth info in BleveClient.
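
A sketch of scatter/gather through a bleve index alias; the remote URLs are hypothetical placeholders for another cbft node's pindex endpoints, and the Consistency field is left nil here.

package main

import (
	"fmt"

	"github.com/blevesearch/bleve"
	"github.com/couchbaselabs/cbft"
)

func main() {
	// Hypothetical query/count endpoints on a remote cbft node.
	remote := &cbft.BleveClient{
		QueryURL: "http://otherhost:8095/api/pindex/examplePIndex/query",
		CountURL: "http://otherhost:8095/api/pindex/examplePIndex/count",
	}

	// The remote client participates in a bleve index alias, so a
	// single Search() fans out across the alias targets.
	alias := bleve.NewIndexAlias(remote)

	req := bleve.NewSearchRequest(bleve.NewMatchQuery("your-search-term"))
	res, err := alias.Search(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Total)
}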

func (*BleveClient) Batch

func (r *BleveClient) Batch(b *bleve.Batch) error

func (*BleveClient) Close

func (r *BleveClient) Close() error

func (*BleveClient) Delete

func (r *BleveClient) Delete(id string) error

func (*BleveClient) DeleteInternal

func (r *BleveClient) DeleteInternal(key []byte) error

func (*BleveClient) DocCount

func (r *BleveClient) DocCount() (uint64, error)

func (*BleveClient) Document

func (r *BleveClient) Document(id string) (*document.Document, error)

func (*BleveClient) DumpAll

func (r *BleveClient) DumpAll() chan interface{}

func (*BleveClient) DumpDoc

func (r *BleveClient) DumpDoc(id string) chan interface{}

func (*BleveClient) DumpFields

func (r *BleveClient) DumpFields() chan interface{}

func (*BleveClient) Fields

func (r *BleveClient) Fields() ([]string, error)

func (*BleveClient) GetInternal

func (r *BleveClient) GetInternal(key []byte) ([]byte, error)

func (*BleveClient) Index

func (r *BleveClient) Index(id string, data interface{}) error

func (*BleveClient) Mapping

func (r *BleveClient) Mapping() *bleve.IndexMapping

func (*BleveClient) Search

func (r *BleveClient) Search(req *bleve.SearchRequest) (*bleve.SearchResult, error)

func (*BleveClient) SetInternal

func (r *BleveClient) SetInternal(key, val []byte) error

func (*BleveClient) Stats

func (r *BleveClient) Stats() *bleve.IndexStat

type BleveDest

type BleveDest struct {
	// contains filtered or unexported fields
}

func (*BleveDest) Close

func (t *BleveDest) Close() error

func (*BleveDest) ConsistencyWait

func (t *BleveDest) ConsistencyWait(partition string,
	consistencyLevel string,
	consistencySeq uint64,
	cancelCh chan struct{}) error

func (*BleveDest) Count

func (t *BleveDest) Count(pindex *PIndex, cancelCh chan struct{}) (uint64, error)

func (*BleveDest) GetOpaque

func (t *BleveDest) GetOpaque(partition string) (
	value []byte, lastSeq uint64, err error)

func (*BleveDest) OnDataDelete

func (t *BleveDest) OnDataDelete(partition string,
	key []byte, seq uint64) error

func (*BleveDest) OnDataUpdate

func (t *BleveDest) OnDataUpdate(partition string,
	key []byte, seq uint64, val []byte) error

func (*BleveDest) OnSnapshotStart

func (t *BleveDest) OnSnapshotStart(partition string,
	snapStart, snapEnd uint64) error

func (*BleveDest) Query

func (t *BleveDest) Query(pindex *PIndex, req []byte, res io.Writer,
	cancelCh chan struct{}) error

func (*BleveDest) Rollback

func (t *BleveDest) Rollback(partition string, rollbackSeq uint64) error

func (*BleveDest) SetOpaque

func (t *BleveDest) SetOpaque(partition string, value []byte) error

type BleveDestPartition

type BleveDestPartition struct {
	// contains filtered or unexported fields
}

Used to track state for a single partition.

func (*BleveDestPartition) GetOpaque

func (t *BleveDestPartition) GetOpaque(bindex bleve.Index) ([]byte, uint64, error)

func (*BleveDestPartition) OnDataDelete

func (t *BleveDestPartition) OnDataDelete(bindex bleve.Index,
	key []byte, seq uint64) error

func (*BleveDestPartition) OnDataUpdate

func (t *BleveDestPartition) OnDataUpdate(bindex bleve.Index,
	key []byte, seq uint64, val []byte) error

func (*BleveDestPartition) OnSnapshotStart

func (t *BleveDestPartition) OnSnapshotStart(bindex bleve.Index,
	snapStart, snapEnd uint64) error

func (*BleveDestPartition) SetOpaque

func (t *BleveDestPartition) SetOpaque(bindex bleve.Index, value []byte) error

type BleveQueryParams

type BleveQueryParams struct {
	Query       *bleve.SearchRequest `json:"query"`
	Consistency *ConsistencyParams   `json:"consistency"`
	Timeout     int64                `json:"timeout"`
}

type Cfg

type Cfg interface {
	// Get retrieves an entry from the Cfg.  A zero cas means don't do
	// a CAS match on Get(), and a non-zero cas value means the Get()
	// will succeed only if the CAS matches.
	Get(key string, cas uint64) (val []byte, casSuccess uint64, err error)

	// Set creates or updates an entry in the Cfg.  A non-zero cas
	// that does not match will result in an error.  A zero cas means
	// the Set() operation must be an entry creation, where a zero cas
	// Set() will error if the entry already exists.
	Set(key string, val []byte, cas uint64) (casSuccess uint64, err error)

	// Del removes an entry from the Cfg.  A non-zero cas that does
	// not match will result in an error.  A zero cas means a CAS
	// match will be skipped, so that clients can perform a
	// "don't-care, out-of-the-blue" deletion.
	Del(key string, cas uint64) error

	// Subscribe allows clients to receive events on changes to a key.
	// During a deletion event, the CfgEvent.CAS field will be 0.
	Subscribe(key string, ch chan CfgEvent) error

	// Refresh forces the Cfg implementation to reload from its
	// backend-specific data source, clearing any locally cached data.
	// Any subscribers will receive events on a Refresh, where it's up
	// to subscribers to detect if there were actual changes or not.
	Refresh() error
}

Cfg is the interface that configuration providers must implement.
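
A minimal sketch of the CAS semantics, using the in-memory CfgMem implementation (the key and values are placeholders):

package main

import (
	"fmt"

	"github.com/couchbaselabs/cbft"
)

func main() {
	cfg := cbft.NewCfgMem() // In-memory Cfg implementation.

	// A zero cas on Set() means "create"; it errors if the entry exists.
	cas, err := cfg.Set("exampleKey", []byte("v1"), 0)
	if err != nil {
		panic(err)
	}

	// A non-zero cas on Get() demands a CAS match.
	val, _, err := cfg.Get("exampleKey", cas)
	fmt.Println(string(val), err)

	// Updating with a stale cas should yield a CAS mismatch error.
	if _, err := cfg.Set("exampleKey", []byte("v2"), cas+100); err != nil {
		fmt.Println("stale cas rejected:", err)
	}
}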

type CfgCASError

type CfgCASError struct{}

The error used on mismatches of CAS (compare and set/swap) values.

func (*CfgCASError) Error

func (e *CfgCASError) Error() string

type CfgCB

type CfgCB struct {
	// contains filtered or unexported fields
}

CfgCB is an implementation of Cfg that uses a couchbase bucket.

TODO: This current implementation is racy! Instead of storing everything as a single uber key/value, we should instead store individual key/values on every get/set/del operation.

func NewCfgCB

func NewCfgCB(url, bucket string) (*CfgCB, error)

func (*CfgCB) DataDelete

func (r *CfgCB) DataDelete(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*CfgCB) DataUpdate

func (r *CfgCB) DataUpdate(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*CfgCB) Del

func (c *CfgCB) Del(key string, cas uint64) error

func (*CfgCB) Get

func (c *CfgCB) Get(key string, cas uint64) (
	[]byte, uint64, error)

func (*CfgCB) GetCredentials

func (a *CfgCB) GetCredentials() (string, string)

func (*CfgCB) GetMetaData

func (r *CfgCB) GetMetaData(vbucketId uint16) (
	value []byte, lastSeq uint64, err error)

func (*CfgCB) Load

func (c *CfgCB) Load() error

func (*CfgCB) OnError

func (r *CfgCB) OnError(err error)

func (*CfgCB) Refresh

func (c *CfgCB) Refresh() error

func (*CfgCB) Rollback

func (r *CfgCB) Rollback(vbucketId uint16, rollbackSeq uint64) error

func (*CfgCB) Set

func (c *CfgCB) Set(key string, val []byte, cas uint64) (
	uint64, error)

func (*CfgCB) SetMetaData

func (r *CfgCB) SetMetaData(vbucketId uint16, value []byte) error

func (*CfgCB) SnapshotStart

func (r *CfgCB) SnapshotStart(vbucketId uint16,
	snapStart, snapEnd uint64, snapType uint32) error

func (*CfgCB) Subscribe

func (c *CfgCB) Subscribe(key string, ch chan CfgEvent) error

type CfgEvent

type CfgEvent struct {
	Key string
	CAS uint64
}

See the Cfg.Subscribe() method.

type CfgGetHandler

type CfgGetHandler struct {
	// contains filtered or unexported fields
}

func NewCfgGetHandler

func NewCfgGetHandler(mgr *Manager) *CfgGetHandler

func (*CfgGetHandler) ServeHTTP

func (h *CfgGetHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type CfgMem

type CfgMem struct {
	CASNext uint64
	Entries map[string]*CfgMemEntry
	// contains filtered or unexported fields
}

func NewCfgMem

func NewCfgMem() *CfgMem

func (*CfgMem) Del

func (c *CfgMem) Del(key string, cas uint64) error

func (*CfgMem) Get

func (c *CfgMem) Get(key string, cas uint64) (
	[]byte, uint64, error)

func (*CfgMem) Refresh

func (c *CfgMem) Refresh() error

func (*CfgMem) Set

func (c *CfgMem) Set(key string, val []byte, cas uint64) (
	uint64, error)

func (*CfgMem) Subscribe

func (c *CfgMem) Subscribe(key string, ch chan CfgEvent) error

type CfgMemEntry

type CfgMemEntry struct {
	CAS uint64
	Val []byte
}

type CfgRefreshHandler

type CfgRefreshHandler struct {
	// contains filtered or unexported fields
}

func NewCfgRefreshHandler

func NewCfgRefreshHandler(mgr *Manager) *CfgRefreshHandler

func (*CfgRefreshHandler) ServeHTTP

func (h *CfgRefreshHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type CfgSimple

type CfgSimple struct {
	// contains filtered or unexported fields
}

func NewCfgSimple

func NewCfgSimple(path string) *CfgSimple

func (*CfgSimple) Del

func (c *CfgSimple) Del(key string, cas uint64) error

func (*CfgSimple) Get

func (c *CfgSimple) Get(key string, cas uint64) (
	[]byte, uint64, error)

func (*CfgSimple) Load

func (c *CfgSimple) Load() error

func (*CfgSimple) Refresh

func (c *CfgSimple) Refresh() error

func (*CfgSimple) Set

func (c *CfgSimple) Set(key string, val []byte, cas uint64) (
	uint64, error)

func (*CfgSimple) Subscribe

func (c *CfgSimple) Subscribe(key string, ch chan CfgEvent) error

type ConsistencyParams

type ConsistencyParams struct {
	// A Level value of "" means stale is ok; "at_plus" means we need
	// consistency at least at or beyond the consistency vector but
	// not before.
	Level string `json:"level"`

	// Keyed by indexName.
	Vectors map[string]ConsistencyVector `json:"vectors"`
}

type ConsistencyVector

type ConsistencyVector map[string]uint64

Key is partition, value is seq.
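
A sketch of requesting "at_plus" consistency against a hypothetical "default" index; the partition names and seqs are placeholders.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/couchbaselabs/cbft"
)

func main() {
	// An "at_plus" requirement: pindexes serving the "default" index
	// must have reached at least seq 123 on source partition "0" and
	// seq 456 on partition "1" before answering.
	cp := cbft.ConsistencyParams{
		Level: "at_plus",
		Vectors: map[string]cbft.ConsistencyVector{
			"default": cbft.ConsistencyVector{"0": 123, "1": 456},
		},
	}

	b, _ := json.Marshal(&cp)
	fmt.Println(string(b)) // Candidate "consistency" field of a BleveQueryParams request.
}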

type CountHandler

type CountHandler struct {
	// contains filtered or unexported fields
}

func NewCountHandler

func NewCountHandler(mgr *Manager) *CountHandler

func (*CountHandler) ServeHTTP

func (h *CountHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type CountPIndexHandler

type CountPIndexHandler struct {
	// contains filtered or unexported fields
}

func NewCountPIndexHandler

func NewCountPIndexHandler(mgr *Manager) *CountPIndexHandler

func (*CountPIndexHandler) ServeHTTP

func (h *CountPIndexHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type CreateIndexHandler

type CreateIndexHandler struct {
	// contains filtered or unexported fields
}

func NewCreateIndexHandler

func NewCreateIndexHandler(mgr *Manager) *CreateIndexHandler

func (*CreateIndexHandler) ServeHTTP

func (h *CreateIndexHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type DCPFeed

type DCPFeed struct {
	// contains filtered or unexported fields
}

A DCPFeed implements both the Feed and cbdatasource.Receiver interfaces.

func NewDCPFeed

func NewDCPFeed(name, url, poolName, bucketName, bucketUUID, paramsStr string,
	pf DestPartitionFunc, dests map[string]Dest) (*DCPFeed, error)

func (*DCPFeed) Close

func (t *DCPFeed) Close() error

func (*DCPFeed) DataDelete

func (r *DCPFeed) DataDelete(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*DCPFeed) DataUpdate

func (r *DCPFeed) DataUpdate(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*DCPFeed) Dests

func (t *DCPFeed) Dests() map[string]Dest

func (*DCPFeed) GetMetaData

func (r *DCPFeed) GetMetaData(vbucketId uint16) (value []byte, lastSeq uint64, err error)

func (*DCPFeed) Name

func (t *DCPFeed) Name() string

func (*DCPFeed) OnError

func (r *DCPFeed) OnError(err error)

func (*DCPFeed) Rollback

func (r *DCPFeed) Rollback(vbucketId uint16, rollbackSeq uint64) error

func (*DCPFeed) SetMetaData

func (r *DCPFeed) SetMetaData(vbucketId uint16, value []byte) error

func (*DCPFeed) SnapshotStart

func (r *DCPFeed) SnapshotStart(vbucketId uint16,
	snapStart, snapEnd uint64, snapType uint32) error

func (*DCPFeed) Start

func (t *DCPFeed) Start() error

func (*DCPFeed) Stats

func (t *DCPFeed) Stats(w io.Writer) error

type DCPFeedParams

type DCPFeedParams struct {
	AuthUser     string `json:"authUser"` // May be "" for no auth.
	AuthPassword string `json:"authPassword"`

	// Factor (like 1.5) to increase sleep time between retries
	// in connecting to a cluster manager node.
	ClusterManagerBackoffFactor float32 `json:"clusterManagerBackoffFactor"`

	// Initial sleep time (millisecs) before first retry to cluster manager.
	ClusterManagerSleepInitMS int `json:"clusterManagerSleepInitMS"`

	// Maximum sleep time (millisecs) between retries to cluster manager.
	ClusterManagerSleepMaxMS int `json:"clusterManagerSleepMaxMS"`

	// Factor (like 1.5) to increase sleep time between retries
	// in connecting to a data manager node.
	DataManagerBackoffFactor float32 `json:"dataManagerBackoffFactor"`

	// Initial sleep time (millisecs) before first retry to data manager.
	DataManagerSleepInitMS int `json:"dataManagerSleepInitMS"`

	// Maximum sleep time (millisecs) between retries to data manager.
	DataManagerSleepMaxMS int `json:"dataManagerSleepMaxMS"`

	// Buffer size in bytes provided for UPR flow control.
	FeedBufferSizeBytes uint32 `json:"feedBufferSizeBytes"`

	// Used for UPR flow control and buffer-ack messages when this
	// percentage of FeedBufferSizeBytes is reached.
	FeedBufferAckThreshold float32 `json:"feedBufferAckThreshold"`
}

func (*DCPFeedParams) GetCredentials

func (d *DCPFeedParams) GetCredentials() (string, string)

type DeleteIndexHandler

type DeleteIndexHandler struct {
	// contains filtered or unexported fields
}

func NewDeleteIndexHandler

func NewDeleteIndexHandler(mgr *Manager) *DeleteIndexHandler

func (*DeleteIndexHandler) ServeHTTP

func (h *DeleteIndexHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type Dest

type Dest interface {
	// Invoked by PIndex.Close().
	Close() error

	// Invoked when there's a new mutation from a data source for a
	// partition.  Dest implementation is responsible for making its
	// own copies of the key and val data.
	OnDataUpdate(partition string, key []byte, seq uint64, val []byte) error

	// Invoked by the data source when there's a data deletion in a
	// partition.  Dest implementation is responsible for making its
	// own copies of the key data.
	OnDataDelete(partition string, key []byte, seq uint64) error

	// A callback invoked by the data source when there's a start of
	// a new snapshot for a partition.  The Receiver implementation,
	// for example, might choose to optimize persistence perhaps by
	// preparing a batch write to application-specific storage.
	OnSnapshotStart(partition string, snapStart, snapEnd uint64) error

	// The Dest implementation should persist the value parameter of
	// SetOpaque() for retrieval during some future call to
	// GetOpaque() by the system.  The metadata value should be
	// considered "in-stream", or as part of the sequence history of
	// mutations.  That is, a later Rollback() to some previous
	// sequence number for a particular partition should rollback
	// both persisted metadata and regular data.  The Dest
	// implementation should make its own copy of the value data.
	SetOpaque(partition string, value []byte) error

	// GetOpaque() should return the opaque value previously
	// provided by an earlier call to SetOpaque().  If there was no
	// previous call to SetOpaque(), such as in the case of a brand
	// new instance of a Dest (as opposed to a restarted or reloaded
	// Dest), the Dest should return (nil, 0, nil) for (value,
	// lastSeq, err), respectively.  The lastSeq should be the last
	// sequence number received and persisted during calls to the
	// Dest's OnDataUpdate() & OnDataDelete() methods.
	GetOpaque(partition string) (value []byte, lastSeq uint64, err error)

	// Invoked when the data source signals a rollback during dest
	// initialization.  Note that both regular data and opaque data
	// should be rolled back to at most the rollbackSeq.  Of
	// note, the Dest is allowed to rollback even further, even all
	// the way back to the start or to zero.
	Rollback(partition string, rollbackSeq uint64) error

	// Blocks until the Dest has reached the desired consistency for
	// the partition or until the cancelCh is closed by some goroutine
	// related to the calling goroutine.
	ConsistencyWait(partition string,
		consistencyLevel string,
		consistencySeq uint64,
		cancelCh chan struct{}) error

	// Counts the underlying pindex implementation.
	Count(pindex *PIndex, cancelCh chan struct{}) (uint64, error)

	// Queries the underlying pindex implementation, blocking if
	// needed for the Dest to reach the desired consistency.
	Query(pindex *PIndex, req []byte, w io.Writer,
		cancelCh chan struct{}) error
}
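
For orientation, a toy in-memory Dest sketch (not the package's BleveDest) that shows the shape of the contract, with a simplistic rollback and no real consistency waiting:

package memdest

import (
	"fmt"
	"io"
	"sync"

	"github.com/couchbaselabs/cbft"
)

// memDest is a toy Dest that keeps documents and per-partition
// metadata in memory; a real Dest would persist both.
type memDest struct {
	m      sync.Mutex
	docs   map[string][]byte // Keyed by doc key.
	opaque map[string][]byte // Keyed by partition.
	seqs   map[string]uint64 // Last seq seen, keyed by partition.
}

func newMemDest() *memDest {
	return &memDest{
		docs:   map[string][]byte{},
		opaque: map[string][]byte{},
		seqs:   map[string]uint64{},
	}
}

func (d *memDest) Close() error { return nil }

func (d *memDest) OnDataUpdate(partition string, key []byte, seq uint64, val []byte) error {
	d.m.Lock()
	defer d.m.Unlock()
	d.docs[string(key)] = append([]byte(nil), val...) // Dest must copy val.
	d.seqs[partition] = seq
	return nil
}

func (d *memDest) OnDataDelete(partition string, key []byte, seq uint64) error {
	d.m.Lock()
	defer d.m.Unlock()
	delete(d.docs, string(key))
	d.seqs[partition] = seq
	return nil
}

func (d *memDest) OnSnapshotStart(partition string, snapStart, snapEnd uint64) error {
	return nil // A real Dest might prepare a batch write here.
}

func (d *memDest) SetOpaque(partition string, value []byte) error {
	d.m.Lock()
	defer d.m.Unlock()
	d.opaque[partition] = append([]byte(nil), value...) // Dest must copy value.
	return nil
}

func (d *memDest) GetOpaque(partition string) ([]byte, uint64, error) {
	d.m.Lock()
	defer d.m.Unlock()
	return d.opaque[partition], d.seqs[partition], nil // (nil, 0, nil) for new partitions.
}

func (d *memDest) Rollback(partition string, rollbackSeq uint64) error {
	d.m.Lock()
	defer d.m.Unlock()
	// Toy rollback: the Dest is allowed to roll back further than
	// rollbackSeq, so forget everything and restart from zero.
	d.docs = map[string][]byte{}
	d.opaque[partition] = nil
	d.seqs[partition] = 0
	return nil
}

func (d *memDest) ConsistencyWait(partition string, consistencyLevel string,
	consistencySeq uint64, cancelCh chan struct{}) error {
	return nil // Toy: never blocks.
}

func (d *memDest) Count(pindex *cbft.PIndex, cancelCh chan struct{}) (uint64, error) {
	d.m.Lock()
	defer d.m.Unlock()
	return uint64(len(d.docs)), nil
}

func (d *memDest) Query(pindex *cbft.PIndex, req []byte, w io.Writer,
	cancelCh chan struct{}) error {
	_, err := fmt.Fprintf(w, `{"total_hits":%d}`, len(d.docs))
	return err
}

// Compile-time check that memDest satisfies cbft.Dest.
var _ cbft.Dest = (*memDest)(nil)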

func BasicPartitionFunc

func BasicPartitionFunc(partition string, key []byte,
	dests map[string]Dest) (Dest, error)

This basic partition func first tries a direct lookup by partition string, falling back to the "" partition.

func NewBleveDest

func NewBleveDest(path string, bindex bleve.Index, restart func()) Dest

func VBucketIdToPartitionDest

func VBucketIdToPartitionDest(pf DestPartitionFunc,
	dests map[string]Dest, vbucketId uint16, key []byte) (
	partition string, dest Dest, err error)

type DestFeed

type DestFeed struct {
	// contains filtered or unexported fields
}

A DestFeed implements both the Feed and Dest interfaces for chainability, and is also useful for testing.

func NewDestFeed

func NewDestFeed(name string, pf DestPartitionFunc, dests map[string]Dest) *DestFeed

func (*DestFeed) Close

func (t *DestFeed) Close() error

func (*DestFeed) ConsistencyWait

func (t *DestFeed) ConsistencyWait(partition string,
	consistencyLevel string,
	consistencySeq uint64,
	cancelCh chan struct{}) error

func (*DestFeed) Count

func (t *DestFeed) Count(pindex *PIndex, cancelCh chan struct{}) (
	uint64, error)

func (*DestFeed) Dests

func (t *DestFeed) Dests() map[string]Dest

func (*DestFeed) GetOpaque

func (t *DestFeed) GetOpaque(partition string) (
	value []byte, lastSeq uint64, err error)

func (*DestFeed) Name

func (t *DestFeed) Name() string

func (*DestFeed) OnDataDelete

func (t *DestFeed) OnDataDelete(partition string,
	key []byte, seq uint64) error

func (*DestFeed) OnDataUpdate

func (t *DestFeed) OnDataUpdate(partition string,
	key []byte, seq uint64, val []byte) error

func (*DestFeed) OnSnapshotStart

func (t *DestFeed) OnSnapshotStart(partition string,
	snapStart, snapEnd uint64) error

func (*DestFeed) Query

func (t *DestFeed) Query(pindex *PIndex, req []byte, w io.Writer,
	cancelCh chan struct{}) error

func (*DestFeed) Rollback

func (t *DestFeed) Rollback(partition string,
	rollbackSeq uint64) error

func (*DestFeed) SetOpaque

func (t *DestFeed) SetOpaque(partition string,
	value []byte) error

func (*DestFeed) Start

func (t *DestFeed) Start() error

func (*DestFeed) Stats

func (t *DestFeed) Stats(w io.Writer) error

type DestPartitionFunc

type DestPartitionFunc func(partition string, key []byte,
	dests map[string]Dest) (Dest, error)

type DestSourceParams

type DestSourceParams struct {
	NumPartitions int `json:"numPartitions"`
}

type Feed

type Feed interface {
	Name() string
	Start() error
	Close() error
	Dests() map[string]Dest // Key is partition identifier.

	// Writes stats as JSON to the given writer.
	Stats(io.Writer) error
}

func CalcFeedsDelta

func CalcFeedsDelta(nodeUUID string, planPIndexes *PlanPIndexes,
	currFeeds map[string]Feed, pindexes map[string]*PIndex) (
	addFeeds [][]*PIndex, removeFeeds []Feed)

Functionally determine the delta of which feeds need creation and which should be shut down.

type FeedPartitionsFunc

type FeedPartitionsFunc func(sourceType, sourceName, sourceUUID, sourceParams,
	server string) ([]string, error)

type FeedStartFunc

type FeedStartFunc func(mgr *Manager, feedName, indexName, indexUUID string,
	sourceType, sourceName, sourceUUID, sourceParams string,
	dests map[string]Dest) error

type FeedStatsHandler

type FeedStatsHandler struct {
	// contains filtered or unexported fields
}

func NewFeedStatsHandler

func NewFeedStatsHandler(mgr *Manager) *FeedStatsHandler

func (*FeedStatsHandler) ServeHTTP

func (h *FeedStatsHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type FeedType

type FeedType struct {
	Start       FeedStartFunc
	Partitions  FeedPartitionsFunc
	Public      bool
	Description string
	StartSample interface{}
}

type GetIndexHandler

type GetIndexHandler struct {
	// contains filtered or unexported fields
}

func NewGetIndexHandler

func NewGetIndexHandler(mgr *Manager) *GetIndexHandler

func (*GetIndexHandler) ServeHTTP

func (h *GetIndexHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type GetLogHandler

type GetLogHandler struct {
	// contains filtered or unexported fields
}

func NewGetLogHandler

func NewGetLogHandler(mr *MsgRing) *GetLogHandler

func (*GetLogHandler) ServeHTTP

func (h *GetLogHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type IndexDef

type IndexDef struct {
	Type         string     `json:"type"` // Ex: "bleve", "alias", "blackhole", etc.
	Name         string     `json:"name"`
	UUID         string     `json:"uuid"`
	Params       string     `json:"params"`
	SourceType   string     `json:"sourceType"`
	SourceName   string     `json:"sourceName"`
	SourceUUID   string     `json:"sourceUUID"`
	SourceParams string     `json:"sourceParams"` // Optional connection info.
	PlanParams   PlanParams `json:"planParams"`
}

type IndexDefs

type IndexDefs struct {
	// IndexDefs.UUID changes whenever any child IndexDef changes.
	UUID        string               `json:"uuid"`
	IndexDefs   map[string]*IndexDef `json:"indexDefs"`   // Key is IndexDef.Name.
	ImplVersion string               `json:"implVersion"` // See VERSION.
}

func CfgGetIndexDefs

func CfgGetIndexDefs(cfg Cfg) (*IndexDefs, uint64, error)

func NewIndexDefs

func NewIndexDefs(version string) *IndexDefs

func PlannerGetIndexDefs

func PlannerGetIndexDefs(cfg Cfg, version string) (*IndexDefs, error)

type ListIndexHandler

type ListIndexHandler struct {
	// contains filtered or unexported fields
}

func NewListIndexHandler

func NewListIndexHandler(mgr *Manager) *ListIndexHandler

func (*ListIndexHandler) ServeHTTP

func (h *ListIndexHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type Manager

type Manager struct {
	// contains filtered or unexported fields
}

func NewManager

func NewManager(version string, cfg Cfg, uuid string, tags []string,
	container string, weight int, bindAddr, dataDir string, server string,
	meh ManagerEventHandlers) *Manager

func (*Manager) Cfg

func (mgr *Manager) Cfg() Cfg

func (*Manager) ClosePIndex

func (mgr *Manager) ClosePIndex(pindex *PIndex) error

func (*Manager) CoveringPIndexes

func (mgr *Manager) CoveringPIndexes(indexName, indexUUID string,
	wantNode func(*PlanPIndexNode) bool) (
	localPIndexes []*PIndex, remotePlanPIndexes []*RemotePlanPIndex, err error)

Returns a non-overlapping, disjoint set (or cut) of PIndexes (either local or remote) that cover all the partitions of an index so that the caller can perform scatter/gather queries, etc. Only PlanPIndexes on wanted nodes that pass the wantNode filter will be returned.

TODO: Perhaps need a tighter check around indexUUID, as the current implementation might have a race where old pindexes with a matching (but outdated) indexUUID might be chosen.

TODO: This implementation currently always favors the local node's pindex, but should it? Perhaps a remote node is more up-to-date than the local pindex?

TODO: Should we favor the most up-to-date node rather than the first one that we run into here? But perhaps the most up-to-date node is also the most overloaded? Or perhaps the planner may be trying to rebalance away the most up-to-date node, and hitting it with load just makes the rebalance take longer?
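
A sketch of how a caller might enumerate the covering set for a read, using PlanPIndexNodeCanRead as the wantNode filter; passing "" for indexUUID to skip the UUID check is an assumption of this sketch.

package example

import (
	"log"

	"github.com/couchbaselabs/cbft"
)

// logCoveringPIndexes sketches a scatter/gather setup: it gathers the
// local and remote pindexes that would serve a read for the given
// index, filtering to plan nodes that are marked readable.
func logCoveringPIndexes(mgr *cbft.Manager, indexName string) error {
	local, remote, err := mgr.CoveringPIndexes(indexName, "",
		cbft.PlanPIndexNodeCanRead)
	if err != nil {
		return err
	}
	for _, p := range local {
		log.Printf("local pindex: %s", p.Name)
	}
	for _, r := range remote {
		log.Printf("remote pindex: %s on node %s",
			r.PlanPIndex.Name, r.NodeDef.HostPort)
	}
	return nil
}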

func (*Manager) CreateIndex

func (mgr *Manager) CreateIndex(sourceType, sourceName, sourceUUID, sourceParams,
	indexType, indexName, indexParams string, planParams PlanParams) error

Creates a logical index, which might comprise many PIndex objects.

func (*Manager) CurrentMaps

func (mgr *Manager) CurrentMaps() (map[string]Feed, map[string]*PIndex)

Returns a snapshot copy of the current feeds and pindexes.

func (*Manager) DataDir

func (mgr *Manager) DataDir() string

func (*Manager) DeleteIndex

func (mgr *Manager) DeleteIndex(indexName string) error

Deletes a logical index, which might comprise many PIndex objects.

func (*Manager) GetIndexDefs

func (mgr *Manager) GetIndexDefs(refresh bool) (
	*IndexDefs, map[string]*IndexDef, error)

Returns a read-only snapshot of the IndexDefs, along with the IndexDefs organized by name. Pass refresh as true to force a read from the Cfg.

func (*Manager) GetPIndex

func (mgr *Manager) GetPIndex(pindexName string) *PIndex

func (*Manager) GetPlanPIndexes

func (mgr *Manager) GetPlanPIndexes(refresh bool) (
	*PlanPIndexes, map[string][]*PlanPIndex, error)

Returns a read-only snapshot of the PlanPIndexes, along with the PlanPIndexes organized by IndexName. Pass refresh as true to force a read from the Cfg.

func (*Manager) JanitorKick

func (mgr *Manager) JanitorKick(msg string)

JanitorKick synchronously kicks the manager's janitor, if any.

func (*Manager) JanitorLoop

func (mgr *Manager) JanitorLoop()

JanitorLoop is the main loop for the janitor.

func (*Manager) JanitorNOOP

func (mgr *Manager) JanitorNOOP(msg string)

JanitorNOOP sends a synchronous NOOP request to the manager's janitor, if any.

func (*Manager) JanitorOnce

func (mgr *Manager) JanitorOnce(reason string) error

func (*Manager) Kick

func (mgr *Manager) Kick(msg string)

func (*Manager) LoadDataDir

func (mgr *Manager) LoadDataDir() error

Walk the data dir and register pindexes.

func (*Manager) PIndexPath

func (mgr *Manager) PIndexPath(pindexName string) string

func (*Manager) ParsePIndexPath

func (mgr *Manager) ParsePIndexPath(pindexPath string) (string, bool)

func (*Manager) PlannerKick

func (mgr *Manager) PlannerKick(msg string)

PlannerKick synchronously kicks the manager's planner, if any.

func (*Manager) PlannerLoop

func (mgr *Manager) PlannerLoop()

PlannerLoop is the main loop for the planner.

func (*Manager) PlannerNOOP

func (mgr *Manager) PlannerNOOP(msg string)

PlannerNOOP sends a synchronous NOOP request to the manager's planner, if any.

func (*Manager) PlannerOnce

func (mgr *Manager) PlannerOnce(reason string) (bool, error)

func (*Manager) RemovePIndex

func (mgr *Manager) RemovePIndex(pindex *PIndex) error

func (*Manager) SaveNodeDef

func (mgr *Manager) SaveNodeDef(kind string, force bool) error

func (*Manager) Start

func (mgr *Manager) Start(register string) error

func (*Manager) UUID

func (mgr *Manager) UUID() string

type ManagerEventHandlers

type ManagerEventHandlers interface {
	OnRegisterPIndex(pindex *PIndex)
	OnUnregisterPIndex(pindex *PIndex)
}

type ManagerKickHandler

type ManagerKickHandler struct {
	// contains filtered or unexported fields
}

func NewManagerKickHandler

func NewManagerKickHandler(mgr *Manager) *ManagerKickHandler

func (*ManagerKickHandler) ServeHTTP

func (h *ManagerKickHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type ManagerMetaHandler

type ManagerMetaHandler struct {
	// contains filtered or unexported fields
}

func NewManagerMetaHandler

func NewManagerMetaHandler(mgr *Manager) *ManagerMetaHandler

func (*ManagerMetaHandler) ServeHTTP

func (h *ManagerMetaHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type MetaDesc

type MetaDesc struct {
	Description string      `json:"description"`
	StartSample interface{} `json:"startSample"`
}

type MsgRing

type MsgRing struct {
	Next int      `json:"next"`
	Msgs [][]byte `json:"msgs"`
	// contains filtered or unexported fields
}

func NewMsgRing

func NewMsgRing(inner io.Writer, ringSize int) (*MsgRing, error)

func (*MsgRing) Messages

func (m *MsgRing) Messages() [][]byte

func (*MsgRing) Write

func (m *MsgRing) Write(p []byte) (n int, err error)

Implements the io.Writer interface.
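
A small sketch of using a MsgRing as a log sink. The ring size is arbitrary, and treating the inner os.Stderr as a write-through target is an assumption based on the NewMsgRing signature.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/couchbaselabs/cbft"
)

func main() {
	// Retain the last 100 log lines in memory, with os.Stderr as the
	// inner writer.
	mr, err := cbft.NewMsgRing(os.Stderr, 100)
	if err != nil {
		panic(err)
	}
	log.SetOutput(mr) // MsgRing implements io.Writer.

	log.Printf("hello from the msg ring")

	// The retained messages could later be served over REST,
	// e.g. via GetLogHandler.
	for _, msg := range mr.Messages() {
		fmt.Print(string(msg))
	}
}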

type NILFeed

type NILFeed struct {
	// contains filtered or unexported fields
}

A NILFeed never feeds any data to its dests. It's useful for testing and for pindexes that are actually primary data sources.

func NewNILFeed

func NewNILFeed(name string, dests map[string]Dest) *NILFeed

func (*NILFeed) Close

func (t *NILFeed) Close() error

func (*NILFeed) Dests

func (t *NILFeed) Dests() map[string]Dest

func (*NILFeed) Name

func (t *NILFeed) Name() string

func (*NILFeed) Start

func (t *NILFeed) Start() error

func (*NILFeed) Stats

func (t *NILFeed) Stats(w io.Writer) error

type NodeDef

type NodeDef struct {
	HostPort    string   `json:"hostPort"`
	UUID        string   `json:"uuid"`
	ImplVersion string   `json:"implVersion"` // See VERSION.
	Tags        []string `json:"tags"`
	Container   string   `json:"container"`
	Weight      int      `json:"weight"`
}

type NodeDefs

type NodeDefs struct {
	// NodeDefs.UUID changes whenever any child NodeDef changes.
	UUID        string              `json:"uuid"`
	NodeDefs    map[string]*NodeDef `json:"nodeDefs"`    // Key is NodeDef.HostPort.
	ImplVersion string              `json:"implVersion"` // See VERSION.
}

func CfgGetNodeDefs

func CfgGetNodeDefs(cfg Cfg, kind string) (*NodeDefs, uint64, error)

func NewNodeDefs

func NewNodeDefs(version string) *NodeDefs

func PlannerGetNodeDefs

func PlannerGetNodeDefs(cfg Cfg, version, uuid, bindAddr string) (*NodeDefs, error)

type PIndex

type PIndex struct {
	Name             string     `json:"name"`
	UUID             string     `json:"uuid"`
	IndexType        string     `json:"indexType"`
	IndexName        string     `json:"indexName"`
	IndexUUID        string     `json:"indexUUID"`
	IndexParams      string     `json:"indexParams"`
	SourceType       string     `json:"sourceType"`
	SourceName       string     `json:"sourceName"`
	SourceUUID       string     `json:"sourceUUID"`
	SourceParams     string     `json:"sourceParams"`
	SourcePartitions string     `json:"sourcePartitions"`
	Path             string     `json:"-"` // Transient, not persisted.
	Impl             PIndexImpl `json:"-"` // Transient, not persisted.
	Dest             Dest       `json:"-"` // Transient, not persisted.
	// contains filtered or unexported fields
}

func NewPIndex

func NewPIndex(mgr *Manager, name, uuid,
	indexType, indexName, indexUUID, indexParams,
	sourceType, sourceName, sourceUUID, sourceParams, sourcePartitions string,
	path string) (*PIndex, error)

func OpenPIndex

func OpenPIndex(mgr *Manager, path string) (*PIndex, error)

NOTE: Path argument must be a directory.

func (*PIndex) Close

func (p *PIndex) Close(remove bool) error

type PIndexImpl

type PIndexImpl interface {
	Close() error
}

type PIndexImplType

type PIndexImplType struct {
	Validate func(indexType, indexName, indexParams string) error

	New func(indexType, indexParams, path string, restart func()) (
		PIndexImpl, Dest, error)

	Open func(indexType, path string, restart func()) (
		PIndexImpl, Dest, error)

	Count func(mgr *Manager, indexName, indexUUID string) (
		uint64, error)

	Query func(mgr *Manager, indexName, indexUUID string,
		req []byte, res io.Writer) error

	Description string
	StartSample interface{}
}
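
A sketch of registering a custom index type. The "noop" type name is hypothetical, and reusing the blackhole constructors (whose signatures match the New/Open fields) is purely for illustration.

package example

import (
	"github.com/couchbaselabs/cbft"
)

// registerNoopType wires in a hypothetical "noop" index type.
func registerNoopType() {
	cbft.RegisterPIndexImplType("noop", &cbft.PIndexImplType{
		Validate: func(indexType, indexName, indexParams string) error {
			return nil // Accept any index params.
		},
		// The blackhole constructors happen to match the New/Open
		// signatures, so this example type simply discards all data.
		New:  cbft.NewBlackHolePIndexImpl,
		Open: cbft.OpenBlackHolePIndexImpl,
		// Count and Query are omitted in this sketch; a real index
		// type would likely provide them.
		Description: "noop - example index type that discards all data",
	})
}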

func PIndexImplTypeForIndex

func PIndexImplTypeForIndex(cfg Cfg, indexName string) (*PIndexImplType, error)

type PlanPIndex

type PlanPIndex struct {
	Name             string `json:"name"` // Stable & unique cluster wide.
	UUID             string `json:"uuid"`
	IndexType        string `json:"indexType"`   // See IndexDef.Type.
	IndexName        string `json:"indexName"`   // See IndexDef.Name.
	IndexUUID        string `json:"indexUUID"`   // See IndexDef.UUID.
	IndexParams      string `json:"indexParams"` // See IndexDef.Params.
	SourceType       string `json:"sourceType"`
	SourceName       string `json:"sourceName"`
	SourceUUID       string `json:"sourceUUID"`
	SourceParams     string `json:"sourceParams"` // Optional connection info.
	SourcePartitions string `json:"sourcePartitions"`

	Nodes map[string]*PlanPIndexNode `json:"nodes"` // Keyed by NodeDef.UUID.
}

type PlanPIndexNode

type PlanPIndexNode struct {
	CanRead  bool `json:"canRead"`
	CanWrite bool `json:"canWrite"`
	Priority int  `json:"priority"`
}

type PlanPIndexNodeRef

type PlanPIndexNodeRef struct {
	UUID string
	Node *PlanPIndexNode
}

type PlanPIndexNodeRefs

type PlanPIndexNodeRefs []*PlanPIndexNodeRef

func (PlanPIndexNodeRefs) Len

func (pms PlanPIndexNodeRefs) Len() int

func (PlanPIndexNodeRefs) Less

func (pms PlanPIndexNodeRefs) Less(i, j int) bool

func (PlanPIndexNodeRefs) Swap

func (pms PlanPIndexNodeRefs) Swap(i, j int)

type PlanPIndexes

type PlanPIndexes struct {
	// PlanPIndexes.UUID changes whenever any child PlanPIndex changes.
	UUID         string                 `json:"uuid"`
	PlanPIndexes map[string]*PlanPIndex `json:"planPIndexes"` // Key is PlanPIndex.Name.
	ImplVersion  string                 `json:"implVersion"`  // See VERSION.
	Warnings     map[string][]string    `json:"warnings"`     // Key is IndexDef.Name.
}

func CalcPlan

func CalcPlan(indexDefs *IndexDefs, nodeDefs *NodeDefs,
	planPIndexesPrev *PlanPIndexes, version, server string) (
	*PlanPIndexes, error)

Split logical indexes into PIndexes and assign PIndexes to nodes.

func CfgGetPlanPIndexes

func CfgGetPlanPIndexes(cfg Cfg) (*PlanPIndexes, uint64, error)

func NewPlanPIndexes

func NewPlanPIndexes(version string) *PlanPIndexes

func PlannerGetPlanPIndexes

func PlannerGetPlanPIndexes(cfg Cfg, version string) (*PlanPIndexes, uint64, error)

type PlanParams

type PlanParams struct {
	MaxPartitionsPerPIndex int `json:"maxPartitionsPerPIndex"`

	// The first copy is not counted as a replica.  For example, a
	// NumReplicas setting of 2 means there should be a primary and 2
	// replicas... so 3 copies in total.  A NumReplicas of 0 means
	// just the first, primary copy only.
	NumReplicas int `json:"numReplicas"`

	HierarchyRules blance.HierarchyRules `json:"hierarchyRules"`
}
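
A small illustration of the replica accounting; the partition split is an arbitrary placeholder.

package example

import "github.com/couchbaselabs/cbft"

// examplePlanParams asks for at most 32 source partitions per pindex
// and one replica beyond the primary, i.e. two copies in total.
// Usable as the planParams argument to Manager.CreateIndex().
var examplePlanParams = cbft.PlanParams{
	MaxPartitionsPerPIndex: 32,
	NumReplicas:            1,
}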

type QueryHandler

type QueryHandler struct {
	// contains filtered or unexported fields
}

func NewQueryHandler

func NewQueryHandler(mgr *Manager) *QueryHandler

func (*QueryHandler) ServeHTTP

func (h *QueryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type QueryPIndexHandler

type QueryPIndexHandler struct {
	// contains filtered or unexported fields
}

func NewQueryPIndexHandler

func NewQueryPIndexHandler(mgr *Manager) *QueryPIndexHandler

func (*QueryPIndexHandler) ServeHTTP

func (h *QueryPIndexHandler) ServeHTTP(w http.ResponseWriter, req *http.Request)

type RemotePlanPIndex

type RemotePlanPIndex struct {
	PlanPIndex *PlanPIndex
	NodeDef    *NodeDef
}

type TAPFeed

type TAPFeed struct {
	// contains filtered or unexported fields
}

A TAPFeed uses the TAP protocol to dump data from a Couchbase data source.

func NewTAPFeed

func NewTAPFeed(name, url, poolName, bucketName, bucketUUID, paramsStr string,
	pf DestPartitionFunc, dests map[string]Dest) (*TAPFeed, error)

func (*TAPFeed) Close

func (t *TAPFeed) Close() error

func (*TAPFeed) Dests

func (t *TAPFeed) Dests() map[string]Dest

func (*TAPFeed) Name

func (t *TAPFeed) Name() string

func (*TAPFeed) Start

func (t *TAPFeed) Start() error

func (*TAPFeed) Stats

func (t *TAPFeed) Stats(w io.Writer) error

type TAPFeedParams

type TAPFeedParams struct {
	BackoffFactor float32 `json:"backoffFactor"`
	SleepInitMS   int     `json:"sleepInitMS"`
	SleepMaxMS    int     `json:"sleepMaxMS"`
}

type WorkReq

type WorkReq struct {
	// contains filtered or unexported fields
}

Directories

Path	Synopsis
cmd	cbft command
