arangodb

package
v2.0.0-alpha.1
Published: May 30, 2023 License: Apache-2.0 Imports: 16 Imported by: 33

Documentation

Index

Constants

const (
	// PrimaryIndexType is automatically created for each collection. It indexes the documents’ primary keys,
	// which are stored in the _key system attribute. The primary index is unique and can be used for queries on both the _key and _id attributes.
	// There is no way to explicitly create or delete primary indexes.
	PrimaryIndexType = IndexType("primary")

	// EdgeIndexType is automatically created for edge collections. It contains connections between vertex documents
	// and is invoked when the connecting edges of a vertex are queried. There is no way to explicitly create or delete edge indexes.
	// The edge index is non-unique.
	EdgeIndexType = IndexType("edge")

	// PersistentIndexType is a sorted index that can be used for finding individual documents or ranges of documents.
	PersistentIndexType = IndexType("persistent")

	// GeoIndexType can accelerate queries that filter and sort by the distance between stored coordinates and coordinates provided in a query.
	GeoIndexType = IndexType("geo")

	// TTLIndexType can be used for automatically removing expired documents from a collection.
	// Documents which are expired are eventually removed by a background thread.
	TTLIndexType = IndexType("ttl")

	// ZKDIndexType is a multi-dimensional index. The zkd index type is an experimental index for indexing two- or higher-dimensional data
	// such as time ranges, for efficient intersection of multiple range queries.
	ZKDIndexType = IndexType("zkd")

	// InvertedIndexType can be used to speed up a broad range of AQL queries, from simple to complex, including full-text search.
	InvertedIndexType = IndexType("inverted")

	// FullTextIndex - Deprecated: since ArangoDB 3.10. Use an ArangoSearch View instead.
	FullTextIndex = IndexType("fulltext")

	// HashIndex is an alias for the persistent index type and should no longer be used to create new indexes.
	// The alias will be removed in a future version.
	HashIndex = IndexType("hash")

	// SkipListIndex is an alias for the persistent index type and should no longer be used to create new indexes.
	// The alias will be removed in a future version.
	SkipListIndex = IndexType("skiplist")
)
const (
	CollectionStatusNewBorn   = CollectionStatus(1)
	CollectionStatusUnloaded  = CollectionStatus(2)
	CollectionStatusLoaded    = CollectionStatus(3)
	CollectionStatusUnloading = CollectionStatus(4)
	CollectionStatusDeleted   = CollectionStatus(5)
	CollectionStatusLoading   = CollectionStatus(6)
)
const (
	// CollectionTypeDocument specifies a document collection
	CollectionTypeDocument = CollectionType(2)
	// CollectionTypeEdge specifies an edges collection
	CollectionTypeEdge = CollectionType(3)
)
const (
	KeyGeneratorTraditional   = KeyGeneratorType("traditional")
	KeyGeneratorAutoIncrement = KeyGeneratorType("autoincrement")
)
const (
	EngineTypeMMFiles = EngineType("mmfiles")
	EngineTypeRocksDB = EngineType("rocksdb")
)

Variables

This section is empty.

Functions

func CreateDocuments

func CreateDocuments(ctx context.Context, col Collection, docCount int, generator func(i int) any) error

CreateDocuments creates given number of documents for the provided collection.
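
A minimal usage sketch, assuming ctx is a context.Context and col is a Collection opened elsewhere:

	// Seed the collection with 100 small documents; the generator is called once per index.
	err := arangodb.CreateDocuments(ctx, col, 100, func(i int) any {
		return map[string]any{"seq": i}
	})
	if err != nil {
		// handle error
	}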

Types

type AbortTransactionOptions

type AbortTransactionOptions struct{}

AbortTransactionOptions provides options for AbortTransaction. Currently unused.

type AnalyzerFeature

type AnalyzerFeature string

AnalyzerFeature specifies a feature of an analyzer

const (
	// AnalyzerFeatureFrequency how often a term is seen, required for PHRASE()
	AnalyzerFeatureFrequency AnalyzerFeature = "frequency"

	// AnalyzerFeatureNorm the field normalization factor
	AnalyzerFeatureNorm AnalyzerFeature = "norm"

	// AnalyzerFeaturePosition sequentially increasing term position, required for PHRASE(). If present then the frequency feature is also required
	AnalyzerFeaturePosition AnalyzerFeature = "position"

	// AnalyzerFeatureOffset can be specified if 'position' feature is set
	AnalyzerFeatureOffset AnalyzerFeature = "offset"
)

type BeginTransactionOptions

type BeginTransactionOptions struct {
	WaitForSync         bool          `json:"waitForSync,omitempty"`
	AllowImplicit       bool          `json:"allowImplicit,omitempty"`
	LockTimeoutDuration time.Duration `json:"-"`
	LockTimeout         float64       `json:"lockTimeout,omitempty"`
	MaxTransactionSize  uint64        `json:"maxTransactionSize,omitempty"`
}

BeginTransactionOptions provides options for the BeginTransaction call

type Client

type Client interface {
	// Connection returns current Driver Connection
	Connection() connection.Connection

	Requests

	ClientDatabase
	ClientServerInfo
	ClientAdmin
}

func NewClient

func NewClient(connection connection.Connection) Client
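
A construction sketch. The connection helpers named below (NewRoundRobinEndpoints, NewHttpConnection, HttpConfiguration) live in the connection package; their exact names and signatures are assumptions here and should be checked against that package's documentation:

	// Wrap a plain HTTP connection to a single endpoint in a Client.
	endpoint := connection.NewRoundRobinEndpoints([]string{"http://localhost:8529"})
	conn := connection.NewHttpConnection(connection.HttpConfiguration{Endpoint: endpoint})
	client := arangodb.NewClient(conn)
	_ = client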

type ClientAdmin

type ClientAdmin interface {
	ClientAdminLog
	ClientAdminBackup
	// Health returns the cluster configuration & health.
	// It works in cluster or active fail-over mode.
	Health(ctx context.Context) (ClusterHealth, error)
}

type ClientAdminBackup

type ClientAdminBackup interface {
}

type ClientAdminLog

type ClientAdminLog interface {
	// GetLogLevels returns log levels for topics.
	GetLogLevels(ctx context.Context, opts *LogLevelsGetOptions) (LogLevels, error)
	// SetLogLevels sets log levels for the given topics.
	SetLogLevels(ctx context.Context, logLevels LogLevels, opts *LogLevelsSetOptions) error
}

type ClientDatabase

type ClientDatabase interface {
	// Database opens a connection to an existing database.
	// If no database with given name exists, a NotFoundError is returned.
	Database(ctx context.Context, name string) (Database, error)

	// DatabaseExists returns true if a database with given name exists.
	DatabaseExists(ctx context.Context, name string) (bool, error)

	// Databases returns a list of all databases found by the client.
	Databases(ctx context.Context) ([]Database, error)

	// AccessibleDatabases returns a list of all databases that can be accessed by the authenticated user.
	AccessibleDatabases(ctx context.Context) ([]Database, error)

	// CreateDatabase creates a new database with given name and opens a connection to it.
	// If a database with given name already exists, a DuplicateError is returned.
	CreateDatabase(ctx context.Context, name string, options *CreateDatabaseOptions) (Database, error)
}
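
For example, a sketch that opens a database if it exists and creates it otherwise, using only the methods above:

	exists, err := client.DatabaseExists(ctx, "example")
	if err != nil {
		// handle error
	}
	var db arangodb.Database
	if exists {
		db, err = client.Database(ctx, "example")
	} else {
		// nil options apply the server defaults
		db, err = client.CreateDatabase(ctx, "example", nil)
	}
	if err != nil {
		// handle error
	}
	_ = db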

type ClientServerInfo

type ClientServerInfo interface {
	// Version returns version information from the connected database server.
	// Use WithDetails to configure a context that will include additional details in the returned VersionInfo.
	Version(ctx context.Context) (VersionInfo, error)
	// ServerRole returns the role of the server that answers the request.
	ServerRole(ctx context.Context) (ServerRole, error)
}
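
A quick sketch (the Version field on VersionInfo is assumed here; check the VersionInfo type for the exact fields):

	info, err := client.Version(ctx)
	if err != nil {
		// handle error
	}
	log.Println("connected to ArangoDB", info.Version)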

type ClusterHealth

type ClusterHealth struct {
	// Unique identifier of the entire cluster.
	// This ID is created when the cluster was first created.
	ID string `json:"ClusterId"`
	// Health per server
	Health map[ServerID]ServerHealth `json:"Health"`
}

ClusterHealth contains health information for all servers in a cluster.

type Collection

type Collection interface {
	Name() string
	Database() Database

	// Shards fetches shards information of the collection.
	Shards(ctx context.Context, details bool) (CollectionShards, error)

	// Remove removes the entire collection.
	// If the collection does not exist, a NotFoundError is returned.
	Remove(ctx context.Context) error

	CollectionDocuments
	CollectionIndexes
}

type CollectionDocumentCreate

type CollectionDocumentCreate interface {

	// CreateDocument creates a single document in the collection.
	// The document data is loaded from the given document, the document meta data is returned.
	// If the document data already contains a `_key` field, this will be used as key of the new document,
	// otherwise a unique key is created.
	// A ConflictError is returned when the `_key` field contains a duplicate key or any other field violates an index constraint.
	CreateDocument(ctx context.Context, document interface{}) (CollectionDocumentCreateResponse, error)

	// CreateDocumentWithOptions creates a single document in the collection.
	// The document data is loaded from the given document, the document meta data is returned.
	// If the document data already contains a `_key` field, this will be used as key of the new document,
	// otherwise a unique key is created.
	// A ConflictError is returned when the `_key` field contains a duplicate key or any other field violates an index constraint.
	CreateDocumentWithOptions(ctx context.Context, document interface{}, options *CollectionDocumentCreateOptions) (CollectionDocumentCreateResponse, error)

	// CreateDocuments creates multiple documents in the collection.
	// The document data is loaded from the given documents slice, the documents meta data is returned.
	// If a documents element already contains a `_key` field, this will be used as key of the new document,
	// otherwise a unique key is created.
	// If a documents element contains a `_key` field with a duplicate key, or any other field violates an index constraint,
	// a ConflictError is returned at its index in the errors slice.
	// To return the NEW documents, prepare a context with `WithReturnNew`. The data argument passed to `WithReturnNew` must be
	// a slice with the same number of entries as the `documents` slice.
	// To wait until the documents have been synced to disk, prepare a context with `WithWaitForSync`.
	// If the create request itself fails or one of the arguments is invalid, an error is returned.
	CreateDocuments(ctx context.Context, documents interface{}) (CollectionDocumentCreateResponseReader, error)

	// CreateDocumentsWithOptions creates multiple documents in the collection.
	// The document data is loaded from the given documents slice, the documents meta data is returned.
	// If a documents element already contains a `_key` field, this will be used as key of the new document,
	// otherwise a unique key is created.
	// If a documents element contains a `_key` field with a duplicate key, or any other field violates an index constraint,
	// a ConflictError is returned at its index in the errors slice.
	// To return the NEW documents, prepare a context with `WithReturnNew`. The data argument passed to `WithReturnNew` must be
	// a slice with the same number of entries as the `documents` slice.
	// To wait until the documents have been synced to disk, prepare a context with `WithWaitForSync`.
	// If the create request itself fails or one of the arguments is invalid, an error is returned.
	CreateDocumentsWithOptions(ctx context.Context, documents interface{}, opts *CollectionDocumentCreateOptions) (CollectionDocumentCreateResponseReader, error)
}
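
A sketch of single and batched creation, assuming col is a Collection and Book is a caller-defined type:

	type Book struct {
		Key   string `json:"_key,omitempty"`
		Title string `json:"title"`
	}

	// Single document: the generated meta data (key, revision) is in the response.
	resp, err := col.CreateDocument(ctx, Book{Title: "ArangoDB"})
	if err != nil {
		// handle error
	}
	_ = resp.Key

	// Multiple documents: results are streamed back through the response reader.
	reader, err := col.CreateDocuments(ctx, []Book{{Title: "A"}, {Title: "B"}})
	if err != nil {
		// handle error
	}
	for {
		r, readErr := reader.Read()
		if readErr != nil {
			break // end of results, or a transport error worth inspecting
		}
		_ = r.Key
	}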

type CollectionDocumentCreateOptions

type CollectionDocumentCreateOptions struct {
	WithWaitForSync *bool
	Overwrite       *bool
	Silent          *bool
	OverwriteMode   *CollectionDocumentCreateOverwriteMode
	NewObject       interface{}
	OldObject       interface{}
	// RefillIndexCaches, if set to true, refills the in-memory index caches.
	RefillIndexCaches *bool
}

type CollectionDocumentCreateOverwriteMode

type CollectionDocumentCreateOverwriteMode string
const (
	CollectionDocumentCreateOverwriteModeIgnore   CollectionDocumentCreateOverwriteMode = "ignore"
	CollectionDocumentCreateOverwriteModeReplace  CollectionDocumentCreateOverwriteMode = "replace"
	CollectionDocumentCreateOverwriteModeUpdate   CollectionDocumentCreateOverwriteMode = "update"
	CollectionDocumentCreateOverwriteModeConflict CollectionDocumentCreateOverwriteMode = "conflict"
)

func (*CollectionDocumentCreateOverwriteMode) Get

func (CollectionDocumentCreateOverwriteMode) New

func (*CollectionDocumentCreateOverwriteMode) String

type CollectionDocumentCreateResponse

type CollectionDocumentCreateResponse struct {
	DocumentMeta
	shared.ResponseStruct `json:",inline"`
	Old, New              interface{}
}

type CollectionDocumentCreateResponseReader

type CollectionDocumentCreateResponseReader interface {
	Read() (CollectionDocumentCreateResponse, error)
}

type CollectionDocumentDelete

type CollectionDocumentDelete interface {
	// DeleteDocument removes a single document with given key from the collection.
	// The document metadata is returned.
	// If no document exists with given key, a NotFoundError is returned.
	DeleteDocument(ctx context.Context, key string) (CollectionDocumentDeleteResponse, error)

	// DeleteDocumentWithOptions removes a single document with given key from the collection.
	// The document metadata is returned.
	// If no document exists with given key, a NotFoundError is returned.
	DeleteDocumentWithOptions(ctx context.Context, key string, opts *CollectionDocumentDeleteOptions) (CollectionDocumentDeleteResponse, error)

	// DeleteDocuments removes multiple documents with given keys from the collection.
	// The document metadata are returned.
	// If no document exists with a given key, a NotFoundError is returned at its errors index.
	DeleteDocuments(ctx context.Context, keys []string) (CollectionDocumentDeleteResponseReader, error)

	// DeleteDocumentsWithOptions removes multiple documents with given keys from the collection.
	// The document metadata are returned.
	// If no document exists with a given key, a NotFoundError is returned at its errors index.
	DeleteDocumentsWithOptions(ctx context.Context, keys []string, opts *CollectionDocumentDeleteOptions) (CollectionDocumentDeleteResponseReader, error)
}
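
For example, deleting a single document while waiting for the deletion to be synced to disk; the option fields are pointers, so a local variable is taken by address:

	waitForSync := true
	res, err := col.DeleteDocumentWithOptions(ctx, "someKey", &arangodb.CollectionDocumentDeleteOptions{
		WithWaitForSync: &waitForSync,
	})
	if err != nil {
		// a NotFoundError is returned when the key does not exist
	}
	_ = res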

type CollectionDocumentDeleteOptions

type CollectionDocumentDeleteOptions struct {
	// Wait until deletion operation has been synced to disk.
	WithWaitForSync *bool

	// Return additionally the complete previous revision of the changed document
	ReturnOld *bool

	// If set to true, an empty object is returned as response if the document operation succeeds.
	// No meta-data is returned for the deleted document. If the operation raises an error, an error object is returned.
	// You can use this option to save network traffic.
	Silent *bool

	// RefillIndexCaches, if set to true, refills the in-memory index caches.
	RefillIndexCaches *bool
}

type CollectionDocumentDeleteResponse

type CollectionDocumentDeleteResponse struct {
	DocumentMeta          `json:",inline"`
	shared.ResponseStruct `json:",inline"`
	Old                   interface{}
}

type CollectionDocumentDeleteResponseReader

type CollectionDocumentDeleteResponseReader interface {
	Read(i interface{}) (CollectionDocumentDeleteResponse, error)
}

type CollectionDocumentRead

type CollectionDocumentRead interface {
	// ReadDocument reads a single document with given key from the collection.
	// The document data is stored into result, the document meta data is returned.
	// If no document exists with given key, a NotFoundError is returned.
	ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error)

	// ReadDocumentWithOptions reads a single document with given key from the collection.
	// The document data is stored into result, the document meta data is returned.
	// If no document exists with given key, a NotFoundError is returned.
	ReadDocumentWithOptions(ctx context.Context, key string, result interface{}, opts *CollectionDocumentReadOptions) (DocumentMeta, error)

	// ReadDocuments reads multiple documents with given keys from the collection.
	// The document data is stored into the value passed to each Read call on the returned reader,
	// and the documents meta data is returned.
	// If no document exists with a given key, a NotFoundError is returned at its errors index.
	ReadDocuments(ctx context.Context, keys []string) (CollectionDocumentReadResponseReader, error)

	// ReadDocumentsWithOptions reads multiple documents with given keys from the collection.
	// The document data is stored into the value passed to each Read call on the returned reader,
	// and the documents meta data is returned.
	// If no document exists with a given key, a NotFoundError is returned at its errors index.
	ReadDocumentsWithOptions(ctx context.Context, keys []string, opts *CollectionDocumentReadOptions) (CollectionDocumentReadResponseReader, error)
}
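
Reading a single document into a caller-defined struct (Book as in the create example above):

	var book Book
	meta, err := col.ReadDocument(ctx, "someKey", &book)
	if err != nil {
		// a NotFoundError is returned when the key does not exist
	}
	_ = meta.Rev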

type CollectionDocumentReadOptions

type CollectionDocumentReadOptions struct {
}

type CollectionDocumentReadResponse

type CollectionDocumentReadResponse struct {
	DocumentMeta
}

type CollectionDocumentReadResponseReader

type CollectionDocumentReadResponseReader interface {
	Read(i interface{}) (CollectionDocumentReadResponse, error)
}

type CollectionDocumentUpdate

type CollectionDocumentUpdate interface {

	// UpdateDocument updates a single document with given key in the collection.
	// The document meta data is returned.
	// If no document exists with given key, a NotFoundError is returned.
	UpdateDocument(ctx context.Context, key string, document interface{}) (CollectionDocumentUpdateResponse, error)

	// UpdateDocumentWithOptions updates a single document with given key in the collection.
	// The document meta data is returned.
	// If no document exists with given key, a NotFoundError is returned.
	UpdateDocumentWithOptions(ctx context.Context, key string, document interface{}, options *CollectionDocumentUpdateOptions) (CollectionDocumentUpdateResponse, error)

	// UpdateDocuments updates multiple documents with given keys in the collection.
	// The updates are loaded from the given documents slice, the documents meta data are returned.
	// If no document exists with a given key, a NotFoundError is returned at its errors index.
	// Each element in the documents slice must contain a `_key` field.
	UpdateDocuments(ctx context.Context, documents interface{}) (CollectionDocumentUpdateResponseReader, error)

	// UpdateDocumentsWithOptions updates multiple documents with given keys in the collection.
	// The updates are loaded from the given documents slice, the documents meta data are returned.
	// If no document exists with a given key, a NotFoundError is returned at its errors index.
	// Each element in the documents slice must contain a `_key` field.
	UpdateDocumentsWithOptions(ctx context.Context, documents interface{}, opts *CollectionDocumentUpdateOptions) (CollectionDocumentUpdateResponseReader, error)
}
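
A partial-update sketch; a map works well when only a few attributes change:

	res, err := col.UpdateDocument(ctx, "someKey", map[string]interface{}{
		"title": "ArangoDB, second edition",
	})
	if err != nil {
		// handle error
	}
	_ = res.Rev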

type CollectionDocumentUpdateOptions

type CollectionDocumentUpdateOptions struct {
	WithWaitForSync *bool
	NewObject       interface{}
	OldObject       interface{}
	// RefillIndexCaches, if set to true, refills the in-memory index caches.
	RefillIndexCaches *bool
}

type CollectionDocumentUpdateResponse

type CollectionDocumentUpdateResponse struct {
	DocumentMeta
	shared.ResponseStruct `json:",inline"`
	Old, New              interface{}
}

type CollectionDocumentUpdateResponseReader

type CollectionDocumentUpdateResponseReader interface {
	Read() (CollectionDocumentUpdateResponse, error)
}

type CollectionDocuments

type CollectionDocuments interface {
	// DocumentExists checks if a document with given key exists in the collection.
	DocumentExists(ctx context.Context, key string) (bool, error)

	CollectionDocumentCreate
	CollectionDocumentRead
	CollectionDocumentUpdate
	CollectionDocumentDelete
}

type CollectionExtendedInfo

type CollectionExtendedInfo struct {
	CollectionInfo
	// CacheEnabled set cacheEnabled option in collection properties.
	CacheEnabled bool `json:"cacheEnabled,omitempty"`
	KeyOptions   struct {
		// Type specifies the type of the key generator. The currently available generators are traditional and autoincrement.
		Type KeyGeneratorType `json:"type,omitempty"`
		// AllowUserKeys; if set to true, then it is allowed to supply own key values in the _key attribute of a document.
		// If set to false, then the key generator is solely responsible for generating keys and supplying own key values in
		// the _key attribute of documents is considered an error.
		AllowUserKeys bool `json:"allowUserKeys,omitempty"`
	} `json:"keyOptions,omitempty"`
	// Deprecated: use 'WriteConcern' instead.
	MinReplicationFactor int `json:"minReplicationFactor,omitempty"`
	// NumberOfShards is the number of shards of the collection.
	// Only available in cluster setup.
	NumberOfShards int `json:"numberOfShards,omitempty"`
	// This attribute specifies the name of the sharding strategy to use for the collection.
	// Cannot be changed after creation.
	ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"`
	// ShardKeys contains the names of document attributes that are used to determine the target shard for documents.
	// Only available in cluster setup.
	ShardKeys []string `json:"shardKeys,omitempty"`
	// ReplicationFactor contains how many copies of each shard are kept on different DBServers.
	// Only available in cluster setup.
	ReplicationFactor ReplicationFactor `json:"replicationFactor,omitempty"`
	// WaitForSync; If true then creating, changing or removing documents will wait
	// until the data has been synchronized to disk.
	WaitForSync bool `json:"waitForSync,omitempty"`
	// WriteConcern contains how many copies must be available before a collection can be written.
	// It is required that 1 <= WriteConcern <= ReplicationFactor.
	// Default is 1. Not available for satellite collections.
	// Available from 3.6 arangod version.
	WriteConcern int `json:"writeConcern,omitempty"`
}

CollectionExtendedInfo contains extended information about a collection.

type CollectionIndexes

type CollectionIndexes interface {
	// Index opens a connection to an existing index within the collection.
	// If no index with given name exists, a NotFoundError is returned.
	Index(ctx context.Context, name string) (IndexResponse, error)

	// IndexExists returns true if an index with given name exists within the collection.
	IndexExists(ctx context.Context, name string) (bool, error)

	// Indexes returns a list of all indexes in the collection.
	Indexes(ctx context.Context) ([]IndexResponse, error)

	// EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist.
	// Fields is a slice of attribute paths.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	// NOTE: 'hash' and 'skiplist' are mere aliases for the persistent index type nowadays.
	EnsurePersistentIndex(ctx context.Context, fields []string, options *CreatePersistentIndexOptions) (IndexResponse, bool, error)

	// EnsureGeoIndex creates a geo-spatial index in the collection, if it does not already exist.
	// Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location,
	// then a geo-spatial index on all documents is created using location as the path to the coordinates.
	// The value of the attribute must be a slice with at least two double values: the latitude (first value)
	// and the longitude (second value). All documents that do not have the attribute path, or whose value is not suitable, are ignored.
	// If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created
	// using latitude and longitude as the paths to the latitude and the longitude. The values of the latitude and
	// longitude attributes must each be a double. All documents that do not have the attribute paths, or whose values are not suitable, are ignored.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	EnsureGeoIndex(ctx context.Context, fields []string, options *CreateGeoIndexOptions) (IndexResponse, bool, error)

	// EnsureTTLIndex creates a TTL index in the collection, if it does not already exist.
	// expireAfter is the time interval (in seconds) from the point in time stored in the fields attribute after which the documents count as expired.
	// Can be set to 0 to let documents expire as soon as the server time passes the point in time stored in the document attribute, or to a higher number to delay the expiration.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	EnsureTTLIndex(ctx context.Context, fields []string, expireAfter int, options *CreateTTLIndexOptions) (IndexResponse, bool, error)

	// EnsureZKDIndex creates a ZKD multi-dimensional index for the collection, if it does not already exist.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	EnsureZKDIndex(ctx context.Context, fields []string, options *CreateZKDIndexOptions) (IndexResponse, bool, error)

	// EnsureInvertedIndex creates an inverted index in the collection, if it does not already exist.
	// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
	// Available in ArangoDB 3.10 and later.
	// InvertedIndexOptions is an obligatory parameter and must contain at least the `Fields` field.
	EnsureInvertedIndex(ctx context.Context, options *InvertedIndexOptions) (IndexResponse, bool, error)

	// DeleteIndex deletes an index from the collection.
	DeleteIndex(ctx context.Context, name string) error

	// DeleteIndexByID deletes an index from the collection.
	DeleteIndexByID(ctx context.Context, id string) error
}

CollectionIndexes provides access to the indexes in a single collection.
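
For example, ensuring a named persistent index over two attribute paths (a sketch; see CreatePersistentIndexOptions for the full option set):

	idx, created, err := col.EnsurePersistentIndex(ctx, []string{"name", "age"}, &arangodb.CreatePersistentIndexOptions{
		Name: "idx-name-age",
	})
	if err != nil {
		// handle error
	}
	_ = idx
	_ = created // true if newly created, false if the index already existed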

type CollectionInfo

type CollectionInfo struct {
	// The identifier of the collection.
	ID string `json:"id,omitempty"`
	// The name of the collection.
	Name string `json:"name,omitempty"`
	// The status of the collection
	Status CollectionStatus `json:"status,omitempty"`
	// The type of the collection
	Type CollectionType `json:"type,omitempty"`
	// If true then the collection is a system collection.
	IsSystem bool `json:"isSystem,omitempty"`
	// Globally unique name for the collection
	GloballyUniqueId string `json:"globallyUniqueId,omitempty"`
}

CollectionInfo contains basic information about a collection.

type CollectionKeyOptions

type CollectionKeyOptions struct {
	// If set to true, then it is allowed to supply own key values in the _key attribute of a document.
	// If set to false, then the key generator will solely be responsible for generating keys and supplying own
	// key values in the _key attribute of documents is considered an error.
	// Deprecated: Use AllowUserKeysPtr instead
	AllowUserKeys bool `json:"-"`
	// If set to true, then it is allowed to supply own key values in the _key attribute of a document.
	// If set to false, then the key generator will solely be responsible for generating keys and supplying own
	// key values in the _key attribute of documents is considered an error.
	AllowUserKeysPtr *bool `json:"allowUserKeys,omitempty"`
	// Specifies the type of the key generator. The currently available generators are traditional and autoincrement.
	Type KeyGeneratorType `json:"type,omitempty"`
	// Increment value for the autoincrement key generator. Not used for other key generator types.
	Increment int `json:"increment,omitempty"`
	// Initial offset value for autoincrement key generator. Not used for other key generator types.
	Offset int `json:"offset,omitempty"`
}

CollectionKeyOptions specifies ways for creating keys of a collection.

func (*CollectionKeyOptions) Init

func (c *CollectionKeyOptions) Init()

Init translates deprecated fields into current ones for backward compatibility

type CollectionProperties

type CollectionProperties struct {
	CollectionExtendedInfo
	// DoCompact specifies whether or not the collection will be compacted.
	DoCompact bool `json:"doCompact,omitempty"`
	// JournalSize is the maximal size setting for journals / datafiles in bytes.
	JournalSize int64 `json:"journalSize,omitempty"`
	// SmartJoinAttribute
	// See documentation for smart joins.
	// This requires ArangoDB Enterprise Edition.
	SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"`
	// This attribute specifies that the sharding of a collection follows that of another
	// one.
	DistributeShardsLike string `json:"distributeShardsLike,omitempty"`
	// This attribute specifies if the new format introduced in 3.7 is used for this
	// collection.
	UsesRevisionsAsDocumentIds bool `json:"usesRevisionsAsDocumentIds,omitempty"`
	// The following attribute specifies if the new MerkleTree based sync protocol
	// can be used on the collection.
	SyncByRevision bool `json:"syncByRevision,omitempty"`
	// Schema for collection validation
	Schema *CollectionSchemaOptions `json:"schema,omitempty"`
}

CollectionProperties contains extended information about a collection.

func (*CollectionProperties) IsSatellite

func (p *CollectionProperties) IsSatellite() bool

IsSatellite returns true if the collection is a satellite collection

type CollectionSchemaLevel

type CollectionSchemaLevel string
const (
	CollectionSchemaLevelNone     CollectionSchemaLevel = "none"
	CollectionSchemaLevelNew      CollectionSchemaLevel = "new"
	CollectionSchemaLevelModerate CollectionSchemaLevel = "moderate"
	CollectionSchemaLevelStrict   CollectionSchemaLevel = "strict"
)

type CollectionSchemaOptions

type CollectionSchemaOptions struct {
	Rule    interface{}           `json:"rule,omitempty"`
	Level   CollectionSchemaLevel `json:"level,omitempty"`
	Message string                `json:"message,omitempty"`
}

func (*CollectionSchemaOptions) LoadRule

func (d *CollectionSchemaOptions) LoadRule(data []byte) error
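
A sketch, assuming LoadRule unmarshals the raw JSON into the Rule field:

	schema := arangodb.CollectionSchemaOptions{
		Level:   arangodb.CollectionSchemaLevelModerate,
		Message: "document failed schema validation",
	}
	rule := []byte(`{"properties": {"title": {"type": "string"}}, "required": ["title"]}`)
	if err := schema.LoadRule(rule); err != nil {
		// the rule was not valid JSON
	}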

type CollectionShards

type CollectionShards struct {
	CollectionExtendedInfo
	// Set to create a smart edge or vertex collection.
	// This requires ArangoDB Enterprise Edition.
	IsSmart bool `json:"isSmart,omitempty"`

	// Shards is a list of shards that belong to the collection.
	// Each shard contains a list of DB servers where the first one is the leader and the rest are followers.
	Shards map[ShardID][]ServerID `json:"shards,omitempty"`

	// StatusString represents status as a string.
	StatusString string `json:"statusString,omitempty"`
}

CollectionShards contains shards information about a collection.

type CollectionStatistics

type CollectionStatistics struct {
	// The number of documents currently present in the collection.
	Count int64 `json:"count,omitempty"`
	// The maximal size of a journal or datafile in bytes.
	JournalSize int64 `json:"journalSize,omitempty"`
	Figures     struct {
		DataFiles struct {
			// The number of datafiles.
			Count int64 `json:"count,omitempty"`
			// The total filesize of datafiles (in bytes).
			FileSize int64 `json:"fileSize,omitempty"`
		} `json:"datafiles"`
		// The number of markers in the write-ahead log for this collection that have not been transferred to journals or datafiles.
		UncollectedLogfileEntries int64 `json:"uncollectedLogfileEntries,omitempty"`
		// The number of references to documents in datafiles that JavaScript code currently holds. This information can be used for debugging compaction and unload issues.
		DocumentReferences int64 `json:"documentReferences,omitempty"`
		CompactionStatus   struct {
			// The action that was performed when the compaction was last run for the collection. This information can be used for debugging compaction issues.
			Message string `json:"message,omitempty"`
			// The point in time the compaction for the collection was last executed. This information can be used for debugging compaction issues.
			Time time.Time `json:"time,omitempty"`
		} `json:"compactionStatus"`
		Compactors struct {
			// The number of compactor files.
			Count int64 `json:"count,omitempty"`
			// The total filesize of all compactor files (in bytes).
			FileSize int64 `json:"fileSize,omitempty"`
		} `json:"compactors"`
		Dead struct {
	// The number of dead documents. This includes document versions that have been deleted or replaced by a newer version. Documents deleted or replaced that are contained in the write-ahead log only are not reported in this figure.
			Count int64 `json:"count,omitempty"`
	// The total number of deletion markers. Deletion markers only contained in the write-ahead log are not reported in this figure.
			Deletion int64 `json:"deletion,omitempty"`
			// The total size in bytes used by all dead documents.
			Size int64 `json:"size,omitempty"`
		} `json:"dead"`
		Indexes struct {
			// The total number of indexes defined for the collection, including the pre-defined indexes (e.g. primary index).
			Count int64 `json:"count,omitempty"`
			// The total memory allocated for indexes in bytes.
			Size int64 `json:"size,omitempty"`
		} `json:"indexes"`
		ReadCache struct {
			// The number of revisions of this collection stored in the document revisions cache.
			Count int64 `json:"count,omitempty"`
			// The memory used for storing the revisions of this collection in the document revisions cache (in bytes). This figure does not include the document data but only mappings from document revision ids to cache entry locations.
			Size int64 `json:"size,omitempty"`
		} `json:"readcache"`
		// An optional string value that contains information about which object type is at the head of the collection's cleanup queue. This information can be used for debugging compaction and unload issues.
		WaitingFor string `json:"waitingFor,omitempty"`
		Alive      struct {
			// The number of currently active documents in all datafiles and journals of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
			Count int64 `json:"count,omitempty"`
			// The total size in bytes used by all active documents of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
			Size int64 `json:"size,omitempty"`
		} `json:"alive"`
		// The tick of the last marker that was stored in a journal of the collection. This might be 0 if the collection does not yet have a journal.
		LastTick int64 `json:"lastTick,omitempty"`
		Journals struct {
			// The number of journal files.
			Count int64 `json:"count,omitempty"`
			// The total filesize of all journal files (in bytes).
			FileSize int64 `json:"fileSize,omitempty"`
		} `json:"journals"`
		Revisions struct {
			// The number of revisions of this collection managed by the storage engine.
			Count int64 `json:"count,omitempty"`
			// The memory used for storing the revisions of this collection in the storage engine (in bytes). This figure does not include the document data but only mappings from document revision ids to storage engine datafile positions.
			Size int64 `json:"size,omitempty"`
		} `json:"revisions"`
	} `json:"figures"`
}

CollectionStatistics contains the number of documents and additional statistical information about a collection.

type CollectionStatus

type CollectionStatus int

CollectionStatus indicates the status of a collection.

type CollectionType

type CollectionType int

CollectionType is the type of a collection.

type CommitTransactionOptions

type CommitTransactionOptions struct{}

CommitTransactionOptions provides options for CommitTransaction. Currently unused.

type ConsolidationPolicy

type ConsolidationPolicy struct {
	// Type specifies the type of the ConsolidationPolicy. Depending on its value, the fields of either ConsolidationPolicyBytesAccum or ConsolidationPolicyTier apply.
	Type ConsolidationPolicyType `json:"type,omitempty"`

	ConsolidationPolicyBytesAccum
	ConsolidationPolicyTier
}

ConsolidationPolicy holds threshold values specifying when to consolidate view data. Semantics of the values depend on where they are used.

type ConsolidationPolicyBytesAccum

type ConsolidationPolicyBytesAccum struct {
	// Threshold, see ConsolidationTypeBytesAccum
	Threshold *float64 `json:"threshold,omitempty"`
}

ConsolidationPolicyBytesAccum contains fields used for ConsolidationPolicyTypeBytesAccum

type ConsolidationPolicyTier

type ConsolidationPolicyTier struct {
	// MinScore Filter out consolidation candidates with a score less than this. Default: 0
	MinScore *int64 `json:"minScore,omitempty"`

	// SegmentsMin The minimum number of segments that are evaluated as candidates for consolidation. Default: 1
	SegmentsMin *int64 `json:"segmentsMin,omitempty"`

	// SegmentsMax The maximum number of segments that are evaluated as candidates for consolidation. Default: 10
	SegmentsMax *int64 `json:"segmentsMax,omitempty"`

	// SegmentsBytesMax  The maximum allowed size of all consolidated segments in bytes. Default: 5368709120
	SegmentsBytesMax *int64 `json:"segmentsBytesMax,omitempty"`

	// SegmentsBytesFloor Defines the value (in bytes) to treat all smaller segments as equal for consolidation selection. Default: 2097152
	SegmentsBytesFloor *int64 `json:"segmentsBytesFloor,omitempty"`
}

ConsolidationPolicyTier contains fields used for ConsolidationPolicyTypeTier

type ConsolidationPolicyType

type ConsolidationPolicyType string

ConsolidationPolicyType strings for consolidation types

const (
	// ConsolidationPolicyTypeTier consolidate based on segment byte size and live document count as dictated by the customization attributes.
	ConsolidationPolicyTypeTier ConsolidationPolicyType = "tier"

	// ConsolidationPolicyTypeBytesAccum consolidate if and only if ({threshold} range [0.0, 1.0])
	// {threshold} > (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes,
	// i.e. the sum of all candidate segments' byte sizes is less than the total segment byte size multiplied by the {threshold}.
	ConsolidationPolicyTypeBytesAccum ConsolidationPolicyType = "bytes_accum"
)

type CreateCollectionOptions

type CreateCollectionOptions struct {
	// CacheEnabled set cacheEnabled option in collection properties
	CacheEnabled *bool `json:"cacheEnabled,omitempty"`
	// This field is used for internal purposes only. DO NOT USE.
	DistributeShardsLike string `json:"distributeShardsLike,omitempty"`
	// DoCompact checks if the collection will be compacted (default is true)
	DoCompact *bool `json:"doCompact,omitempty"`
	// The number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power
	// of 2 and less than or equal to 1024. For very large collections one should increase this to avoid long pauses when the hash
	// table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel.
	// For example, 64 might be a sensible value for a collection with 100 000 000 documents.
	// Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions.
	// Changes are applied when the collection is loaded the next time.
	IndexBuckets int `json:"indexBuckets,omitempty"`
	// Available from 3.9 ArangoD version.
	InternalValidatorType int `json:"internalValidatorType,omitempty"`
	// IsDisjoint set isDisjoint flag for Graph. Required ArangoDB 3.7+
	IsDisjoint bool `json:"isDisjoint,omitempty"`
	// Set to create a smart edge or vertex collection.
	// This requires ArangoDB Enterprise Edition.
	IsSmart bool `json:"isSmart,omitempty"`
	// If true, create a system collection. In this case collection-name should start with an underscore.
	// End users should normally create non-system collections only. API implementors may be required to create system
	// collections on very special occasions, but normally a regular collection will do. (The default is false)
	IsSystem bool `json:"isSystem,omitempty"`
	// If true then the collection data is kept in-memory only and not made persistent.
	// Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also
	// cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster
	// than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any
	// CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only,
	// and not for data that cannot be re-created otherwise. (The default is false)
	IsVolatile bool `json:"isVolatile,omitempty"`
	// The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MiB). (The default is a configuration parameter)
	JournalSize int `json:"journalSize,omitempty"`
	// Specifies how keys in the collection are created.
	KeyOptions *CollectionKeyOptions `json:"keyOptions,omitempty"`
	// Deprecated: use 'WriteConcern' instead
	MinReplicationFactor int `json:"minReplicationFactor,omitempty"`
	// In a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless. (default is 1)
	NumberOfShards int `json:"numberOfShards,omitempty"`
	// ReplicationFactor in a cluster (default is 1), this attribute determines how many copies of each shard are kept on different DBServers.
	// The value 1 means that only one copy (no synchronous replication) is kept.
	// A value of k means that k-1 replicas are kept. Any two copies reside on different DBServers.
	// Replication between them is synchronous, that is, every write operation to the "leader" copy will be replicated to all "follower" replicas,
	// before the write operation is reported successful. If a server fails, this is detected automatically
	// and one of the servers holding copies takes over, usually without an error being reported.
	ReplicationFactor ReplicationFactor `json:"replicationFactor,omitempty"`
	// Schema for collection validation
	Schema *CollectionSchemaOptions `json:"schema,omitempty"`
	// This attribute specifies the name of the sharding strategy to use for the collection.
	// Must be one of ShardingStrategy* values.
	ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"`
	// In a cluster, this attribute determines which document attributes are used to
	// determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes.
	// The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard.
	// Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
	// The default is []string{"_key"}.
	ShardKeys []string `json:"shardKeys,omitempty"`
	// This field must be set to the attribute that will be used for sharding or smart graphs.
	// All vertices are required to have this attribute set. Edges derive the attribute from their connected vertices.
	// This requires ArangoDB Enterprise Edition.
	SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"`
	// SmartJoinAttribute
	// In the specific case that the two collections have the same number of shards, the data of the two collections can
	// be co-located on the same server for the same shard key values. In this case the extra hop via the coordinator will not be necessary.
	// See documentation for smart joins.
	// This requires ArangoDB Enterprise Edition.
	SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"`
	// Available from 3.7 ArangoDB version
	SyncByRevision bool `json:"syncByRevision,omitempty"`
	// The type of the collection to create. (default is CollectionTypeDocument)
	Type CollectionType `json:"type,omitempty"`
	// If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
	WaitForSync bool `json:"waitForSync,omitempty"`
	// WriteConcern contains how many copies must be available before a collection can be written.
	// It is required that 1 <= WriteConcern <= ReplicationFactor.
	// Default is 1. Not available for satellite collections.
	// Available from 3.6 ArangoDB version.
	WriteConcern int `json:"writeConcern,omitempty"`
}

CreateCollectionOptions contains options that customize the creating of a collection.

func (*CreateCollectionOptions) Init

func (c *CreateCollectionOptions) Init()

Init translates deprecated fields into current ones for backward compatibility

type CreateDatabaseDefaultOptions

type CreateDatabaseDefaultOptions struct {
	// Default replication factor for collections in database
	ReplicationFactor ReplicationFactor `json:"replicationFactor,omitempty"`
	// Default write concern for collections in database
	WriteConcern int `json:"writeConcern,omitempty"`
	// Default sharding for collections in database
	Sharding DatabaseSharding `json:"sharding,omitempty"`
	// Replication version to use for this database
	// Available since ArangoDB version 3.11
	ReplicationVersion DatabaseReplicationVersion `json:"replicationVersion,omitempty"`
}

CreateDatabaseDefaultOptions contains options that change defaults for collections

type CreateDatabaseOptions

type CreateDatabaseOptions struct {
	// List of users to initially create for the new database. User information will not be changed for users that already exist.
	// If users is not specified or does not contain any users, a default user root will be created with an empty string password.
	// This ensures that the new database will be accessible after it is created.
	Users []CreateDatabaseUserOptions `json:"users,omitempty"`

	// Options database defaults
	Options CreateDatabaseDefaultOptions `json:"options,omitempty"`
}

CreateDatabaseOptions contains options that customize the creating of a database.

type CreateDatabaseUserOptions

type CreateDatabaseUserOptions struct {
	// Login name of the user to be created
	UserName string `json:"user,omitempty"`
	// The user password as a string. If not specified, it will default to an empty string.
	Password string `json:"passwd,omitempty"`
	// A flag indicating whether the user account should be activated or not. The default value is true. If set to false, the user won't be able to log into the database.
	Active *bool `json:"active,omitempty"`
	// A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB.
	Extra interface{} `json:"extra,omitempty"`
}

CreateDatabaseUserOptions contains options for creating a single user for a database.

type CreateGeoIndexOptions

type CreateGeoIndexOptions struct {
	// Name is an optional, user-defined name used for hints in AQL queries
	Name string `json:"name,omitempty"`

	// If a geo-spatial index on a location is constructed and GeoJSON is true, then the order within the array
	// is longitude followed by latitude. This corresponds to the format described in http://geojson.org/geojson-spec.html#positions
	GeoJSON *bool `json:"geoJson,omitempty"`

	// LegacyPolygons determines whether the to-be-created index should use legacy polygons.
	// It is only relevant if GeoJSON is set to true.
	// Old geo indexes from versions below 3.10 always implicitly have the legacyPolygons option set to true.
	// Newly generated geo indexes from 3.10 on have the legacyPolygons option set to false by default;
	// it can still be explicitly set to true to create a legacy index, but this is not recommended.
	LegacyPolygons *bool `json:"legacyPolygons,omitempty"`
}

CreateGeoIndexOptions contains specific options for creating a geo index.

type CreatePersistentIndexOptions

type CreatePersistentIndexOptions struct {
	// Name is an optional, user-defined name used for hints in AQL queries
	Name string `json:"name,omitempty"`

	// CacheEnabled if true, then the index will be cached in memory. Caching is turned off by default.
	CacheEnabled *bool `json:"cacheEnabled,omitempty"`

	// StoredValues specifies additional attributes to include in the index.
	// These additional attributes cannot be used for index lookups or sorts, but they can be used for projections.
	// There must be no overlap of attribute paths between `fields` and `storedValues`. The maximum number of values is 32.
	StoredValues []string `json:"storedValues,omitempty"`

	// Sparse controls the sparsity of persistent indexes.
	// The inverted, fulltext, and geo index types are sparse by definition.
	Sparse *bool `json:"sparse,omitempty"`

	// Unique is supported by persistent indexes. By default, all user-defined indexes are non-unique.
	// Only the attributes in fields are checked for uniqueness.
	// Any attributes in storedValues are not checked for their uniqueness.
	Unique *bool `json:"unique,omitempty"`

	// Deduplicate is supported by array indexes of type persistent. It controls whether inserting duplicate index
	// values from the same document into a unique array index will lead to a unique constraint error or not.
	// The default value is true, so only a single instance of each non-unique index value will be inserted into
	// the index per document.
	// Trying to insert a value into the index that already exists in the index will always fail,
	// regardless of the value of this attribute.
	Deduplicate *bool `json:"deduplicate,omitempty"`

	// Estimates determines if the to-be-created index should maintain selectivity estimates or not.
	// It is supported by indexes of type persistent.
	// This attribute controls whether index selectivity estimates are maintained for the index.
	// Not maintaining index selectivity estimates can have a slightly positive impact on write performance.
	// The downside of turning off index selectivity estimates will be that the query optimizer will not be able
	// to determine the usefulness of different competing indexes in AQL queries when there are multiple candidate
	// indexes to choose from. The estimates attribute is optional and defaults to true if not set.
	// It will have no effect on indexes other than persistent (with hash and skiplist being mere aliases for the persistent index type nowadays).
	Estimates *bool `json:"estimates,omitempty"`
}

CreatePersistentIndexOptions contains specific options for creating a persistent index. Note: "hash" and "skiplist" are only aliases for "persistent" with the RocksDB storage engine, which is the only storage engine since 3.7.

type CreateTTLIndexOptions

type CreateTTLIndexOptions struct {
	// Name is an optional, user-defined name used for hints in AQL queries
	Name string `json:"name,omitempty"`
}

CreateTTLIndexOptions provides specific options for creating a TTL index

type CreateZKDIndexOptions

type CreateZKDIndexOptions struct {
	// Name is an optional, user-defined name used for hints in AQL queries
	Name string `json:"name,omitempty"`

	// FieldValueTypes is required and the only allowed value is "double". Future extensions of the index will allow other types.
	FieldValueTypes ZKDFieldType `json:"fieldValueTypes,required"`
}

CreateZKDIndexOptions provides specific options for creating a ZKD index

type Cursor

type Cursor interface {
	io.Closer

	// CloseWithContext runs Close with the specified Context.
	CloseWithContext(ctx context.Context) error

	// HasMore returns true if the next call to ReadDocument does not return a NoMoreDocuments error.
	HasMore() bool

	// ReadDocument reads the next document from the cursor.
	// The document data is stored into result, the document meta data is returned.
	// If the cursor has no more documents, a NoMoreDocuments error is returned.
	// Note: If the query (resulting in this cursor) does not return documents,
	//       then the returned DocumentMeta will be empty.
	ReadDocument(ctx context.Context, result interface{}) (DocumentMeta, error)

	// Count returns the total number of result documents available.
	// A valid return value is only available when the cursor has been created with `Count` and not with `Stream`.
	Count() int64

	// Statistics returns the query execution statistics for this cursor.
	// This might not be valid if the cursor has been created with `Stream`
	Statistics() CursorStats

	// Plan returns the query execution plan for this cursor.
	Plan() CursorPlan
}

Cursor is returned from a query, used to iterate over a list of documents. Note that a Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed.
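
A typical iteration loop, assuming db.Query takes an AQL string plus optional query options and returns a Cursor (see DatabaseQuery below):

	cursor, err := db.Query(ctx, "FOR b IN books RETURN b", nil)
	if err != nil {
		// handle error
	}
	defer cursor.Close()

	for cursor.HasMore() {
		var book Book
		if _, err := cursor.ReadDocument(ctx, &book); err != nil {
			// handle error
		}
	}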

type CursorBatch

type CursorBatch interface {
	io.Closer

	// CloseWithContext runs Close with the specified Context.
	CloseWithContext(ctx context.Context) error

	// HasMoreBatches returns true if the next call to ReadNextBatch does not return a NoMoreDocuments error.
	HasMoreBatches() bool

	// ReadNextBatch reads the next batch of documents from the cursor.
	// The result must be a pointer to a slice of documents.
	// E.g. `var result []MyStruct`.
	ReadNextBatch(ctx context.Context, result interface{}) error

	// RetryReadBatch retries the last batch read made by ReadNextBatch.
	// The result must be a pointer to a slice of documents.
	// E.g. `var result []MyStruct`.
	RetryReadBatch(ctx context.Context, result interface{}) error

	// Count returns the total number of result documents available.
	// A valid return value is only available when the cursor has been created with `Count` and not with `Stream`.
	Count() int64

	// Statistics returns the query execution statistics for this cursor.
	// This might not be valid if the cursor has been created with `Stream`
	Statistics() CursorStats

	// Plan returns the query execution plan for this cursor.
	Plan() CursorPlan
}

CursorBatch is returned from a query, used to iterate over a list of documents. In contrast to Cursor, CursorBatch does not load all documents into memory, but returns them in batches and allows for retries in case of errors. Note that a Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed.
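
An iteration sketch, assuming cb is a CursorBatch obtained from a batch-variant query call:

	defer cb.Close()
	for cb.HasMoreBatches() {
		var batch []Book
		if err := cb.ReadNextBatch(ctx, &batch); err != nil {
			// RetryReadBatch can re-request the same batch after a transient error
			break
		}
		// process batch
	}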

type CursorPlan

type CursorPlan struct {
	// Nodes describes a nested list of the execution plan nodes.
	Nodes []CursorPlanNodes `json:"nodes,omitempty"`
	// Rules describes a list with the names of the applied optimizer rules.
	Rules []string `json:"rules,omitempty"`
	// Collections describes the list of collections involved in the query.
	Collections []CursorPlanCollection `json:"collections,omitempty"`
	// Variables describes the list of variables involved in the query.
	Variables []CursorPlanVariable `json:"variables,omitempty"`
	// EstimatedCost is an estimated cost of the query.
	EstimatedCost float64 `json:"estimatedCost,omitempty"`
	// EstimatedNrItems is an estimated number of results.
	EstimatedNrItems int `json:"estimatedNrItems,omitempty"`
	// IsModificationQuery describes whether the query contains write operations.
	IsModificationQuery bool `json:"isModificationQuery,omitempty"`
}

CursorPlan describes execution plan for a query.

type CursorPlanCollection

type CursorPlanCollection struct {
	// Name is a name of collection.
	Name string `json:"name"`
	// Type describes how the collection is used: read, write or exclusive.
	Type string `json:"type"`
}

CursorPlanCollection describes a collection involved in the query.

type CursorPlanNodes

type CursorPlanNodes map[string]interface{}

CursorPlanNodes describes map of nodes which take part in the execution.

type CursorPlanVariable

type CursorPlanVariable struct {
	// ID is a variable's id.
	ID int `json:"id"`
	// Name is a variable's name.
	Name string `json:"name"`
	// IsDataFromCollection is set to true when data comes from a collection.
	IsDataFromCollection bool `json:"isDataFromCollection"`
	// IsFullDocumentFromCollection is set to true when all data comes from a collection.
	IsFullDocumentFromCollection bool `json:"isFullDocumentFromCollection"`
}

CursorPlanVariable describes variable's settings.

type CursorStats

type CursorStats struct {
	// The total number of data-modification operations successfully executed.
	WritesExecutedInt int64 `json:"writesExecuted,omitempty"`
	// The total number of data-modification operations that were unsuccessful
	WritesIgnoredInt int64 `json:"writesIgnored,omitempty"`
	// The total number of documents iterated over when scanning a collection without an index.
	ScannedFullInt int64 `json:"scannedFull,omitempty"`
	// The total number of documents iterated over when scanning a collection using an index.
	ScannedIndexInt int64 `json:"scannedIndex,omitempty"`
	// The total number of documents that were removed after executing a filter condition in a FilterNode
	FilteredInt int64 `json:"filtered,omitempty"`
	// The total number of documents that matched the search condition if the query's final LIMIT statement were not present.
	FullCountInt int64 `json:"fullCount,omitempty"`
	// Query execution time (wall-clock time). The value will be set from the outside.
	ExecutionTimeInt float64 `json:"executionTime,omitempty"`

	HTTPRequests    uint64 `json:"httpRequests,omitempty"`
	PeakMemoryUsage uint64 `json:"peakMemoryUsage,omitempty"`

	// CursorsCreated the total number of cursor objects created during query execution. Cursor objects are created for index lookups.
	CursorsCreated uint64 `json:"cursorsCreated,omitempty"`
	// CursorsRearmed the total number of times an existing cursor object was repurposed.
	// Repurposing an existing cursor object is normally more efficient compared to destroying an existing cursor object
	// and creating a new one from scratch.
	CursorsRearmed uint64 `json:"cursorsRearmed,omitempty"`
	// CacheHits the total number of index entries read from in-memory caches for indexes of type edge or persistent.
	// This value will only be non-zero when reading from indexes that have an in-memory cache enabled,
	// and when the query allows using the in-memory cache (i.e. using equality lookups on all index attributes).
	CacheHits uint64 `json:"cacheHits,omitempty"`
	// CacheMisses the total number of cache read attempts for index entries that could not be served from in-memory caches for indexes of type edge or persistent.
	// This value will only be non-zero when reading from indexes that have an in-memory cache enabled,
	// the query allows using the in-memory cache (i.e. using equality lookups on all index attributes) and the looked up values are not present in the cache.
	CacheMisses uint64 `json:"cacheMisses,omitempty"`
}

CursorStats contains statistics about the execution of a query. TODO: all these int64 fields should be changed to uint64.

type Database

type Database interface {
	// Name returns the name of the database.
	Name() string

	// Info fetches information about the database.
	Info(ctx context.Context) (DatabaseInfo, error)

	// Remove removes the entire database.
	// If the database does not exist, a NotFoundError is returned.
	Remove(ctx context.Context) error

	DatabaseCollection
	DatabaseTransaction
	DatabaseQuery
}
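A minimal sketch of working with a Database handle (obtained from a client, as documented elsewhere in this package; assumes the usual context and fmt imports):

	func printDatabaseInfo(ctx context.Context, db arangodb.Database) error {
		info, err := db.Info(ctx)
		if err != nil {
			return err
		}
		fmt.Printf("database %q: id=%s system=%v\n", db.Name(), info.ID, info.IsSystem)
		return nil
	}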

type DatabaseCollection

type DatabaseCollection interface {
	// Collection opens a connection to an existing collection within the database.
	// If no collection with given name exists, a NotFoundError is returned.
	Collection(ctx context.Context, name string) (Collection, error)

	// CollectionExists returns true if a collection with given name exists within the database.
	CollectionExists(ctx context.Context, name string) (bool, error)

	// Collections returns a list of all collections in the database.
	Collections(ctx context.Context) ([]Collection, error)

	// CreateCollection creates a new collection with given name and options, and opens a connection to it.
	// If a collection with given name already exists within the database, a DuplicateError is returned.
	CreateCollection(ctx context.Context, name string, options *CreateCollectionOptions) (Collection, error)
}
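For example, a get-or-create helper built only from these methods (a sketch; passing nil CreateCollectionOptions is assumed to apply server defaults):

	func ensureCollection(ctx context.Context, db arangodb.Database, name string) (arangodb.Collection, error) {
		exists, err := db.CollectionExists(ctx, name)
		if err != nil {
			return nil, err
		}
		if exists {
			// Open the existing collection.
			return db.Collection(ctx, name)
		}
		// Create the collection with server defaults.
		return db.CreateCollection(ctx, name, nil)
	}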

type DatabaseInfo

type DatabaseInfo struct {
	// The identifier of the database.
	ID string `json:"id,omitempty"`
	// The name of the database.
	Name string `json:"name,omitempty"`
	// The filesystem path of the database.
	Path string `json:"path,omitempty"`
	// If true then the database is the _system database.
	IsSystem bool `json:"isSystem,omitempty"`
	// Default replication factor for collections in database
	ReplicationFactor ReplicationFactor `json:"replicationFactor,omitempty"`
	// Default write concern for collections in database
	WriteConcern int `json:"writeConcern,omitempty"`
	// Default sharding for collections in database
	Sharding DatabaseSharding `json:"sharding,omitempty"`
	// Replication version used for this database
	ReplicationVersion DatabaseReplicationVersion `json:"replicationVersion,omitempty"`
}

DatabaseInfo contains information about a database

type DatabaseQuery

type DatabaseQuery interface {
	// Query performs an AQL query, returning a cursor used to iterate over the returned documents.
	// Note that the returned Cursor must always be closed to avoid holding on to resources in the server once they are no longer needed.
	Query(ctx context.Context, query string, opts *QueryOptions) (Cursor, error)

	// QueryBatch performs an AQL query, returning a cursor used to iterate over the returned documents in batches.
	// In contrast to Query, QueryBatch does not load all documents into memory, but returns them in batches and allows for retries in case of errors.
	// Note that the returned CursorBatch must always be closed to avoid holding on to resources in the server once they are no longer needed.
	QueryBatch(ctx context.Context, query string, opts *QueryOptions, result interface{}) (CursorBatch, error)

	// ValidateQuery validates an AQL query.
	// When the query is valid, nil is returned; otherwise an error is returned.
	// The query is not executed.
	ValidateQuery(ctx context.Context, query string) error

	// ExplainQuery explains an AQL query and returns information about it.
	ExplainQuery(ctx context.Context, query string, bindVars map[string]interface{}, opts *ExplainQueryOptions) (ExplainQueryResult, error)
}
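A sketch of running a query with bind variables (the Cursor methods HasMore, ReadDocument, and Close are assumptions based on the Cursor interface documented earlier; the users collection is hypothetical):

	func adultUsers(ctx context.Context, db arangodb.Database) error {
		opts := &arangodb.QueryOptions{
			Count:     true,
			BatchSize: 100,
			BindVars:  map[string]interface{}{"age": 21},
		}
		cursor, err := db.Query(ctx, "FOR u IN users FILTER u.age >= @age RETURN u", opts)
		if err != nil {
			return err
		}
		defer cursor.Close()

		for cursor.HasMore() {
			var user map[string]interface{}
			if _, err := cursor.ReadDocument(ctx, &user); err != nil {
				return err
			}
			// ... use user
		}
		return nil
	}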

type DatabaseReplicationVersion

type DatabaseReplicationVersion string

DatabaseReplicationVersion defines the replication protocol version to use for this database. Available since ArangoDB version 3.11. Note: this feature is still considered experimental and should not be used in production.

const (
	DatabaseReplicationVersionOne DatabaseReplicationVersion = "1"
	DatabaseReplicationVersionTwo DatabaseReplicationVersion = "2"
)

type DatabaseSharding

type DatabaseSharding string
const (
	DatabaseShardingSingle DatabaseSharding = "single"
	DatabaseShardingNone   DatabaseSharding = ""
)

type DatabaseStreamingTransactions

type DatabaseStreamingTransactions interface {
	BeginTransaction(ctx context.Context, cols TransactionCollections, opts *BeginTransactionOptions) (TransactionID, error)
	CommitTransaction(ctx context.Context, tid TransactionID, opts *CommitTransactionOptions) error
	AbortTransaction(ctx context.Context, tid TransactionID, opts *AbortTransactionOptions) error

	TransactionStatus(ctx context.Context, tid TransactionID) (TransactionStatusRecord, error)
}

DatabaseStreamingTransactions provides access to the Streaming Transactions API
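A sketch of a begin/commit cycle with an abort on failure (nil options are assumed to mean defaults; the users collection and the doWork helper are hypothetical):

	func transfer(ctx context.Context, db arangodb.DatabaseStreamingTransactions) error {
		cols := arangodb.TransactionCollections{Write: []string{"users"}}
		tid, err := db.BeginTransaction(ctx, cols, nil)
		if err != nil {
			return err
		}
		// Perform document operations under this transaction ID
		// (how the ID is attached to requests is documented elsewhere).
		if err := doWork(ctx, tid); err != nil { // doWork is a hypothetical helper
			_ = db.AbortTransaction(ctx, tid, nil)
			return err
		}
		return db.CommitTransaction(ctx, tid, nil)
	}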

type DatabaseTransaction

type DatabaseTransaction interface {
	ListTransactions(ctx context.Context) ([]Transaction, error)
	ListTransactionsWithStatuses(ctx context.Context, statuses ...TransactionStatus) ([]Transaction, error)

	BeginTransaction(ctx context.Context, cols TransactionCollections, opts *BeginTransactionOptions) (Transaction, error)

	Transaction(ctx context.Context, id TransactionID) (Transaction, error)

	WithTransaction(ctx context.Context, cols TransactionCollections, opts *BeginTransactionOptions, commitOptions *CommitTransactionOptions, abortOptions *AbortTransactionOptions, w TransactionWrap) error
}
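WithTransaction wraps begin, commit, and abort around a callback; a sketch (commit-on-nil / abort-on-error behavior is an assumption based on the signature; the users collection is hypothetical):

	func withTx(ctx context.Context, db arangodb.Database) error {
		cols := arangodb.TransactionCollections{Write: []string{"users"}}
		return db.WithTransaction(ctx, cols, nil, nil, nil,
			func(ctx context.Context, t arangodb.Transaction) error {
				// Work done here is presumably committed when nil is returned
				// and aborted when an error is returned.
				return nil
			})
	}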

type DocumentID

type DocumentID string

type DocumentMeta

type DocumentMeta struct {
	Key string     `json:"_key,omitempty"`
	ID  DocumentID `json:"_id,omitempty"`
	Rev string     `json:"_rev,omitempty"`
}

DocumentMeta contains all metadata used to identify a document.

type DocumentMetaSlice

type DocumentMetaSlice []DocumentMeta

DocumentMetaSlice is a slice of DocumentMeta elements

func (DocumentMetaSlice) IDs

func (l DocumentMetaSlice) IDs() []DocumentID

IDs returns the IDs of all elements.

func (DocumentMetaSlice) Keys

func (l DocumentMetaSlice) Keys() []string

Keys returns the keys of all elements.

func (DocumentMetaSlice) Revs

func (l DocumentMetaSlice) Revs() []string

Revs returns the revisions of all elements.

type EngineInfo

type EngineInfo struct {
	Type EngineType `json:"name"`
}

EngineInfo contains information about the database engine being used.

type EngineType

type EngineType string

EngineType indicates type of database engine being used.

func (EngineType) String

func (t EngineType) String() string

type ExplainQueryOptimizerOptions

type ExplainQueryOptimizerOptions struct {
	// A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute,
	// telling the optimizer to include or exclude specific rules.
	//  To disable a rule, prefix its name with a "-", to enable a rule, prefix it with a "+".
	// There is also a pseudo-rule "all", which matches all optimizer rules. "-all" disables all rules.
	Rules []string `json:"rules,omitempty"`
}

type ExplainQueryOptions

type ExplainQueryOptions struct {
	// If set to true, all possible execution plans will be returned.
	// The default is false, meaning only the optimal plan will be returned.
	AllPlans bool `json:"allPlans,omitempty"`

	// An optional maximum number of plans that the optimizer is allowed to generate.
	// Setting this attribute to a low value puts a cap on the amount of work the optimizer does.
	MaxNumberOfPlans *int `json:"maxNumberOfPlans,omitempty"`

	// Options related to the query optimizer.
	Optimizer ExplainQueryOptimizerOptions `json:"optimizer,omitempty"`
}

type ExplainQueryResult

type ExplainQueryResult struct {
	Plan  ExplainQueryResultPlan   `json:"plan,omitempty"`
	Plans []ExplainQueryResultPlan `json:"plans,omitempty"`
	// List of warnings that occurred during optimization or execution plan creation
	Warnings []string `json:"warnings,omitempty"`
	// Info about optimizer statistics
	Stats ExplainQueryResultExecutionStats `json:"stats,omitempty"`
	// Cacheable states whether the query results can be cached on the server if the query result cache were used.
	// This attribute is not present when allPlans is set to true.
	Cacheable *bool `json:"cacheable,omitempty"`
}
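A sketch of explaining a query and inspecting the optimal plan (assumes the usual context and fmt imports; the users collection and the use-indexes rule name are illustrative):

	func explain(ctx context.Context, db arangodb.Database) error {
		opts := &arangodb.ExplainQueryOptions{
			Optimizer: arangodb.ExplainQueryOptimizerOptions{
				Rules: []string{"-all", "+use-indexes"},
			},
		}
		bindVars := map[string]interface{}{"age": 21}
		res, err := db.ExplainQuery(ctx, "FOR u IN users FILTER u.age >= @age RETURN u", bindVars, opts)
		if err != nil {
			return err
		}
		fmt.Printf("cost=%f items=%d rules=%v\n",
			res.Plan.EstimatedCost, res.Plan.EstimatedNrItems, res.Plan.Rules)
		return nil
	}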

type ExplainQueryResultExecutionCollection

type ExplainQueryResultExecutionCollection struct {
	Name string `json:"name"`
	Type string `json:"type"`
}

type ExplainQueryResultExecutionNodeRaw

type ExplainQueryResultExecutionNodeRaw map[string]interface{}

type ExplainQueryResultExecutionStats

type ExplainQueryResultExecutionStats struct {
	RulesExecuted   int     `json:"rulesExecuted,omitempty"`
	RulesSkipped    int     `json:"rulesSkipped,omitempty"`
	PlansCreated    int     `json:"plansCreated,omitempty"`
	PeakMemoryUsage uint64  `json:"peakMemoryUsage,omitempty"`
	ExecutionTime   float64 `json:"executionTime,omitempty"`
}

type ExplainQueryResultExecutionVariable

type ExplainQueryResultExecutionVariable struct {
	ID                           int    `json:"id"`
	Name                         string `json:"name"`
	IsDataFromCollection         bool   `json:"isDataFromCollection"`
	IsFullDocumentFromCollection bool   `json:"isFullDocumentFromCollection"`
}

type ExplainQueryResultPlan

type ExplainQueryResultPlan struct {
	// Execution nodes of the plan.
	NodesRaw []ExplainQueryResultExecutionNodeRaw `json:"nodes,omitempty"`
	// List of rules the optimizer applied
	Rules []string `json:"rules,omitempty"`
	// List of collections used in the query
	Collections []ExplainQueryResultExecutionCollection `json:"collections,omitempty"`
	// List of variables used in the query (note: this may contain internal variables created by the optimizer)
	Variables []ExplainQueryResultExecutionVariable `json:"variables,omitempty"`
	// The total estimated cost for the plan. If there are multiple plans, the optimizer will choose the plan with the lowest total cost
	EstimatedCost float64 `json:"estimatedCost,omitempty"`
	// The estimated number of results.
	EstimatedNrItems int `json:"estimatedNrItems,omitempty"`
}

type IndexOptions

type IndexOptions struct {
	// Fields returns a list of attributes of this index.
	Fields []string `json:"fields,omitempty"`

	// Estimates determines if the to-be-created index should maintain selectivity estimates or not - PersistentIndex only
	Estimates *bool `json:"estimates,omitempty"`

	// SelectivityEstimate determines the selectivity estimate value of the index - PersistentIndex only
	SelectivityEstimate float64 `json:"selectivityEstimate,omitempty"`

	// MinLength returns min length for this index if set.
	MinLength *int `json:"minLength,omitempty"`

	// Deduplicate returns deduplicate setting of this index.
	Deduplicate *bool `json:"deduplicate,omitempty"`

	// ExpireAfter returns an expiry after for this index if set.
	ExpireAfter *int `json:"expireAfter,omitempty"`

	// CacheEnabled if true, then the index will be cached in memory. Caching is turned off by default.
	CacheEnabled *bool `json:"cacheEnabled,omitempty"`

	// StoredValues returns a list of stored values for this index - PersistentIndex only
	StoredValues []string `json:"storedValues,omitempty"`

	// GeoJSON returns if geo json was set for this index or not.
	GeoJSON *bool `json:"geoJson,omitempty"`

	// LegacyPolygons returns if legacy polygons was set for this index or not before 3.10 - GeoIndex only
	LegacyPolygons *bool `json:"legacyPolygons,omitempty"`
}

IndexOptions contains the information about a regular index type.

type IndexResponse

type IndexResponse struct {
	// Name optional user defined name used for hints in AQL queries
	Name string `json:"name,omitempty"`

	// Type returns the type of the index
	Type IndexType `json:"type"`

	IndexSharedOptions `json:",inline"`

	// RegularIndex is the regular index object. It is empty for the InvertedIndex type.
	RegularIndex *IndexOptions `json:"indexes"`

	// InvertedIndex is the inverted index object. It is not empty only for InvertedIndex type.
	InvertedIndex *InvertedIndexOptions `json:"invertedIndexes"`
}

IndexResponse is the response from the Index list method

func (*IndexResponse) UnmarshalJSON

func (i *IndexResponse) UnmarshalJSON(data []byte) error

type IndexSharedOptions

type IndexSharedOptions struct {
	// ID returns the ID of the index. Effectively this is `<collection-name>/<index.Name()>`.
	ID string `json:"id,omitempty"`

	// Unique is supported by persistent indexes. By default, all user-defined indexes are non-unique.
	// Only the attributes in fields are checked for uniqueness.
	// Attributes from storedValues are not checked for uniqueness.
	Unique *bool `json:"unique,omitempty"`

	// Sparse controls the sparsity of persistent indexes.
	// The inverted, fulltext, and geo index types are sparse by definition.
	Sparse *bool `json:"sparse,omitempty"`

	// IsNewlyCreated returns if this index was newly created or pre-existing.
	IsNewlyCreated *bool `json:"isNewlyCreated,omitempty"`
}

IndexSharedOptions contains options that are shared between all index types

type IndexType

type IndexType string

IndexType represents an index type as a string.

type InvertedIndexField

type InvertedIndexField struct {
	// Name (Required) An attribute path. The '.' character denotes sub-attributes.
	Name string `json:"name"`

	// Analyzer indicating the name of an analyzer instance
	// Default: the value defined by the top-level analyzer option, or if not set, the default identity Analyzer.
	Analyzer string `json:"analyzer,omitempty"`

	// Features is a list of Analyzer features to use for this field. They define what features are enabled for the analyzer
	Features []AnalyzerFeature `json:"features,omitempty"`

	// IncludeAllFields This option only applies if you use the inverted index in a search-alias Views.
	// If set to true, then all sub-attributes of this field are indexed, excluding any sub-attributes that are configured separately by other elements in the fields array (and their sub-attributes). The analyzer and features properties apply to the sub-attributes.
	// If set to false, then sub-attributes are ignored. The default value is defined by the top-level includeAllFields option, or false if not set.
	IncludeAllFields *bool `json:"includeAllFields,omitempty"`

	// SearchField This option only applies if you use the inverted index in a search-alias Views.
	// You can set the option to true to get the same behavior as with arangosearch Views regarding the indexing of array values for this field. If enabled, both array and primitive values (strings, numbers, etc.) are accepted. Every element of an array is indexed according to the trackListPositions option.
	// If set to false, it depends on the attribute path. If it explicitly expands an array ([*]), then the elements are indexed separately. Otherwise, the array is indexed as a whole, but only geopoint and aql Analyzers accept array inputs. You cannot use an array expansion if searchField is enabled.
	// Default: the value defined by the top-level searchField option, or false if not set.
	SearchField *bool `json:"searchField,omitempty"`

	// TrackListPositions This option only applies if you use the inverted index in a search-alias Views.
	// If set to true, then track the value position in arrays for array values. For example, when querying a document like { attr: [ "valueX", "valueY", "valueZ" ] }, you need to specify the array element, e.g. doc.attr[1] == "valueY".
	// If set to false, all values in an array are treated as equal alternatives. You don’t specify an array element in queries, e.g. doc.attr == "valueY", and all elements are searched for a match.
	// Default: the value defined by the top-level trackListPositions option, or false if not set.
	TrackListPositions bool `json:"trackListPositions,omitempty"`

	// Cache - Enable this option to always cache the field normalization values in memory for this specific field
	// Default: the value defined by the top-level 'cache' option.
	Cache *bool `json:"cache,omitempty"`

	// Nested Index the specified sub-objects that are stored in an array.
	// Other than with the fields property, the values get indexed in a way that lets you query for co-occurring values.
	// For example, you can search the sub-objects and all the conditions need to be met by a single sub-object instead of across all of them.
	// Enterprise-only feature
	Nested []InvertedIndexNestedField `json:"nested,omitempty"`
}

InvertedIndexField contains configuration for indexing of the field

type InvertedIndexNestedField

type InvertedIndexNestedField struct {
	// Name An attribute path. The . character denotes sub-attributes.
	Name string `json:"name"`

	// Analyzer indicating the name of an analyzer instance
	// Default: the value defined by the top-level analyzer option, or if not set, the default identity Analyzer.
	Analyzer string `json:"analyzer,omitempty"`

	// Features is a list of Analyzer features to use for this field. They define what features are enabled for the analyzer
	Features []AnalyzerFeature `json:"features,omitempty"`

	// SearchField This option only applies if you use the inverted index in a search-alias Views.
	// You can set the option to true to get the same behavior as with arangosearch Views regarding the indexing of array values for this field. If enabled, both array and primitive values (strings, numbers, etc.) are accepted. Every element of an array is indexed according to the trackListPositions option.
	// If set to false, it depends on the attribute path. If it explicitly expands an array ([*]), then the elements are indexed separately. Otherwise, the array is indexed as a whole, but only geopoint and aql Analyzers accept array inputs. You cannot use an array expansion if searchField is enabled.
	// Default: the value defined by the top-level searchField option, or false if not set.
	SearchField *bool `json:"searchField,omitempty"`

	// Cache - Enable this option to always cache the field normalization values in memory for this specific field
	// Default: the value defined by the top-level 'cache' option.
	Cache *bool `json:"cache,omitempty"`

	// Nested - Index the specified sub-objects that are stored in an array.
	// Other than with the fields property, the values get indexed in a way that lets you query for co-occurring values.
	// For example, you can search the sub-objects and all the conditions need to be met by a single sub-object instead of across all of them.
	// Enterprise-only feature
	Nested []InvertedIndexNestedField `json:"nested,omitempty"`
}

InvertedIndexNestedField contains sub-object configuration for indexing of the field

type InvertedIndexOptions

type InvertedIndexOptions struct {
	// Name optional user defined name used for hints in AQL queries
	Name string `json:"name,omitempty"`

	// Fields contains the properties for individual fields of the element.
	// Each entry configures one field, identified by its Name.
	// Required: true
	Fields []InvertedIndexField `json:"fields,omitempty"`

	// SearchField This option only applies if you use the inverted index in a search-alias Views.
	// You can set the option to true to get the same behavior as with arangosearch Views regarding the indexing of array values as the default.
	// If enabled, both array and primitive values (strings, numbers, etc.) are accepted. Every element of an array is indexed according to the trackListPositions option.
	// If set to false, it depends on the attribute path. If it explicitly expands an array ([*]), then the elements are indexed separately.
	// Otherwise, the array is indexed as a whole, but only geopoint and aql Analyzers accept array inputs.
	// You cannot use an array expansion if searchField is enabled.
	SearchField *bool `json:"searchField,omitempty"`

	// Cache - Enable this option to always cache the field normalization values in memory for all fields by default.
	Cache *bool `json:"cache,omitempty"`

	// StoredValues The optional storedValues attribute can contain an array of paths to additional attributes to store in the index.
	// These additional attributes cannot be used for index lookups or for sorting, but they can be used for projections.
	// This allows an index to fully cover more queries and avoid extra document lookups.
	StoredValues []StoredValue `json:"storedValues,omitempty"`

	// PrimarySort You can define a primary sort order to enable an AQL optimization.
	// If a query iterates over all documents of a collection, wants to sort them by attribute values, and the (left-most) fields to sort by,
	// as well as their sorting direction, match the primarySort definition, then the SORT operation is optimized away.
	PrimarySort *PrimarySort `json:"primarySort,omitempty"`

	// PrimaryKeyCache Enable this option to always cache the primary key column in memory.
	// This can improve the performance of queries that return many documents.
	PrimaryKeyCache *bool `json:"primaryKeyCache,omitempty"`

	// Analyzer  The name of an Analyzer to use by default. This Analyzer is applied to the values of the indexed
	// fields for which you don’t define Analyzers explicitly.
	Analyzer string `json:"analyzer,omitempty"`

	// Features list of analyzer features. You can set this option to overwrite what features are enabled for the default analyzer
	Features []AnalyzerFeature `json:"features,omitempty"`

	// IncludeAllFields If set to true, all fields of this element will be indexed. Defaults to false.
	// Warning: Using includeAllFields for a lot of attributes in combination with complex Analyzers
	// may significantly slow down the indexing process.
	IncludeAllFields *bool `json:"includeAllFields,omitempty"`

	// TrackListPositions track the value position in arrays for array values.
	TrackListPositions bool `json:"trackListPositions,omitempty"`

	// Parallelism - The number of threads to use for indexing the fields. Default: 2
	Parallelism *int `json:"parallelism,omitempty"`

	// CleanupIntervalStep Wait at least this many commits between removing unused files in the ArangoSearch data directory
	// (default: 2, to disable use: 0).
	CleanupIntervalStep *int64 `json:"cleanupIntervalStep,omitempty"`

	// CommitIntervalMsec Wait at least this many milliseconds between committing View data store changes and making
	// documents visible to queries (default: 1000, to disable use: 0).
	CommitIntervalMsec *int64 `json:"commitIntervalMsec,omitempty"`

	// ConsolidationIntervalMsec Wait at least this many milliseconds between applying ‘consolidationPolicy’ to consolidate View data store
	// and possibly release space on the filesystem (default: 1000, to disable use: 0).
	ConsolidationIntervalMsec *int64 `json:"consolidationIntervalMsec,omitempty"`

	// ConsolidationPolicy The consolidation policy to apply for selecting which segments should be merged (default: {}).
	ConsolidationPolicy *ConsolidationPolicy `json:"consolidationPolicy,omitempty"`

	// WriteBufferIdle Maximum number of writers (segments) cached in the pool (default: 64, use 0 to disable)
	WriteBufferIdle *int64 `json:"writebufferIdle,omitempty"`

	// WriteBufferActive Maximum number of concurrent active writers (segments) that perform a transaction.
	// Other writers (segments) wait till current active writers (segments) finish (default: 0, use 0 to disable)
	WriteBufferActive *int64 `json:"writebufferActive,omitempty"`

	// WriteBufferSizeMax Maximum memory byte size per writer (segment) before a writer (segment) flush is triggered.
	// 0 value turns off this limit for any writer (buffer) and data will be flushed periodically based on the value defined for the flush thread (ArangoDB server startup option).
	// 0 value should be used carefully due to high potential memory consumption (default: 33554432, use 0 to disable)
	WriteBufferSizeMax *int64 `json:"writebufferSizeMax,omitempty"`

	// OptimizeTopK is an array of strings defining optimized sort expressions.
	// Introduced in v3.11.0, Enterprise Edition only.
	OptimizeTopK []string `json:"optimizeTopK,omitempty"`
}

InvertedIndexOptions provides specific options for creating an inverted index
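For example, options for an inverted index over a text field with a primary sort order might look like this (a sketch; text_en is one of ArangoDB's built-in text Analyzers, and the collection method that consumes these options is documented with the Collection indexes API):

	opts := arangodb.InvertedIndexOptions{
		Name: "inv-description",
		Fields: []arangodb.InvertedIndexField{
			{Name: "description", Analyzer: "text_en"},
		},
		PrimarySort: &arangodb.PrimarySort{
			Fields: []arangodb.PrimarySortEntry{
				{Field: "createdAt", Ascending: false},
			},
			Compression: arangodb.PrimarySortCompressionLz4,
		},
	}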

type KeyGeneratorType

type KeyGeneratorType string

KeyGeneratorType is the type of key generator, used in `CollectionKeyOptions`.

type LogLevels

type LogLevels map[string]string

LogLevels is a map of topics to log level.

type LogLevelsGetOptions

type LogLevelsGetOptions struct {
	// ServerID describes log levels for a specific server ID.
	ServerID ServerID
}

LogLevelsGetOptions describes log levels get options.

type LogLevelsSetOptions

type LogLevelsSetOptions struct {
	// ServerID describes log levels for a specific server ID.
	ServerID ServerID
}

LogLevelsSetOptions describes log levels set options.

type PrimarySort

type PrimarySort struct {
	// Fields (Required) - An array of the fields to sort the index by and the direction to sort each field in.
	Fields []PrimarySortEntry `json:"fields,omitempty"`

	// Compression Defines how to compress the primary sort data
	Compression PrimarySortCompression `json:"compression,omitempty"`

	// Cache - Enable this option to always cache the primary sort columns in memory.
	// This can improve the performance of queries that utilize the primary sort order.
	Cache *bool `json:"cache,omitempty"`
}

PrimarySort defines compression and list of fields to be sorted

type PrimarySortCompression

type PrimarySortCompression string

PrimarySortCompression defines how to compress the primary sort data (introduced in v3.7.1).

const (
	// PrimarySortCompressionLz4 (default): use LZ4 fast compression.
	PrimarySortCompressionLz4 PrimarySortCompression = "lz4"

	// PrimarySortCompressionNone disable compression to trade space for speed.
	PrimarySortCompressionNone PrimarySortCompression = "none"
)

type PrimarySortEntry

type PrimarySortEntry struct {
	// Field An attribute path. The . character denotes sub-attributes.
	Field string `json:"field,required"`

	// Ascending The sorting direction
	Ascending bool `json:"asc,required"`
}

PrimarySortEntry describes a field to sort the index by and the sorting direction.

type QueryOptions

type QueryOptions struct {
	// indicates whether the number of documents in the result set should be returned in the "count" attribute of the result.
	// Calculating the "count" attribute might have a performance impact for some queries in the future so this option is
	// turned off by default, and "count" is only returned when requested.
	Count bool `json:"count,omitempty"`
	// maximum number of result documents to be transferred from the server to the client in one roundtrip.
	// If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 is disallowed.
	BatchSize int `json:"batchSize,omitempty"`
	// flag to determine whether the AQL query cache shall be used. If set to false, then any query cache lookup
	// will be skipped for the query. If set to true, it will lead to the query cache being checked for the query
	// if the query cache mode is either on or demand.
	Cache bool `json:"cache,omitempty"`
	// the maximum amount of memory (measured in bytes) that the query is allowed to use. If set, then the query will fail
	// with error "resource limit exceeded" in case it allocates too much memory. A value of 0 indicates that there is no memory limit.
	MemoryLimit int64 `json:"memoryLimit,omitempty"`
	// The time-to-live for the cursor (in seconds). The cursor will be removed on the server automatically after the specified
	// amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients.
	// If not set, a server-defined value will be used.
	TTL float64 `json:"ttl,omitempty"`
	// key/value pairs representing the bind parameters.
	BindVars map[string]interface{} `json:"bindVars,omitempty"`
	Options  QuerySubOptions        `json:"options,omitempty"`
}

type QueryRequest

type QueryRequest struct {
	Query string `json:"query"`
}

type QuerySubOptions

type QuerySubOptions struct {
	// ShardId query option
	ShardIds []string `json:"shardIds,omitempty"`
	// Profile If set to 1, then the additional query profiling information is returned in the profile sub-attribute
	// of the extra return attribute, unless the query result is served from the query cache.
	// If set to 2, the query includes execution stats per query plan node in stats.nodes
	// sub-attribute of the extra return attribute.
	// Additionally, the query plan is returned in the extra.plan sub-attribute.
	Profile uint `json:"profile,omitempty"`
	// Optimizer contains options related to the query optimizer.
	Optimizer QuerySubOptionsOptimizer `json:"optimizer,omitempty"`
	// This Enterprise Edition parameter configures how long a DBServer has to bring the satellite collections
	// involved in the query into sync. The default value is 60.0 (seconds). When the max time has been reached, the query is stopped.
	SatelliteSyncWait float64 `json:"satelliteSyncWait,omitempty"`
	// if set to true and the query contains a LIMIT clause, then the result will have an extra attribute with the sub-attributes
	// stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } } }. The fullCount attribute will contain the number
	// of documents in the result before the last LIMIT in the query was applied. It can be used to count the number of documents
	// that match certain filter criteria, but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint.
	// Note that setting the option will disable a few LIMIT optimizations and may lead to more documents being processed, and
	// thus make queries run longer. Note that the fullCount attribute will only be present in the result if the query has a LIMIT clause
	// and the LIMIT clause is actually used in the query.
	FullCount bool `json:"fullCount,omitempty"`
	// Limits the maximum number of plans that are created by the AQL query optimizer.
	MaxPlans int `json:"maxPlans,omitempty"`
	// Specify true and the query will be executed in a streaming fashion. The query result is not stored on
	// the server, but calculated on the fly. Beware: long-running queries will need to hold the collection
	// locks for as long as the query cursor exists. When set to false a query will be executed right away in
	// its entirety.
	Stream bool `json:"stream,omitempty"`
	// MaxRuntime specify the timeout which can be used to kill a query on the server after the specified
	// amount in time. The timeout value is specified in seconds. A value of 0 means no timeout will be enforced.
	MaxRuntime float64 `json:"maxRuntime,omitempty"`
	// FillBlockCache if is set to true or not specified, this will make the query store the data it reads via the RocksDB storage engine in the RocksDB block cache.
	// This is usually the desired behavior. The option can be set to false for queries that are known to either read a lot of data which would thrash the block cache,
	// or for queries that read data which are known to be outside of the hot set. By setting the option to false, data read by the query will not make it into
	// the RocksDB block cache if not already in there, thus leaving more room for the actual hot set.
	FillBlockCache bool `json:"fillBlockCache,omitempty"`
	// AllowRetry If set to `true`, ArangoDB will store cursor results in such a way
	// that batch reads can be retried in the case of a communication error.
	AllowRetry bool `json:"allowRetry,omitempty"`
}
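For example, to request the pre-LIMIT match count alongside paginated results (a sketch; the count then surfaces in CursorStats.FullCountInt, documented above):

	opts := &arangodb.QueryOptions{
		BatchSize: 10,
		Options: arangodb.QuerySubOptions{
			FullCount: true, // report the number of matches before the final LIMIT
		},
	}
	// Pass opts to Database.Query or Database.QueryBatch; the full count is
	// then expected in the cursor statistics (CursorStats.FullCountInt).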

type QuerySubOptionsOptimizer

type QuerySubOptionsOptimizer struct {
	// A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute,
	// telling the optimizer to include or exclude specific rules.
	// To disable a rule, prefix its name with a -, to enable a rule, prefix it with a +.
	// There is also a pseudo-rule all, which will match all optimizer rules.
	Rules []string `json:"rules,omitempty"`
}

QuerySubOptionsOptimizer describes optimizer settings for AQL queries.

type ReplicationFactor

type ReplicationFactor int
const (
	// ReplicationFactorSatellite represents a satellite collection's replication factor
	ReplicationFactorSatellite ReplicationFactor = -1
)

func (ReplicationFactor) MarshalJSON

func (r ReplicationFactor) MarshalJSON() ([]byte, error)

MarshalJSON marshals ReplicationFactor to its ArangoDB JSON representation.

func (*ReplicationFactor) UnmarshalJSON

func (r *ReplicationFactor) UnmarshalJSON(d []byte) error

UnmarshalJSON unmarshals ReplicationFactor from its ArangoDB JSON representation.
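A sketch of the expected round trip (assumes the encoding/json import; the "satellite" string encoding is an assumption based on ArangoDB's JSON convention for satellite collections, while numeric factors are expected to encode as plain numbers):

	rf := arangodb.ReplicationFactorSatellite
	b, err := json.Marshal(rf) // expected to yield "satellite" (assumption)
	if err != nil {
		// handle error
	}
	var out arangodb.ReplicationFactor
	err = json.Unmarshal(b, &out) // expected: out == arangodb.ReplicationFactorSatellite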

type Requests

type Requests interface {
	Get(ctx context.Context, output interface{}, urlParts ...string) (connection.Response, error)
	Post(ctx context.Context, output, input interface{}, urlParts ...string) (connection.Response, error)
	Put(ctx context.Context, output, input interface{}, urlParts ...string) (connection.Response, error)
	Delete(ctx context.Context, output interface{}, urlParts ...string) (connection.Response, error)
	Head(ctx context.Context, output interface{}, urlParts ...string) (connection.Response, error)
	Patch(ctx context.Context, output, input interface{}, urlParts ...string) (connection.Response, error)
}

func NewRequests

func NewRequests(connection connection.Connection, urlParts ...string) Requests

type ServerHealth

type ServerHealth struct {
	Endpoint            string           `json:"Endpoint"`
	LastHeartbeatAcked  time.Time        `json:"LastHeartbeatAcked"`
	LastHeartbeatSent   time.Time        `json:"LastHeartbeatSent"`
	LastHeartbeatStatus string           `json:"LastHeartbeatStatus"`
	Role                ServerRole       `json:"Role"`
	ShortName           string           `json:"ShortName"`
	Status              ServerStatus     `json:"Status"`
	CanBeDeleted        bool             `json:"CanBeDeleted"`
	HostID              string           `json:"Host,omitempty"`
	Version             Version          `json:"Version,omitempty"`
	Engine              EngineType       `json:"Engine,omitempty"`
	SyncStatus          ServerSyncStatus `json:"SyncStatus,omitempty"`

	// Only for Coordinators
	AdvertisedEndpoint *string `json:"AdvertisedEndpoint,omitempty"`

	// Only for Agents
	Leader  *string `json:"Leader,omitempty"`
	Leading *bool   `json:"Leading,omitempty"`
}

ServerHealth contains health information of a single server in a cluster.

type ServerID

type ServerID string

ServerID identifies an ArangoDB server in a cluster.

type ServerRole

type ServerRole string

ServerRole is the role of an arangod server

const (
	// ServerRoleSingle indicates that the server is a single-server instance
	ServerRoleSingle ServerRole = "Single"
	// ServerRoleSingleActive indicates that the server is the leader of a single-server resilient pair
	ServerRoleSingleActive ServerRole = "SingleActive"
	// ServerRoleSinglePassive indicates that the server is a follower of a single-server resilient pair
	ServerRoleSinglePassive ServerRole = "SinglePassive"
	// ServerRoleDBServer indicates that the server is a dbserver within a cluster
	ServerRoleDBServer ServerRole = "DBServer"
	// ServerRoleCoordinator indicates that the server is a coordinator within a cluster
	ServerRoleCoordinator ServerRole = "Coordinator"
	// ServerRoleAgent indicates that the server is an agent within a cluster
	ServerRoleAgent ServerRole = "Agent"
	// ServerRoleUndefined indicates that the role of the server cannot be determined
	ServerRoleUndefined ServerRole = "Undefined"
)

func ConvertServerRole

func ConvertServerRole(arangoDBRole string) ServerRole

ConvertServerRole returns the go-driver server role based on the ArangoDB role.

type ServerStatus

type ServerStatus string

ServerStatus describes the health status of a server

const (
	// ServerStatusGood indicates server is in good state
	ServerStatusGood ServerStatus = "GOOD"
	// ServerStatusBad indicates server has missed 1 heartbeat
	ServerStatusBad ServerStatus = "BAD"
	// ServerStatusFailed indicates server has been declared failed by the supervision, this happens after about 15s being bad.
	ServerStatusFailed ServerStatus = "FAILED"
)

type ServerSyncStatus

type ServerSyncStatus string

ServerSyncStatus describes a server's sync status.

const (
	ServerSyncStatusUnknown   ServerSyncStatus = "UNKNOWN"
	ServerSyncStatusUndefined ServerSyncStatus = "UNDEFINED"
	ServerSyncStatusStartup   ServerSyncStatus = "STARTUP"
	ServerSyncStatusStopping  ServerSyncStatus = "STOPPING"
	ServerSyncStatusStopped   ServerSyncStatus = "STOPPED"
	ServerSyncStatusServing   ServerSyncStatus = "SERVING"
	ServerSyncStatusShutdown  ServerSyncStatus = "SHUTDOWN"
)

type SetCollectionPropertiesOptions

type SetCollectionPropertiesOptions struct {
	// If true then creating or changing a document will wait until the data has been synchronized to disk.
	WaitForSync *bool `json:"waitForSync,omitempty"`
	// The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MB). Note that when changing the journalSize value, it will only have an effect for additional journals or datafiles that are created. Already existing journals or datafiles will not be affected.
	JournalSize int64 `json:"journalSize,omitempty"`
	// ReplicationFactor contains how many copies of each shard are kept on different DBServers.
	// Only available in cluster setup.
	ReplicationFactor ReplicationFactor `json:"replicationFactor,omitempty"`
	// Deprecated: use 'WriteConcern' instead
	MinReplicationFactor int `json:"minReplicationFactor,omitempty"`
	// WriteConcern contains how many copies must be available before a collection can be written.
	// Available from 3.6 arangod version.
	WriteConcern int `json:"writeConcern,omitempty"`
	// CacheEnabled set cacheEnabled option in collection properties
	CacheEnabled *bool `json:"cacheEnabled,omitempty"`
	// Schema for collection validation
	Schema *CollectionSchemaOptions `json:"schema,omitempty"`
}

SetCollectionPropertiesOptions contains data for Collection.SetProperties.
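A sketch of adjusting collection properties (Collection.SetProperties is named above; its exact signature is an assumption here):

	func enableSync(ctx context.Context, col arangodb.Collection) error {
		waitForSync := true
		return col.SetProperties(ctx, arangodb.SetCollectionPropertiesOptions{
			WaitForSync:  &waitForSync,
			WriteConcern: 2,
		})
	}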

type ShardID

type ShardID string

ShardID is an internal identifier of a specific shard.

type ShardingStrategy

type ShardingStrategy string

ShardingStrategy describes the sharding strategy of a collection

const (
	ShardingStrategyCommunityCompat           ShardingStrategy = "community-compat"
	ShardingStrategyEnterpriseCompat          ShardingStrategy = "enterprise-compat"
	ShardingStrategyEnterpriseSmartEdgeCompat ShardingStrategy = "enterprise-smart-edge-compat"
	ShardingStrategyHash                      ShardingStrategy = "hash"
	ShardingStrategyEnterpriseHashSmartEdge   ShardingStrategy = "enterprise-hash-smart-edge"
)

type SortDirection

type SortDirection string

SortDirection describes the sorting direction

const (
	// SortDirectionAsc sort ascending
	SortDirectionAsc SortDirection = "asc"

	// SortDirectionDesc sort descending
	SortDirectionDesc SortDirection = "desc"
)

type StoredValue

type StoredValue struct {
	// Fields A list of attribute paths. The . character denotes sub-attributes.
	Fields []string `json:"fields,omitempty"`

	// Compression Defines how to compress the attribute values.
	Compression PrimarySortCompression `json:"compression,omitempty"`

	// Cache attribute allows you to always cache stored values in memory
	// Introduced in v3.9.5, Enterprise Edition only
	Cache *bool `json:"cache,omitempty"`
}

StoredValue defines the value stored in the index

type TransactionCollections

type TransactionCollections struct {
	Read      []string `json:"read,omitempty"`
	Write     []string `json:"write,omitempty"`
	Exclusive []string `json:"exclusive,omitempty"`
}

TransactionCollections is used to specify which collections are accessed by a transaction and how.

type TransactionID

type TransactionID string

TransactionID identifies a transaction

type TransactionStatus

type TransactionStatus string

TransactionStatus describes the status of a transaction.

const (
	TransactionRunning   TransactionStatus = "running"
	TransactionCommitted TransactionStatus = "committed"
	TransactionAborted   TransactionStatus = "aborted"
)

type TransactionStatusRecord

type TransactionStatusRecord struct {
	Status TransactionStatus
}

TransactionStatusRecord provides insight into the status of a transaction.

type TransactionStatuses

type TransactionStatuses []TransactionStatus

TransactionStatuses is a list of transaction statuses.

func (TransactionStatuses) Contains

func (t TransactionStatuses) Contains(status TransactionStatus) bool

type TransactionWrap

type TransactionWrap func(ctx context.Context, t Transaction) error

type UnmarshalInto

type UnmarshalInto struct {
	// contains filtered or unexported fields
}

func (*UnmarshalInto) UnmarshalJSON

func (u *UnmarshalInto) UnmarshalJSON(d []byte) error

type Version

type Version string

Version holds a server version string. The string has the format "major.minor.sub". Major and minor will be numeric, and sub may contain a number or a textual version.

func (Version) CompareTo

func (v Version) CompareTo(other Version) int

CompareTo returns an integer comparing two versions. The result will be 0 if v==other, -1 if v < other, and +1 if v > other. If major & minor parts are equal and the sub part is not a number, the sub part will be compared using lexicographical string comparison.

func (Version) Major

func (v Version) Major() int

Major returns the major part of the version. E.g. "3.1.7" -> 3

func (Version) Minor

func (v Version) Minor() int

Minor returns the minor part of the version. E.g. "3.1.7" -> 1

func (Version) Sub

func (v Version) Sub() string

Sub returns the sub part of the version. E.g. "3.1.7" -> "7"

func (Version) SubInt

func (v Version) SubInt() (int, bool)

SubInt returns the sub part of the version as integer. The bool return value indicates if the sub part is indeed a number. E.g. "3.1.7" -> 7, true E.g. "3.1.foo" -> 0, false
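For example (assuming the fmt import):

	v := arangodb.Version("3.11.2")
	fmt.Println(v.Major(), v.Minor(), v.Sub()) // 3 11 2
	if sub, ok := v.SubInt(); ok {
		fmt.Println("numeric sub part:", sub) // numeric sub part: 2
	}
	if v.CompareTo(arangodb.Version("3.10.0")) > 0 {
		fmt.Println("newer than 3.10.0")
	}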

type VersionInfo

type VersionInfo struct {
	// This will always contain "arango"
	Server string `json:"server,omitempty"`
	//  The server version string. The string has the format "major.minor.sub".
	// Major and minor will be numeric, and sub may contain a number or a textual version.
	Version Version `json:"version,omitempty"`
	// Type of license of the server
	License string `json:"license,omitempty"`
	// Optional additional details. This is returned only if the context is configured using WithDetails.
	Details map[string]interface{} `json:"details,omitempty"`
}

VersionInfo describes the version of a database server.

func (*VersionInfo) IsEnterprise

func (v *VersionInfo) IsEnterprise() bool

func (VersionInfo) String

func (v VersionInfo) String() string

String creates a string representation of the given VersionInfo.

type ZKDFieldType

type ZKDFieldType string
const ZKDDoubleFieldType ZKDFieldType = "double"
