Documentation
¶
Index ¶
- Constants
- func CreateDocuments(ctx context.Context, col Collection, docCount int, generator func(i int) any) error
- func RequiredFieldError(field string) error
- type AbortTransactionOptions
- type Analyzer
- type AnalyzerDefinition
- type AnalyzersResponseReader
- type ArangoSearchAliasIndex
- type ArangoSearchAliasOperation
- type ArangoSearchAliasUpdateOpts
- type ArangoSearchAliasViewProperties
- type ArangoSearchAnalyzerAQLReturnType
- type ArangoSearchAnalyzerGeoJSONType
- type ArangoSearchAnalyzerGeoOptions
- type ArangoSearchAnalyzerPipeline
- type ArangoSearchAnalyzerProperties
- type ArangoSearchAnalyzerType
- type ArangoSearchBreakType
- type ArangoSearchCaseType
- type ArangoSearchConsolidationPolicy
- type ArangoSearchConsolidationPolicyBytesAccum
- type ArangoSearchConsolidationPolicyTier
- type ArangoSearchConsolidationPolicyType
- type ArangoSearchEdgeNGram
- type ArangoSearchElementProperties
- type ArangoSearchFeature
- type ArangoSearchFields
- type ArangoSearchFormat
- type ArangoSearchLinks
- type ArangoSearchNGramStreamType
- type ArangoSearchPrimarySortEntry
- type ArangoSearchStoreValues
- type ArangoSearchView
- type ArangoSearchViewAlias
- type ArangoSearchViewProperties
- type AsyncJobDeleteOptions
- type AsyncJobDeleteType
- type AsyncJobListOptions
- type AsyncJobStatusType
- type BackupCreateOptions
- type BackupListOptions
- type BackupMeta
- type BackupMetaSha256
- type BackupResponse
- type BackupRestoreResponse
- type BackupTransferProgressResponse
- type BackupTransferReport
- type BackupTransferStatus
- type BeginTransactionOptions
- type CacheRespObject
- type Client
- type ClientAdmin
- type ClientAdminBackup
- type ClientAdminCluster
- type ClientAdminLicense
- type ClientAdminLog
- type ClientAsyncJob
- type ClientDatabase
- type ClientFoxx
- type ClientFoxxService
- type ClientServerInfo
- type ClientTasks
- type ClientUsers
- type ClusterHealth
- type Collection
- type CollectionChecksum
- type CollectionDocumentCreate
- type CollectionDocumentCreateOptions
- type CollectionDocumentCreateOverwriteMode
- type CollectionDocumentCreateResponse
- type CollectionDocumentCreateResponseReader
- type CollectionDocumentDelete
- type CollectionDocumentDeleteOptions
- type CollectionDocumentDeleteResponse
- type CollectionDocumentDeleteResponseReader
- type CollectionDocumentImport
- type CollectionDocumentImportDocumentType
- type CollectionDocumentImportOnDuplicate
- type CollectionDocumentImportOptions
- type CollectionDocumentImportRequest
- type CollectionDocumentImportResponse
- type CollectionDocumentImportStatistics
- type CollectionDocumentRead
- type CollectionDocumentReadOptions
- type CollectionDocumentReadResponse
- type CollectionDocumentReadResponseReader
- type CollectionDocumentReplace
- type CollectionDocumentReplaceOptions
- type CollectionDocumentReplaceResponse
- type CollectionDocumentReplaceResponseReader
- type CollectionDocumentUpdate
- type CollectionDocumentUpdateOptions
- type CollectionDocumentUpdateResponse
- type CollectionDocumentUpdateResponseReader
- type CollectionDocuments
- type CollectionExtendedInfo
- type CollectionFigures
- type CollectionIndexes
- type CollectionInfo
- type CollectionKeyOptions
- type CollectionProperties
- type CollectionSchemaLevel
- type CollectionSchemaOptions
- type CollectionShards
- type CollectionStatistics
- type CollectionStatus
- type CollectionType
- type CommitTransactionOptions
- type CommonFoxxServiceFields
- type ComputeOn
- type ComputedValue
- type ConsolidationPolicy
- type ConsolidationPolicyBytesAccum
- type ConsolidationPolicyTier
- type ConsolidationPolicyType
- type Contributor
- type CreateCollectionOptions
- type CreateCollectionPropertiesV2
- type CreateDatabaseDefaultOptions
- type CreateDatabaseOptions
- type CreateDatabaseUserOptions
- type CreateEdgeDefinitionOptions
- type CreateEdgeDefinitionResponse
- type CreateEdgeOptions
- type CreateGeoIndexOptions
- type CreateGraphOptions
- type CreateMDIIndexOptions
- type CreateMDIPrefixedIndexOptions
- type CreatePersistentIndexOptions
- type CreateTTLIndexOptions
- type CreateVertexCollectionOptions
- type CreateVertexCollectionResponse
- type CreateVertexOptions
- type Cursor
- type CursorBatch
- type CursorPlan
- type CursorPlanCollection
- type CursorPlanNodes
- type CursorPlanVariable
- type CursorStats
- type Database
- type DatabaseAnalyzer
- type DatabaseCollection
- type DatabaseGraph
- type DatabaseInfo
- type DatabaseInventory
- type DatabasePermissions
- type DatabaseQuery
- type DatabaseReplicationVersion
- type DatabaseSharding
- type DatabaseTransaction
- type DatabaseView
- type DeleteEdgeDefinitionOptions
- type DeleteEdgeDefinitionResponse
- type DeleteEdgeOptions
- type DeleteVertexCollectionOptions
- type DeleteVertexCollectionResponse
- type DeleteVertexOptions
- type DeployFoxxServiceRequest
- type DocumentID
- type DocumentMeta
- type DocumentMetaSlice
- type DocumentMetaWithOldRev
- type Edge
- type EdgeCreateResponse
- type EdgeDefinition
- type EdgeDeleteResponse
- type EdgeDetails
- type EdgeDirection
- type EdgeReplaceOptions
- type EdgeReplaceResponse
- type EdgeUpdateOptions
- type EdgeUpdateResponse
- type EngineInfo
- type EngineType
- type Engines
- type ExplainQueryOptimizerOptions
- type ExplainQueryOptions
- type ExplainQueryResult
- type ExplainQueryResultExecutionCollection
- type ExplainQueryResultExecutionNodeRaw
- type ExplainQueryResultExecutionStats
- type ExplainQueryResultExecutionVariable
- type ExplainQueryResultPlan
- type Flags
- type FoxxDeleteOptions
- type FoxxDeploymentOptions
- type FoxxServiceListItem
- type FoxxServiceObject
- type FoxxTestOptions
- type GetCollectionOptions
- type GetDatabaseOptions
- type GetEdgeOptions
- type GetEdgesOptions
- type GetGraphOptions
- type GetVersionOptions
- type GetVertexOptions
- type Grant
- type Graph
- type GraphCollection
- type GraphDefinition
- type GraphEdgesDefinition
- type GraphVertexCollections
- type GraphsResponseReader
- type IndexOptions
- type IndexResponse
- type IndexSharedOptions
- type IndexType
- type InventoryCollection
- type InventoryCollectionParameters
- type InventoryIndex
- type InventoryView
- type InvertedIndexField
- type InvertedIndexNestedField
- type InvertedIndexOptions
- type KeyGeneratorType
- type KeyGeneratorsResponse
- type License
- type LicenseFeatures
- type LicenseStatus
- type ListBackupsResponse
- type LogLevels
- type LogLevelsGetOptions
- type LogLevelsSetOptions
- type MDIFieldType
- type Manifest
- type NumberOfServersResponse
- type OptimizerRules
- type PrimarySort
- type PrimarySortCompression
- type PrimarySortEntry
- type QueryCacheEntriesRespObject
- type QueryCacheProperties
- type QueryOptions
- type QueryPlanCacheRespObject
- type QueryProperties
- type QueryRequest
- type QuerySubOptions
- type QuerySubOptionsOptimizer
- type RemoveCollectionOptions
- type RemoveGraphOptions
- type RemoveViewOptions
- type RenameCollectionRequest
- type ReplaceEdgeDefinitionResponse
- type ReplaceEdgeOptions
- type ReplicationFactor
- type Repository
- type Requests
- type ResponsibleShardRequest
- type RunningAQLQuery
- type ServerHealth
- type ServerID
- type ServerMode
- type ServerRole
- type ServerStatus
- type ServerSyncStatus
- type SetCollectionPropertiesOptionsV2
- type ShardID
- type ShardingStrategy
- type SortDirection
- type StoredValue
- type SwaggerInfo
- type SwaggerResponse
- type Task
- type TaskOptions
- type Transaction
- type TransactionCollections
- type TransactionID
- type TransactionJSOptions
- type TransactionStatus
- type TransactionStatusRecord
- type TransactionStatuses
- type TransactionWrap
- type TransferMonitor
- type TransferType
- type UninstallFoxxServiceRequest
- type Unmarshal
- type UnmarshalData
- type UnmarshalInto
- type Unmarshaler
- type User
- type UserDefinedFunctionObject
- type UserOptions
- type UserPermissions
- type Version
- type VersionInfo
- type VertexCollection
- type VertexCreateResponse
- type VertexDeleteResponse
- type VertexReplaceOptions
- type VertexReplaceResponse
- type VertexUpdateOptions
- type VertexUpdateResponse
- type View
- type ViewBase
- type ViewType
- type ViewsResponseReader
Constants ¶
const ( QueryFromPrefix = "fromPrefix" QueryToPrefix = "toPrefix" QueryComplete = "complete" QueryOnDuplicate = "onDuplicate" )
const ( // PrimaryIndexType is automatically created for each collection. It indexes the documents’ primary keys, // which are stored in the _key system attribute. The primary index is unique and can be used for queries on both the _key and _id attributes. // There is no way to explicitly create or delete primary indexes. PrimaryIndexType = IndexType("primary") // EdgeIndexType is automatically created for edge collections. It contains connections between vertex documents // and is invoked when the connecting edges of a vertex are queried. There is no way to explicitly create or delete edge indexes. // The edge index is non-unique. EdgeIndexType = IndexType("edge") // PersistentIndexType is a sorted index that can be used for finding individual documents or ranges of documents. PersistentIndexType = IndexType("persistent") // GeoIndexType index can accelerate queries that filter and sort by the distance between stored coordinates and coordinates provided in a query. GeoIndexType = IndexType("geo") // TTLIndexType index can be used for automatically removing expired documents from a collection. // Documents which are expired are eventually removed by a background thread. TTLIndexType = IndexType("ttl") // MDIIndexType is multidimensional index for indexing two- or higher dimensional data such as time ranges, // for efficient intersection of multiple range queries. // Available in ArangoDB 3.12 and later. MDIIndexType = IndexType("mdi") // MDIPrefixedIndexType is an additional `mdi` index variant that lets you specify additional attributes // for the index to narrow down the search space using equality checks. // Available in ArangoDB 3.12 and later. MDIPrefixedIndexType = IndexType("mdi-prefixed") // InvertedIndexType can be used to speed up a broad range of AQL queries, from simple to complex, including full-text search InvertedIndexType = IndexType("inverted") )
const ( CollectionStatusNewBorn = CollectionStatus(1) CollectionStatusUnloaded = CollectionStatus(2) CollectionStatusLoaded = CollectionStatus(3) CollectionStatusUnloading = CollectionStatus(4) CollectionStatusDeleted = CollectionStatus(5) CollectionStatusLoading = CollectionStatus(6) )
const ( // CollectionTypeDocument specifies a document collection CollectionTypeDocument = CollectionType(2) // CollectionTypeEdge specifies an edge collection CollectionTypeEdge = CollectionType(3) )
const ( KeyGeneratorTraditional = KeyGeneratorType("traditional") KeyGeneratorAutoIncrement = KeyGeneratorType("autoincrement") )
const ( EngineTypeMMFiles = EngineType("mmfiles") EngineTypeRocksDB = EngineType("rocksdb") )
const ( HeaderDirtyReads = "x-arango-allow-dirty-read" HeaderTransaction = "x-arango-trx-id" HeaderIfMatch = "If-Match" HeaderIfNoneMatch = "If-None-Match" QueryRev = "rev" QueryIgnoreRevs = "ignoreRevs" QueryWaitForSync = "waitForSync" QueryReturnNew = "returnNew" QueryReturnOld = "returnOld" QueryKeepNull = "keepNull" QueryDirection = "direction" QuerySilent = "silent" QueryRefillIndexCaches = "refillIndexCaches" QueryMergeObjects = "mergeObjects" QueryOverwrite = "overwrite" QueryOverwriteMode = "overwriteMode" QueryVersionAttribute = "versionAttribute" QueryIsRestore = "isRestore" QueryCollection = "collection" QueryType = "type" )
const ( // ViewTypeArangoSearch specifies an ArangoSearch view type. ViewTypeArangoSearch = ViewType("arangosearch") // ViewTypeSearchAlias specifies an ArangoSearch view type alias. ViewTypeSearchAlias = ViewType("search-alias") )
const ( // SatelliteGraph is a special replication factor for satellite graphs. // Use this replication factor to create a satellite graph. SatelliteGraph = -100 )
Variables ¶
This section is empty.
Functions ¶
func CreateDocuments ¶
func CreateDocuments(ctx context.Context, col Collection, docCount int, generator func(i int) any) error
CreateDocuments creates the given number of documents for the provided collection.
func RequiredFieldError ¶ added in v2.1.5
Types ¶
type AbortTransactionOptions ¶
type AbortTransactionOptions struct{}
AbortTransactionOptions provides options for AbortTransaction. Currently unused
type Analyzer ¶ added in v2.0.3
type Analyzer interface { Name() string Database() Database // Type returns the analyzer type Type() ArangoSearchAnalyzerType // UniqueName returns the unique name: <database>::<analyzer-name> UniqueName() string // Definition returns the analyzer definition Definition() AnalyzerDefinition // Remove the analyzer Remove(ctx context.Context, force bool) error }
type AnalyzerDefinition ¶ added in v2.0.3
type AnalyzerDefinition struct { // The Analyzer name. Name string `json:"name,omitempty"` // The Analyzer type. Type ArangoSearchAnalyzerType `json:"type,omitempty"` // The properties used to configure the specified Analyzer type. Properties ArangoSearchAnalyzerProperties `json:"properties,omitempty"` // The set of features to set on the Analyzer generated fields. // The default value is an empty array. Features []ArangoSearchFeature `json:"features,omitempty"` }
type AnalyzersResponseReader ¶ added in v2.0.3
type ArangoSearchAliasIndex ¶ added in v2.0.3
type ArangoSearchAliasOperation ¶ added in v2.1.0
type ArangoSearchAliasOperation string
const ( // ArangoSearchAliasOperationAdd adds the index to the stored indexes property of the View. ArangoSearchAliasOperationAdd ArangoSearchAliasOperation = "add" // ArangoSearchAliasOperationDel removes the index from the stored indexes property of the View. ArangoSearchAliasOperationDel ArangoSearchAliasOperation = "del" )
type ArangoSearchAliasUpdateOpts ¶ added in v2.1.0
type ArangoSearchAliasUpdateOpts struct { // Indexes A list of inverted indexes to add to the View. Indexes []ArangoSearchAliasIndex `json:"indexes,omitempty"` // Operation Whether to add or remove the index to the stored indexes property of the View. // Possible values: "add", "del". // The default is "add". Operation ArangoSearchAliasOperation `json:"operation,omitempty"` }
type ArangoSearchAliasViewProperties ¶ added in v2.0.3
type ArangoSearchAliasViewProperties struct { // ViewBase field is available only in read operations ViewBase // Indexes A list of inverted indexes to add to the View. Indexes []ArangoSearchAliasIndex `json:"indexes,omitempty"` }
type ArangoSearchAnalyzerAQLReturnType ¶ added in v2.0.3
type ArangoSearchAnalyzerAQLReturnType string
const ( ArangoSearchAnalyzerAQLReturnTypeString ArangoSearchAnalyzerAQLReturnType = "string" ArangoSearchAnalyzerAQLReturnTypeNumber ArangoSearchAnalyzerAQLReturnType = "number" ArangoSearchAnalyzerAQLReturnTypeBool ArangoSearchAnalyzerAQLReturnType = "bool" )
func (ArangoSearchAnalyzerAQLReturnType) New ¶ added in v2.0.3
func (a ArangoSearchAnalyzerAQLReturnType) New() *ArangoSearchAnalyzerAQLReturnType
New returns a pointer to the selected return type
type ArangoSearchAnalyzerGeoJSONType ¶ added in v2.0.3
type ArangoSearchAnalyzerGeoJSONType string
ArangoSearchAnalyzerGeoJSONType GeoJSON Type parameter.
const ( // ArangoSearchAnalyzerGeoJSONTypeShape define index all GeoJSON geometry types (Point, Polygon etc.). (default) ArangoSearchAnalyzerGeoJSONTypeShape ArangoSearchAnalyzerGeoJSONType = "shape" // ArangoSearchAnalyzerGeoJSONTypeCentroid define compute and only index the centroid of the input geometry. ArangoSearchAnalyzerGeoJSONTypeCentroid ArangoSearchAnalyzerGeoJSONType = "centroid" // ArangoSearchAnalyzerGeoJSONTypePoint define only index GeoJSON objects of type Point, ignore all other geometry types. ArangoSearchAnalyzerGeoJSONTypePoint ArangoSearchAnalyzerGeoJSONType = "point" )
func (ArangoSearchAnalyzerGeoJSONType) New ¶ added in v2.0.3
func (a ArangoSearchAnalyzerGeoJSONType) New() *ArangoSearchAnalyzerGeoJSONType
New returns a pointer to the selected return type
type ArangoSearchAnalyzerGeoOptions ¶ added in v2.0.3
type ArangoSearchAnalyzerGeoOptions struct { // MaxCells define maximum number of S2 cells. MaxCells *int `json:"maxCells,omitempty"` // MinLevel define the least precise S2 level. MinLevel *int `json:"minLevel,omitempty"` // MaxLevel define the most precise S2 level MaxLevel *int `json:"maxLevel,omitempty"` }
ArangoSearchAnalyzerGeoOptions for fine-tuning geo queries. These options should generally remain unchanged.
type ArangoSearchAnalyzerPipeline ¶ added in v2.0.3
type ArangoSearchAnalyzerPipeline struct { // Type of the Pipeline Analyzer Type ArangoSearchAnalyzerType `json:"type"` // Properties of the Pipeline Analyzer Properties ArangoSearchAnalyzerProperties `json:"properties,omitempty"` }
ArangoSearchAnalyzerPipeline provides object definition for Pipeline array parameter
type ArangoSearchAnalyzerProperties ¶ added in v2.0.3
type ArangoSearchAnalyzerProperties struct { IsSystem bool `json:"isSystem,omitempty"` // Locale used by ArangoSearchAnalyzerTypeStem, ArangoSearchAnalyzerTypeNorm, Text Locale string `json:"locale,omitempty"` // Delimiter used by ArangoSearchAnalyzerTypeDelimiter Delimiter string `json:"delimiter,omitempty"` // Delimiters used by ArangoSearchAnalyzerTypeMultiDelimiter Delimiters []string `json:"delimiters,omitempty"` // Accent used by ArangoSearchAnalyzerTypeNorm, ArangoSearchAnalyzerTypeText Accent *bool `json:"accent,omitempty"` // Case used by ArangoSearchAnalyzerTypeNorm, ArangoSearchAnalyzerTypeText, ArangoSearchAnalyzerTypeSegmentation Case ArangoSearchCaseType `json:"case,omitempty"` // EdgeNGram used by ArangoSearchAnalyzerTypeText EdgeNGram *ArangoSearchEdgeNGram `json:"edgeNgram,omitempty"` // Min used by ArangoSearchAnalyzerTypeNGram Min *int64 `json:"min,omitempty"` Max *int64 `json:"max,omitempty"` // PreserveOriginal used by ArangoSearchAnalyzerTypeNGram PreserveOriginal *bool `json:"preserveOriginal,omitempty"` // StartMarker used by ArangoSearchAnalyzerTypeNGram StartMarker *string `json:"startMarker,omitempty"` // EndMarker used by ArangoSearchAnalyzerTypeNGram EndMarker *string `json:"endMarker,omitempty"` // StreamType used by ArangoSearchAnalyzerTypeNGram StreamType *ArangoSearchNGramStreamType `json:"streamType,omitempty"` // Stemming used by ArangoSearchAnalyzerTypeText Stemming *bool `json:"stemming,omitempty"` // Stopwords used by ArangoSearchAnalyzerTypeText and ArangoSearchAnalyzerTypeStopwords. // This field is not mandatory since version 3.7 of arangod so it can not be omitted in 3.6. Stopwords []string `json:"stopwords"` // StopwordsPath used by ArangoSearchAnalyzerTypeText StopwordsPath []string `json:"stopwordsPath,omitempty"` // QueryString used by ArangoSearchAnalyzerTypeAQL. QueryString string `json:"queryString,omitempty"` // CollapsePositions used by ArangoSearchAnalyzerTypeAQL. 
CollapsePositions *bool `json:"collapsePositions,omitempty"` // KeepNull used by ArangoSearchAnalyzerTypeAQL. KeepNull *bool `json:"keepNull,omitempty"` // BatchSize used by ArangoSearchAnalyzerTypeAQL. BatchSize *int `json:"batchSize,omitempty"` // MemoryLimit used by ArangoSearchAnalyzerTypeAQL. MemoryLimit *int `json:"memoryLimit,omitempty"` // ReturnType used by ArangoSearchAnalyzerTypeAQL. ReturnType *ArangoSearchAnalyzerAQLReturnType `json:"returnType,omitempty"` // Pipeline used by ArangoSearchAnalyzerTypePipeline. Pipeline []ArangoSearchAnalyzerPipeline `json:"pipeline,omitempty"` // Type used by ArangoSearchAnalyzerTypeGeoJSON. Type *ArangoSearchAnalyzerGeoJSONType `json:"type,omitempty"` // Options used by ArangoSearchAnalyzerTypeGeoJSON and ArangoSearchAnalyzerTypeGeoPoint Options *ArangoSearchAnalyzerGeoOptions `json:"options,omitempty"` // Latitude used by ArangoSearchAnalyzerTypeGeoPoint. Latitude []string `json:"latitude,omitempty"` // Longitude used by ArangoSearchAnalyzerTypeGeoPoint. Longitude []string `json:"longitude,omitempty"` // Break used by ArangoSearchAnalyzerTypeSegmentation Break ArangoSearchBreakType `json:"break,omitempty"` // Hex used by ArangoSearchAnalyzerTypeStopwords. // If false then each string in stopwords is used verbatim. // If true, then each string in stopwords needs to be hex-encoded. Hex *bool `json:"hex,omitempty"` // ModelLocation used by ArangoSearchAnalyzerTypeClassification, ArangoSearchAnalyzerTypeNearestNeighbors // The on-disk path to the trained fastText supervised model. // Note: if you are running this in an ArangoDB cluster, this model must exist on every machine in the cluster. 
ModelLocation string `json:"model_location,omitempty"` // TopK used by ArangoSearchAnalyzerTypeClassification, ArangoSearchAnalyzerTypeNearestNeighbors // The number of class labels that will be produced per input (default: 1) TopK *uint64 `json:"top_k,omitempty"` // Threshold used by ArangoSearchAnalyzerTypeClassification // The probability threshold for which a label will be assigned to an input. // A fastText model produces a probability per class label, and this is what will be filtered (default: 0.99). Threshold *float64 `json:"threshold,omitempty"` // Analyzer used by ArangoSearchAnalyzerTypeMinhash // Definition of inner analyzer to use for incoming data. In case if omitted field or empty object falls back to 'identity' analyzer. Analyzer *AnalyzerDefinition `json:"analyzer,omitempty"` // NumHashes used by ArangoSearchAnalyzerTypeMinhash // Size of min hash signature. Must be greater or equal to 1. NumHashes *uint64 `json:"numHashes,omitempty"` // Format is the internal binary representation to use for storing the geo-spatial data in an index. Format *ArangoSearchFormat `json:"format,omitempty"` // NGramSize used by ArangoSearchAnalyzerTypeWildcard // It is an unsigned integer for the n-gram length, needs to be at least 2. // It can be greater than the substrings between wildcards that you want to search for, e.g. 4 with an expected // search pattern of %up%if%ref% (substrings of length 2 and 3 between %), but this leads to a slower search // (for ref% with post-validation using the ICU regular expression engine). // A value of 3 is a good default, 2 is better for short strings NGramSize uint `json:"ngramSize"` }
ArangoSearchAnalyzerProperties specifies options for the analyzer. Required and respected depend on the analyzer type. See docs: https://docs.arangodb.com/stable/index-and-search/analyzers/#analyzer-properties
type ArangoSearchAnalyzerType ¶ added in v2.0.3
type ArangoSearchAnalyzerType string
ArangoSearchAnalyzerType specifies type of analyzer
const ( // ArangoSearchAnalyzerTypeIdentity treat value as atom (no transformation) ArangoSearchAnalyzerTypeIdentity ArangoSearchAnalyzerType = "identity" // ArangoSearchAnalyzerTypeDelimiter split into tokens at user-defined character ArangoSearchAnalyzerTypeDelimiter ArangoSearchAnalyzerType = "delimiter" // ArangoSearchAnalyzerTypeMultiDelimiter split into tokens at user-defined character // // Available in ArangoDB 3.12 and later. ArangoSearchAnalyzerTypeMultiDelimiter ArangoSearchAnalyzerType = "multi_delimiter" // ArangoSearchAnalyzerTypeStem apply stemming to the value as a whole ArangoSearchAnalyzerTypeStem ArangoSearchAnalyzerType = "stem" // ArangoSearchAnalyzerTypeNorm apply normalization to the value as a whole ArangoSearchAnalyzerTypeNorm ArangoSearchAnalyzerType = "norm" // ArangoSearchAnalyzerTypeNGram create n-grams from value with user-defined lengths ArangoSearchAnalyzerTypeNGram ArangoSearchAnalyzerType = "ngram" // ArangoSearchAnalyzerTypeText tokenize into words, optionally with stemming, normalization and stop-word filtering ArangoSearchAnalyzerTypeText ArangoSearchAnalyzerType = "text" // ArangoSearchAnalyzerTypeAQL an Analyzer capable of running a restricted AQL query to perform data manipulation / filtering. ArangoSearchAnalyzerTypeAQL ArangoSearchAnalyzerType = "aql" // ArangoSearchAnalyzerTypePipeline an Analyzer capable of chaining effects of multiple Analyzers into one. The pipeline is a list of Analyzers, where the output of an Analyzer is passed to the next for further processing. The final token value is determined by last Analyzer in the pipeline. ArangoSearchAnalyzerTypePipeline ArangoSearchAnalyzerType = "pipeline" // ArangoSearchAnalyzerTypeStopwords an Analyzer capable of removing specified tokens from the input. 
ArangoSearchAnalyzerTypeStopwords ArangoSearchAnalyzerType = "stopwords" // ArangoSearchAnalyzerTypeGeoJSON an Analyzer capable of breaking up a GeoJSON object into a set of indexable tokens for further usage with ArangoSearch Geo functions. ArangoSearchAnalyzerTypeGeoJSON ArangoSearchAnalyzerType = "geojson" // ArangoSearchAnalyzerTypeGeoS2 an Analyzer capable of indexing GeoJSON data with inverted indexes or Views similar // to the existing `geojson` Analyzer, but it internally uses a format for storing the geo-spatial data // that is more efficient. ArangoSearchAnalyzerTypeGeoS2 ArangoSearchAnalyzerType = "geo_s2" // ArangoSearchAnalyzerTypeGeoPoint an Analyzer capable of breaking up JSON object describing a coordinate into a set of indexable tokens for further usage with ArangoSearch Geo functions. ArangoSearchAnalyzerTypeGeoPoint ArangoSearchAnalyzerType = "geopoint" // ArangoSearchAnalyzerTypeSegmentation an Analyzer capable of breaking up the input text into tokens in a language-agnostic manner ArangoSearchAnalyzerTypeSegmentation ArangoSearchAnalyzerType = "segmentation" // ArangoSearchAnalyzerTypeCollation an Analyzer capable of converting the input into a set of language-specific tokens ArangoSearchAnalyzerTypeCollation ArangoSearchAnalyzerType = "collation" // ArangoSearchAnalyzerTypeClassification An Analyzer capable of classifying tokens in the input text. (EE only) ArangoSearchAnalyzerTypeClassification ArangoSearchAnalyzerType = "classification" // ArangoSearchAnalyzerTypeNearestNeighbors An Analyzer capable of finding nearest neighbors of tokens in the input. (EE only) ArangoSearchAnalyzerTypeNearestNeighbors ArangoSearchAnalyzerType = "nearest_neighbors" // ArangoSearchAnalyzerTypeMinhash an analyzer which is capable of evaluating so-called MinHash signatures as a stream of tokens. 
(EE only) ArangoSearchAnalyzerTypeMinhash ArangoSearchAnalyzerType = "minhash" // ArangoSearchAnalyzerTypeWildcard An Analyzer that creates n-grams to enable fast partial matching for wildcard // queries if you have large string values, especially if you want to search for suffixes or substrings in the // middle of strings (infixes) as opposed to prefixes. // // Available in ArangoDB 3.12 and later. ArangoSearchAnalyzerTypeWildcard ArangoSearchAnalyzerType = "wildcard" )
type ArangoSearchBreakType ¶ added in v2.0.3
type ArangoSearchBreakType string
const ( // ArangoSearchBreakTypeAll to return all tokens ArangoSearchBreakTypeAll ArangoSearchBreakType = "all" // ArangoSearchBreakTypeAlpha to return tokens composed of alphanumeric characters only (default) ArangoSearchBreakTypeAlpha ArangoSearchBreakType = "alpha" // ArangoSearchBreakTypeGraphic to return tokens composed of non-whitespace characters only ArangoSearchBreakTypeGraphic ArangoSearchBreakType = "graphic" )
type ArangoSearchCaseType ¶ added in v2.0.3
type ArangoSearchCaseType string
const ( // ArangoSearchCaseUpper to convert to all upper-case characters ArangoSearchCaseUpper ArangoSearchCaseType = "upper" // ArangoSearchCaseLower to convert to all lower-case characters ArangoSearchCaseLower ArangoSearchCaseType = "lower" // ArangoSearchCaseNone to not change character case (default) ArangoSearchCaseNone ArangoSearchCaseType = "none" )
type ArangoSearchConsolidationPolicy ¶ added in v2.0.3
type ArangoSearchConsolidationPolicy struct { // Type returns the type of the ConsolidationPolicy. This interface can then be casted to the corresponding ArangoSearchConsolidationPolicy* struct. Type ArangoSearchConsolidationPolicyType `json:"type,omitempty"` ArangoSearchConsolidationPolicyBytesAccum ArangoSearchConsolidationPolicyTier }
ArangoSearchConsolidationPolicy holds threshold values specifying when to consolidate view data. Semantics of the values depend on where they are used.
type ArangoSearchConsolidationPolicyBytesAccum ¶ added in v2.0.3
type ArangoSearchConsolidationPolicyBytesAccum struct { // Threshold, see ArangoSearchConsolidationTypeBytesAccum Threshold *float64 `json:"threshold,omitempty"` }
ArangoSearchConsolidationPolicyBytesAccum contains fields used for ArangoSearchConsolidationPolicyTypeBytesAccum
type ArangoSearchConsolidationPolicyTier ¶ added in v2.0.3
type ArangoSearchConsolidationPolicyTier struct { MinScore *int64 `json:"minScore,omitempty"` // MinSegments specifies the minimum number of segments that will be evaluated as candidates for consolidation. MinSegments *int64 `json:"segmentsMin,omitempty"` // MaxSegments specifies the maximum number of segments that will be evaluated as candidates for consolidation. MaxSegments *int64 `json:"segmentsMax,omitempty"` // SegmentsBytesMax specifies the maximum allowed size of all consolidated segments in bytes. SegmentsBytesMax *int64 `json:"segmentsBytesMax,omitempty"` // SegmentsBytesFloor defines the value (in bytes) to treat all smaller segments as equal for consolidation selection. SegmentsBytesFloor *int64 `json:"segmentsBytesFloor,omitempty"` }
ArangoSearchConsolidationPolicyTier contains fields used for ArangoSearchConsolidationPolicyTypeTier
type ArangoSearchConsolidationPolicyType ¶ added in v2.0.3
type ArangoSearchConsolidationPolicyType string
ArangoSearchConsolidationPolicyType strings for consolidation types
const ( // ArangoSearchConsolidationPolicyTypeTier consolidate based on segment byte size and live document count as dictated by the customization attributes. ArangoSearchConsolidationPolicyTypeTier ArangoSearchConsolidationPolicyType = "tier" // ArangoSearchConsolidationPolicyTypeBytesAccum consolidate if and only if ({threshold} range [0.0, 1.0]) // {threshold} > (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes, // i.e. the sum of all candidate segment's byte size is less than the total segment byte size multiplied by the {threshold}. ArangoSearchConsolidationPolicyTypeBytesAccum ArangoSearchConsolidationPolicyType = "bytes_accum" )
type ArangoSearchEdgeNGram ¶ added in v2.0.3
type ArangoSearchEdgeNGram struct { // Min used by Text Min *int64 `json:"min,omitempty"` // Max used by Text Max *int64 `json:"max,omitempty"` // PreserveOriginal used by Text PreserveOriginal *bool `json:"preserveOriginal,omitempty"` }
ArangoSearchEdgeNGram specifies options for the edgeNGram text analyzer. More information can be found here: https://docs.arangodb.com/stable/index-and-search/analyzers/#text
type ArangoSearchElementProperties ¶ added in v2.0.3
type ArangoSearchElementProperties struct { AnalyzerDefinitions []AnalyzerDefinition `json:"analyzerDefinitions,omitempty"` // The list of analyzers to be used for indexing of string values. Defaults to ["identity"]. Analyzers []string `json:"analyzers,omitempty"` // If set to true, all fields of this element will be indexed. Defaults to false. IncludeAllFields *bool `json:"includeAllFields,omitempty"` // If set to true, values in a list are treated as separate values. Defaults to false. TrackListPositions *bool `json:"trackListPositions,omitempty"` // This value specifies how the view should track values. StoreValues ArangoSearchStoreValues `json:"storeValues,omitempty"` // Fields contains the properties for individual fields of the element. // The key of the map are field names. Fields ArangoSearchFields `json:"fields,omitempty"` // If set to true, then no exclusive lock is used on the source collection during View index creation, // so that it remains basically available. inBackground is an option that can be set when adding links. // It does not get persisted as it is not a View property, but only a one-off option InBackground *bool `json:"inBackground,omitempty"` // Nested contains the properties for nested fields (sub-objects) of the element // Enterprise Edition only Nested ArangoSearchFields `json:"nested,omitempty"` // Cache If you enable this option, then field normalization values are always cached in memory. // Introduced in v3.9.5, Enterprise Edition only Cache *bool `json:"cache,omitempty"` }
ArangoSearchElementProperties contains properties that specify how an element is indexed in an ArangoSearch view. Note that this structure is recursive. Settings not specified (nil) at a given level will inherit their setting from a lower level.
type ArangoSearchFeature ¶ added in v2.0.3
type ArangoSearchFeature string
ArangoSearchFeature specifies a feature to an analyzer
const ( // ArangoSearchFeatureFrequency how often a term is seen, required for PHRASE() ArangoSearchFeatureFrequency ArangoSearchFeature = "frequency" // ArangoSearchFeatureNorm the field normalization factor ArangoSearchFeatureNorm ArangoSearchFeature = "norm" // ArangoSearchFeaturePosition sequentially increasing term position, required for PHRASE(). If present then the frequency feature is also required ArangoSearchFeaturePosition ArangoSearchFeature = "position" // ArangoSearchFeatureOffset can be specified if 'position' feature is set ArangoSearchFeatureOffset ArangoSearchFeature = "offset" )
type ArangoSearchFields ¶ added in v2.0.3
type ArangoSearchFields map[string]ArangoSearchElementProperties
ArangoSearchFields is a strongly typed map containing properties per field. The keys in the map are field names.
type ArangoSearchFormat ¶ added in v2.0.3
type ArangoSearchFormat string
const ( // ArangoSearchFormatLatLngDouble stores each latitude and longitude value as an 8-byte floating-point value (16 bytes per coordinate pair). // It is default value. ArangoSearchFormatLatLngDouble ArangoSearchFormat = "latLngDouble" ArangoSearchFormatLatLngInt ArangoSearchFormat = "latLngInt" // ArangoSearchFormatS2Point store each longitude-latitude pair in the native format of Google S2 which is used for geo-spatial // calculations (24 bytes per coordinate pair). ArangoSearchFormatS2Point ArangoSearchFormat = "s2Point" )
type ArangoSearchLinks ¶ added in v2.0.3
type ArangoSearchLinks map[string]ArangoSearchElementProperties
ArangoSearchLinks is a strongly typed map containing links between a collection and a view. The keys in the map are collection names.
type ArangoSearchNGramStreamType ¶ added in v2.0.3
type ArangoSearchNGramStreamType string
const ( // ArangoSearchNGramStreamBinary used by NGram. Default value ArangoSearchNGramStreamBinary ArangoSearchNGramStreamType = "binary" // ArangoSearchNGramStreamUTF8 used by NGram ArangoSearchNGramStreamUTF8 ArangoSearchNGramStreamType = "utf8" )
type ArangoSearchPrimarySortEntry ¶ added in v2.0.3
type ArangoSearchPrimarySortEntry struct { Field string `json:"field,omitempty"` Ascending *bool `json:"asc,omitempty"` }
ArangoSearchPrimarySortEntry describes an entry for the primarySort list
func (ArangoSearchPrimarySortEntry) GetAscending ¶ added in v2.0.3
func (pse ArangoSearchPrimarySortEntry) GetAscending() bool
GetAscending returns the value of Ascending or false if not set
type ArangoSearchStoreValues ¶ added in v2.0.3
type ArangoSearchStoreValues string
ArangoSearchStoreValues is the type of the StoreValues option of an ArangoSearch element.
const ( // ArangoSearchStoreValuesNone specifies that a view should not store values. ArangoSearchStoreValuesNone ArangoSearchStoreValues = "none" // ArangoSearchStoreValuesID specifies that a view should only store // information about value presence, to allow use of the EXISTS() function. ArangoSearchStoreValuesID ArangoSearchStoreValues = "id" )
type ArangoSearchView ¶ added in v2.0.3
type ArangoSearchView interface { // View Includes generic View functions View // Properties fetches extended information about the view. Properties(ctx context.Context) (ArangoSearchViewProperties, error) // SetProperties Changes all properties of a View by replacing them. SetProperties(ctx context.Context, options ArangoSearchViewProperties) error // UpdateProperties Partially changes the properties of a View by updating the specified attributes. UpdateProperties(ctx context.Context, options ArangoSearchViewProperties) error }
ArangoSearchView provides access to the information of a view. Views are only available in ArangoDB 3.4 and higher.
type ArangoSearchViewAlias ¶ added in v2.0.3
type ArangoSearchViewAlias interface { // View Includes generic View functions View // Properties fetches extended information about the view. Properties(ctx context.Context) (ArangoSearchAliasViewProperties, error) // SetProperties Replaces the list of indexes of a search-alias View. SetProperties(ctx context.Context, options ArangoSearchAliasViewProperties) error // UpdateProperties Updates the list of indexes of a search-alias View. UpdateProperties(ctx context.Context, options ArangoSearchAliasUpdateOpts) error }
ArangoSearchViewAlias provides access to the information of a view alias Views aliases are only available in ArangoDB 3.10 and higher.
type ArangoSearchViewProperties ¶ added in v2.0.3
type ArangoSearchViewProperties struct { ViewBase // CleanupIntervalStep Wait at least this many commits between removing unused files in the ArangoSearch data // directory (default: 2, to disable use: 0). For the case where the consolidation policies merge segments // often (i.e. a lot of commit+consolidate), a lower value causes a lot of disk space to be wasted. // For the case where the consolidation policies rarely merge segments (i.e. few inserts/deletes), // a higher value impacts performance without any added benefits. // // Background: With every “commit” or “consolidate” operation, a new state of the View’s internal data structures // is created on disk. Old states/snapshots are released once there are no longer any users remaining. // However, the files for the released states/snapshots are left on disk, and only removed by “cleanup” operation. CleanupIntervalStep *int64 `json:"cleanupIntervalStep,omitempty"` // ConsolidationInterval specifies the minimum number of milliseconds that must be waited // between committing index data changes and making them visible to queries. // Defaults to 60000. // Use 0 to disable. // For the case where there are a lot of inserts/updates, a lower value, // until commit, will cause the index not to account for them and memory usage // would continue to grow. // For the case where there are a few inserts/updates, a higher value will // impact performance and waste disk space for each commit call without // any added benefits. ConsolidationInterval *int64 `json:"consolidationIntervalMsec,omitempty"` // ConsolidationPolicy specifies thresholds for consolidation. 
ConsolidationPolicy *ArangoSearchConsolidationPolicy `json:"consolidationPolicy,omitempty"` // CommitInterval ArangoSearch waits at least this many milliseconds between committing view data store changes and making documents visible to queries CommitInterval *int64 `json:"commitIntervalMsec,omitempty"` // WriteBufferIdle specifies the maximum number of writers (segments) cached in the pool. // 0 value turns off caching, default value is 64. WriteBufferIdle *int64 `json:"writebufferIdle,omitempty"` // WriteBufferActive specifies the maximum number of concurrent active writers (segments) performing (a transaction). // Other writers (segments) wait until current active writers (segments) finish. // 0 value turns off this limit and used by default. WriteBufferActive *int64 `json:"writebufferActive,omitempty"` // WriteBufferSizeMax specifies maximum memory byte size per writer (segment) before a writer (segment) flush is triggered. // 0 value turns off this limit for any writer (buffer) and will be flushed only after a period defined for special thread during ArangoDB server startup. // 0 value should be used carefully due to high potential memory consumption. WriteBufferSizeMax *int64 `json:"writebufferSizeMax,omitempty"` // Links contains the properties for how individual collections // are indexed in the view. // The key of the map are collection names. Links ArangoSearchLinks `json:"links,omitempty"` // OptimizeTopK is an array of strings defining optimized sort expressions. // Introduced in v3.11.0, Enterprise Edition only. OptimizeTopK []string `json:"optimizeTopK,omitempty"` // PrimarySort describes how individual fields are sorted PrimarySort []ArangoSearchPrimarySortEntry `json:"primarySort,omitempty"` // PrimarySortCompression Defines how to compress the primary sort data (introduced in v3.7.1). // ArangoDB v3.5 and v3.6 always compress the index using LZ4. This option is immutable. 
PrimarySortCompression PrimarySortCompression `json:"primarySortCompression,omitempty"` // PrimarySortCache If you enable this option, then the primary sort columns are always cached in memory. // Can't be changed after creating View. // Introduced in v3.9.5, Enterprise Edition only PrimarySortCache *bool `json:"primarySortCache,omitempty"` // PrimaryKeyCache If you enable this option, then the primary key columns are always cached in memory. // Introduced in v3.9.6, Enterprise Edition only // Can't be changed after creating View. PrimaryKeyCache *bool `json:"primaryKeyCache,omitempty"` // StoredValues An array of objects to describe which document attributes to store in the View index (introduced in v3.7.1). // It can then cover search queries, which means the data can be taken from the index directly and accessing the storage engine can be avoided. // This option is immutable. StoredValues []StoredValue `json:"storedValues,omitempty"` }
ArangoSearchViewProperties contains properties of view with type 'arangosearch'
type AsyncJobDeleteOptions ¶ added in v2.0.3
type AsyncJobDeleteType ¶ added in v2.0.3
type AsyncJobDeleteType string
const ( DeleteAllJobs AsyncJobDeleteType = "all" DeleteExpiredJobs AsyncJobDeleteType = "expired" DeleteSingleJob AsyncJobDeleteType = "single" )
type AsyncJobListOptions ¶ added in v2.0.3
type AsyncJobListOptions struct { // Count The maximum number of ids to return per call. // If not specified, a server-defined maximum value will be used. Count int `json:"count,omitempty"` }
type AsyncJobStatusType ¶ added in v2.0.3
type AsyncJobStatusType string
const ( JobDone AsyncJobStatusType = "done" JobPending AsyncJobStatusType = "pending" )
type BackupCreateOptions ¶ added in v2.1.0
type BackupCreateOptions struct { // The label for this backup. // The label is used together with a timestamp string to create a unique backup identifier, <timestamp>_<label>. // Default: If omitted or empty, a UUID will be generated. Label string `json:"label,omitempty"` // The time in seconds that the operation tries to get a consistent snapshot. The default is 120 seconds. Timeout *uint `json:"timeout,omitempty"` // If set to `true` and no global transaction lock can be acquired within the // given timeout, a possibly inconsistent backup is taken. AllowInconsistent *bool `json:"allowInconsistent,omitempty"` // (Enterprise Edition cluster only.) If set to `true` and no global transaction lock can be acquired within the // given timeout, all running transactions are forcefully aborted to ensure that a consistent backup can be created. Force *bool `json:"force,omitempty"` }
type BackupListOptions ¶ added in v2.1.0
type BackupListOptions struct { // Set to receive info about specific single backup ID string `json:"id,omitempty"` }
type BackupMeta ¶ added in v2.1.0
type BackupMeta struct { BackupResponse Version string `json:"version,omitempty"` Available bool `json:"available,omitempty"` NumberOfPiecesPresent uint `json:"nrPiecesPresent,omitempty"` Keys []BackupMetaSha256 `json:"keys,omitempty"` }
type BackupMetaSha256 ¶ added in v2.1.0
type BackupMetaSha256 struct {
SHA256 string `json:"sha256"`
}
type BackupResponse ¶ added in v2.1.0
type BackupResponse struct { ID string `json:"id,omitempty"` PotentiallyInconsistent bool `json:"potentiallyInconsistent,omitempty"` NumberOfFiles uint `json:"nrFiles,omitempty"` NumberOfDBServers uint `json:"nrDBServers,omitempty"` SizeInBytes uint64 `json:"sizeInBytes,omitempty"` CreationTime time.Time `json:"datetime,omitempty"` }
type BackupRestoreResponse ¶ added in v2.1.0
type BackupRestoreResponse struct {
Previous string `json:"previous,omitempty"`
}
type BackupTransferProgressResponse ¶ added in v2.1.0
type BackupTransferProgressResponse struct { BackupID string `json:"BackupId,omitempty"` Cancelled bool `json:"Cancelled,omitempty"` Timestamp string `json:"Timestamp,omitempty"` DBServers map[string]BackupTransferReport `json:"DBServers,omitempty"` }
type BackupTransferReport ¶ added in v2.1.0
type BackupTransferReport struct { Status BackupTransferStatus `json:"Status,omitempty"` Error int `json:"Error,omitempty"` ErrorMessage string `json:"ErrorMessage,omitempty"` Progress struct { Total int `json:"Total,omitempty"` Done int `json:"Done,omitempty"` Timestamp string `json:"Timestamp,omitempty"` } `json:"Progress,omitempty"` }
type BackupTransferStatus ¶ added in v2.1.0
type BackupTransferStatus string
BackupTransferStatus represents all possible states a transfer job can be in
const ( TransferAcknowledged BackupTransferStatus = "ACK" TransferStarted BackupTransferStatus = "STARTED" TransferCompleted BackupTransferStatus = "COMPLETED" TransferFailed BackupTransferStatus = "FAILED" TransferCancelled BackupTransferStatus = "CANCELLED" )
type BeginTransactionOptions ¶
type BeginTransactionOptions struct { // Set this to true to allow the Coordinator to ask any shard replica for the data, not only the shard leader. // This may result in “dirty reads”. AllowDirtyReads *bool `json:"-"` // Allow reading from undeclared collections. AllowImplicit bool `json:"allowImplicit,omitempty"` // An optional numeric value that can be used to set a timeout in seconds for waiting on collection locks. // This option is only meaningful when using exclusive locks. If not specified, a default value will be used. // Setting lockTimeout to 0 will make ArangoDB not time out waiting for a lock. LockTimeout float64 `json:"lockTimeout,omitempty"` // Transaction size limit in bytes. MaxTransactionSize uint64 `json:"maxTransactionSize,omitempty"` // Whether to disable fast locking for write operations. // // Skipping the fast lock round can be faster overall if there are many concurrent Stream Transactions queued that // all try to lock the same collection exclusively. It avoids deadlocking and retrying which can occur with the fast // locking by guaranteeing a deterministic locking order at the expense of each actual locking operation taking longer. // // Fast locking should not be skipped for read-only Stream Transactions because it degrades performance if there // are no concurrent transactions that use exclusive locks on the same collection. SkipFastLockRound bool `json:"skipFastLockRound,omitempty"` // An optional boolean flag that, if set, will force the transaction to write all data to disk before returning. WaitForSync bool `json:"waitForSync,omitempty"` LockTimeoutDuration time.Duration `json:"-"` }
BeginTransactionOptions provides options for BeginTransaction call
type CacheRespObject ¶ added in v2.1.5
type CacheRespObject struct { // BindVars are the bind variables used in the query. BindVars map[string]interface{} `json:"bindVars,omitempty"` // DataSources is a list of data sources used in the query. DataSources *[]string `json:"dataSources,omitempty"` // Hash is the plan cache key. Hash *string `json:"hash,omitempty"` // Hits is the number of times the cached plan has been utilized so far. Hits *uint32 `json:"hits,omitempty"` // Query is the AQL query string. Query *string `json:"query,omitempty"` }
type Client ¶
type Client interface { // Connection returns current Driver Connection Connection() connection.Connection Requests ClientDatabase ClientUsers ClientServerInfo ClientAdmin ClientAsyncJob ClientFoxx ClientTasks }
func NewClient ¶
func NewClient(connection connection.Connection) Client
type ClientAdmin ¶
type ClientAdmin interface { ClientAdminLog ClientAdminBackup ClientAdminLicense ClientAdminCluster // ServerMode returns the current mode in which the server/cluster is operating. // This call needs ArangoDB 3.3 and up. ServerMode(ctx context.Context) (ServerMode, error) // SetServerMode changes the current mode in which the server/cluster is operating. // This call needs a client that uses JWT authentication. // This call needs ArangoDB 3.3 and up. SetServerMode(ctx context.Context, mode ServerMode) error // CheckAvailability checks if the particular server is available. // Use ClientAdminCluster.Health() to fetch the Endpoint list. // For ActiveFailover, it will return an error (503 code) if the server is not the leader. CheckAvailability(ctx context.Context, serverEndpoint string) error }
type ClientAdminBackup ¶
type ClientAdminBackup interface { // BackupCreate creates a new backup and returns its id BackupCreate(ctx context.Context, opt *BackupCreateOptions) (BackupResponse, error) // BackupRestore restores the backup with given id BackupRestore(ctx context.Context, id string) (BackupRestoreResponse, error) // BackupDelete deletes the backup with given id BackupDelete(ctx context.Context, id string) error // BackupList returns meta data about some/all backups available BackupList(ctx context.Context, opt *BackupListOptions) (ListBackupsResponse, error) // BackupUpload triggers an upload of backup into the remote repository using the given config BackupUpload(ctx context.Context, backupId string, remoteRepository string, config interface{}) (TransferMonitor, error) // BackupDownload triggers a download of backup into the remote repository using the given config BackupDownload(ctx context.Context, backupId string, remoteRepository string, config interface{}) (TransferMonitor, error) TransferMonitor(jobId string, transferType TransferType) (TransferMonitor, error) }
type ClientAdminCluster ¶ added in v2.1.0
type ClientAdminCluster interface { // Health returns the cluster configuration & health. Not available in single server deployments (403 Forbidden). Health(ctx context.Context) (ClusterHealth, error) // DatabaseInventory the inventory of the cluster collections (with entire details) from a specific database. DatabaseInventory(ctx context.Context, dbName string) (DatabaseInventory, error) // MoveShard moves a single shard of the given collection between `fromServer` and `toServer`. MoveShard(ctx context.Context, col Collection, shard ShardID, fromServer, toServer ServerID) (string, error) // CleanOutServer triggers activities to clean out a DBServer. CleanOutServer(ctx context.Context, serverID ServerID) (string, error) // ResignServer triggers activities to let a DBServer resign for all shards. ResignServer(ctx context.Context, serverID ServerID) (string, error) // NumberOfServers returns the number of coordinators & dbServers in a cluster and the IDs of cleanedOut servers. NumberOfServers(ctx context.Context) (NumberOfServersResponse, error) // IsCleanedOut checks if the dbServer with given ID has been cleaned out. IsCleanedOut(ctx context.Context, serverID ServerID) (bool, error) // RemoveServer is a low-level option to remove a server from a cluster. // This function is suitable for servers of type coordinator or dbServer. // The use of `ClientServerAdmin.Shutdown` is highly recommended above this function. RemoveServer(ctx context.Context, serverID ServerID) error }
type ClientAdminLicense ¶ added in v2.0.3
type ClientAdminLicense interface { // GetLicense returns license of an ArangoDB deployment. GetLicense(ctx context.Context) (License, error) // SetLicense Set a new license for an Enterprise Edition instance. // Can be called on single servers, Coordinators, and DB-Servers. SetLicense(ctx context.Context, license string, force bool) error }
type ClientAdminLog ¶
type ClientAdminLog interface { // GetLogLevels returns log levels for topics. GetLogLevels(ctx context.Context, opts *LogLevelsGetOptions) (LogLevels, error) // SetLogLevels sets log levels for a given topics. SetLogLevels(ctx context.Context, logLevels LogLevels, opts *LogLevelsSetOptions) error }
type ClientAsyncJob ¶ added in v2.0.3
type ClientAsyncJob interface { // AsyncJobList Returns the ids of job results with a specific status AsyncJobList(ctx context.Context, jobType AsyncJobStatusType, opts *AsyncJobListOptions) ([]string, error) // AsyncJobStatus Returns the status of a specific job AsyncJobStatus(ctx context.Context, jobID string) (AsyncJobStatusType, error) // AsyncJobCancel Cancels a specific async job AsyncJobCancel(ctx context.Context, jobID string) (bool, error) // AsyncJobDelete Deletes async job result AsyncJobDelete(ctx context.Context, deleteType AsyncJobDeleteType, opts *AsyncJobDeleteOptions) (bool, error) }
ClientAsyncJob https://docs.arangodb.com/stable/develop/http-api/jobs/
type ClientDatabase ¶
type ClientDatabase interface { // GetDatabase opens a connection to an existing database. // If no database with given name exists, an NotFoundError is returned. GetDatabase(ctx context.Context, name string, options *GetDatabaseOptions) (Database, error) // DatabaseExists returns true if a database with given name exists. DatabaseExists(ctx context.Context, name string) (bool, error) // Databases returns a list of all databases found by the client. Databases(ctx context.Context) ([]Database, error) // AccessibleDatabases returns a list of all databases that can be accessed by the authenticated user. AccessibleDatabases(ctx context.Context) ([]Database, error) // CreateDatabase creates a new database with given name and opens a connection to it. // If a database with given name already exists, a DuplicateError is returned. CreateDatabase(ctx context.Context, name string, options *CreateDatabaseOptions) (Database, error) }
type ClientFoxx ¶ added in v2.1.4
type ClientFoxx interface { ClientFoxxService }
type ClientFoxxService ¶ added in v2.1.4
type ClientFoxxService interface { // InstallFoxxService installs a new service at a given mount path. InstallFoxxService(ctx context.Context, dbName string, zipFile string, options *FoxxDeploymentOptions) error // UninstallFoxxService uninstalls service at a given mount path. UninstallFoxxService(ctx context.Context, dbName string, options *FoxxDeleteOptions) error // ListInstalledFoxxServices retrieves the list of Foxx services installed in the specified database. // If excludeSystem is true, system services (like _admin/aardvark) will be excluded from the result, // returning only custom-installed Foxx services. ListInstalledFoxxServices(ctx context.Context, dbName string, excludeSystem *bool) ([]FoxxServiceListItem, error) // GetInstalledFoxxService retrieves detailed information about a specific Foxx service // installed in the specified database. // The service is identified by its mount path, which must be provided and non-empty. // If the mount path is missing or empty, a RequiredFieldError is returned. // The returned FoxxServiceObject contains the full metadata and configuration details // for the specified service. GetInstalledFoxxService(ctx context.Context, dbName string, mount *string) (FoxxServiceObject, error) // ReplaceFoxxService removes the service at the given mount path from the database and file system // and installs the given new service at the same mount path. ReplaceFoxxService(ctx context.Context, dbName string, zipFile string, opts *FoxxDeploymentOptions) error // UpgradeFoxxService installs the given new service on top of the service currently installed // at the specified mount path, retaining the existing service’s configuration and dependencies. // This should be used only when upgrading to a newer or equivalent version of the same service. 
UpgradeFoxxService(ctx context.Context, dbName string, zipFile string, opts *FoxxDeploymentOptions) error // GetFoxxServiceConfiguration retrieves the configuration values for the Foxx service // mounted at the specified path in the given database. // The mount parameter must not be nil or empty. // Returns a map containing the current configuration key-value pairs. GetFoxxServiceConfiguration(ctx context.Context, dbName string, mount *string) (map[string]interface{}, error) // UpdateFoxxServiceConfiguration updates the configuration of a specific Foxx service. // If the Foxx service does not allow a particular configuration key, it will appear // in the response warnings. // The caller is responsible for validating allowed keys before calling this method. UpdateFoxxServiceConfiguration(ctx context.Context, dbName string, mount *string, opt map[string]interface{}) (map[string]interface{}, error) // ReplaceFoxxServiceConfiguration replaces the given Foxx service's dependencies entirely. // If the Foxx service does not allow a particular configuration key, it will appear // in the response warnings. // The caller is responsible for validating allowed keys before calling this method. ReplaceFoxxServiceConfiguration(ctx context.Context, dbName string, mount *string, opt map[string]interface{}) (map[string]interface{}, error) // GetFoxxServiceDependencies retrieves the configured dependencies for a specific Foxx service. // Returns: // A map where each key is a dependency name and the value is an object containing: // * title: Human-readable title of the dependency // * mount: Current mount path of the dependency service (if set) // An error if the request fails or the mount is missing. GetFoxxServiceDependencies(ctx context.Context, dbName string, mount *string) (map[string]interface{}, error) // UpdateFoxxServiceDependencies updates the configured dependencies of a specific Foxx service. 
// If the Foxx service does not allow a particular dependency key, it will appear // in the "warnings" field of the response. // The caller is responsible for ensuring that only allowed dependency keys are provided. UpdateFoxxServiceDependencies(ctx context.Context, dbName string, mount *string, opt map[string]interface{}) (map[string]interface{}, error) // ReplaceFoxxServiceDependencies replaces the given Foxx service's dependencies entirely. // If the Foxx service does not allow a particular dependency key, it will appear // in the "warnings" field of the response. // The caller is responsible for validating allowed keys before calling this method. ReplaceFoxxServiceDependencies(ctx context.Context, dbName string, mount *string, opt map[string]interface{}) (map[string]interface{}, error) // GetFoxxServiceScripts retrieves the scripts associated with a specific Foxx service. GetFoxxServiceScripts(ctx context.Context, dbName string, mount *string) (map[string]interface{}, error) // RunFoxxServiceScript executes a specific script associated with a Foxx service. RunFoxxServiceScript(ctx context.Context, dbName string, name string, mount *string, body map[string]interface{}) (map[string]interface{}, error) // RunFoxxServiceTests executes the test suite of a specific Foxx service // deployed in an ArangoDB database. RunFoxxServiceTests(ctx context.Context, dbName string, opt FoxxTestOptions) (map[string]interface{}, error) // EnableDevelopmentMode enables the development mode for a specific Foxx service. // Development mode causes the Foxx service to be reloaded from the filesystem and its setup // script (if present) to be re-executed every time the service handles a request. EnableDevelopmentMode(ctx context.Context, dbName string, mount *string) (map[string]interface{}, error) // DisableDevelopmentMode disables the development mode for a specific Foxx service. 
DisableDevelopmentMode(ctx context.Context, dbName string, mount *string) (map[string]interface{}, error) // GetFoxxServiceReadme retrieves the README file for a specific Foxx service. GetFoxxServiceReadme(ctx context.Context, dbName string, mount *string) ([]byte, error) // GetFoxxServiceSwagger retrieves the Swagger specification // for a specific Foxx service mounted in the given database. GetFoxxServiceSwagger(ctx context.Context, dbName string, mount *string) (SwaggerResponse, error) // CommitFoxxService commits the local Foxx service state of the Coordinator // to the database. This can resolve service conflicts between Coordinators. CommitFoxxService(ctx context.Context, dbName string, replace *bool) error // DownloadFoxxServiceBundle downloads a zip bundle of the Foxx service directory // from the specified database and mount point. // Note: The response is the raw zip data (binary). DownloadFoxxServiceBundle(ctx context.Context, dbName string, mount *string) ([]byte, error) }
type ClientServerInfo ¶
type ClientServerInfo interface { // Version returns version information from the connected database server. Version(ctx context.Context) (VersionInfo, error) // VersionWithOptions returns version information from the connected database server. VersionWithOptions(ctx context.Context, opts *GetVersionOptions) (VersionInfo, error) // ServerRole returns the role of the server that answers the request. ServerRole(ctx context.Context) (ServerRole, error) // ServerID Gets the ID of this server in the cluster. // An error is returned when calling this to a server that is not part of a cluster. ServerID(ctx context.Context) (string, error) }
type ClientTasks ¶ added in v2.1.5
type ClientTasks interface { // Task retrieves an existing task by its ID. // If no task with the given ID exists, a NotFoundError is returned. Task(ctx context.Context, databaseName string, id string) (Task, error) // Tasks returns a list of all tasks on the server. Tasks(ctx context.Context, databaseName string) ([]Task, error) // CreateTask creates a new task with the specified options. CreateTask(ctx context.Context, databaseName string, options TaskOptions) (Task, error) // CreateTaskWithID creates a new task with the specified ID and options. // If a task with the given ID already exists, a Conflict error is returned. CreateTaskWithID(ctx context.Context, databaseName string, id string, options TaskOptions) (Task, error) // RemoveTask deletes an existing task by its ID. RemoveTask(ctx context.Context, databaseName string, id string) error }
ClientTasks defines the interface for managing tasks in ArangoDB.
type ClientUsers ¶ added in v2.1.0
type ClientUsers interface { // User opens a connection to an existing user. // If no user with given name exists, an NotFoundError is returned. User(ctx context.Context, name string) (User, error) // UserExists returns true if a user with a given name exists. UserExists(ctx context.Context, name string) (bool, error) // Users returns a list of all users found by the client. Users(ctx context.Context) ([]User, error) // CreateUser creates a new user with a given name and opens a connection to it. // If a user with a given name already exists, a Conflict error is returned. CreateUser(ctx context.Context, name string, options *UserOptions) (User, error) // ReplaceUser Replaces the data of an existing user. ReplaceUser(ctx context.Context, name string, options *UserOptions) (User, error) // UpdateUser Partially modifies the data of an existing user UpdateUser(ctx context.Context, name string, options *UserOptions) (User, error) // RemoveUser removes an existing user. RemoveUser(ctx context.Context, name string) error }
type ClusterHealth ¶
type ClusterHealth struct { // Unique identifier of the entire cluster. // This ID is created when the cluster was first created. ID string `json:"ClusterId"` // Health per server Health map[ServerID]ServerHealth `json:"Health"` }
ClusterHealth contains health information for all servers in a cluster.
type Collection ¶
type Collection interface { Name() string Database() Database // Shards fetches shards information of the collection. Shards(ctx context.Context, details bool) (CollectionShards, error) // Remove removes the entire collection. // If the collection does not exist, a NotFoundError is returned. Remove(ctx context.Context) error // RemoveWithOptions removes the entire collection. // If the collection does not exist, a NotFoundError is returned. RemoveWithOptions(ctx context.Context, opts *RemoveCollectionOptions) error // Truncate removes all documents from the collection, but leaves the indexes intact. Truncate(ctx context.Context) error // Properties fetches extended information about the collection. Properties(ctx context.Context) (CollectionProperties, error) // SetProperties allows modifying collection parameters SetPropertiesV2(ctx context.Context, options SetCollectionPropertiesOptionsV2) error // Count fetches the number of document in the collection. Count(ctx context.Context) (int64, error) // Statistics returns the number of documents and additional statistical information about the collection. Statistics(ctx context.Context, details bool) (CollectionFigures, error) // Revision fetches the revision ID of the collection. // The revision ID is a server-generated string that clients can use to check whether data // in a collection has changed since the last revision check. Revision(ctx context.Context) (CollectionProperties, error) // Checksum returns a checksum for the specified collection // withRevisions - Whether to include document revision ids in the checksum calculation. // withData - Whether to include document body data in the checksum calculation. Checksum(ctx context.Context, withRevisions *bool, withData *bool) (CollectionChecksum, error) // ResponsibleShard returns the shard responsible for the given options. 
ResponsibleShard(ctx context.Context, options map[string]interface{}) (string, error) // LoadIndexesIntoMemory loads all indexes of the collection into memory. LoadIndexesIntoMemory(ctx context.Context) (bool, error) // Rename renames the collection. // Renaming collections is not supported in cluster deployments. // Renaming collections is only supported in single server deployments. Rename(ctx context.Context, req RenameCollectionRequest) (CollectionInfo, error) // RecalculateCount recalculates the count of documents in the collection. RecalculateCount(ctx context.Context) (bool, *int64, error) // Compact compacts the data of a collection in order to reclaim disk space. // This operation is only supported in single server deployments. // In cluster deployments, the compaction is done automatically by the server. Compact(ctx context.Context) (CollectionInfo, error) CollectionDocuments CollectionIndexes }
type CollectionChecksum ¶ added in v2.1.5
type CollectionChecksum struct { CollectionInfo // The collection revision id as a string. Revision string `json:"revision,omitempty"` }
CollectionChecksum contains information about a collection checksum response
type CollectionDocumentCreate ¶
type CollectionDocumentCreate interface { // CreateDocument creates a single document in the collection. // The document data is loaded from the given document, the document metadata is returned. // If the document data already contains a `_key` field, this will be used as key of the new document, // otherwise a unique key is created. // A ConflictError is returned when a `_key` field contains a duplicate key, or any other field violates an index constraint. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface CreateDocument(ctx context.Context, document interface{}) (CollectionDocumentCreateResponse, error) // CreateDocumentWithOptions creates a single document in the collection. // The document data is loaded from the given document, the document metadata is returned. // If the document data already contains a `_key` field, this will be used as key of the new document, // otherwise a unique key is created. // A ConflictError is returned when a `_key` field contains a duplicate key, or any other field violates an index constraint. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface CreateDocumentWithOptions(ctx context.Context, document interface{}, options *CollectionDocumentCreateOptions) (CollectionDocumentCreateResponse, error) // CreateDocuments creates multiple documents in the collection. // The document data is loaded from the given documents slice, the documents metadata is returned. // If a document element already contains a `_key` field, this will be used as key of the new document, // otherwise a unique key is created. // If a document element contains a `_key` field with a duplicate key, or any other field that violates an index constraint, // then the ConflictError for a specific document will be returned only while reading from CollectionDocumentCreateResponseReader // and not as the error output of this function. 
// If the create request itself fails or one of the arguments is invalid, an error is returned. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface CreateDocuments(ctx context.Context, documents interface{}) (CollectionDocumentCreateResponseReader, error) // CreateDocumentsWithOptions creates multiple documents in the collection. // The document data is loaded from the given documents slice, the documents metadata is returned. // If a document element already contains a `_key` field, this will be used as key of the new document, // otherwise a unique key is created. // If a document element contains a `_key` field with a duplicate key, or any other field that violates an index constraint, // then the ConflictError for a specific document will be returned only while reading from CollectionDocumentCreateResponseReader // and not as the error output of this function. // If the create request itself fails or one of the arguments is invalid, an error is returned. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface CreateDocumentsWithOptions(ctx context.Context, documents interface{}, opts *CollectionDocumentCreateOptions) (CollectionDocumentCreateResponseReader, error) }
CollectionDocumentCreate interface for creating documents in a collection. https://docs.arangodb.com/stable/develop/http-api/documents/#create-a-document
type CollectionDocumentCreateOptions ¶
type CollectionDocumentCreateOptions struct { // Wait until document has been synced to disk. WithWaitForSync *bool // If set to true, the insert becomes a replace-insert. // If a document with the same _key already exists, // the new document is not rejected with unique constraint violation error but replaces the old document. // Note that operations with overwrite parameter require a _key attribute in the request payload, // therefore they can only be performed on collections sharded by _key. Overwrite *bool // This option supersedes `overwrite` option. OverwriteMode *CollectionDocumentCreateOverwriteMode // If set to true, an empty object is returned as response if the document operation succeeds. // No meta-data is returned for the created document. If the operation raises an error, an error object is returned. // You can use this option to save network traffic. Silent *bool // Additionally return the complete new document NewObject interface{} // Additionally return the complete old document under the attribute. // Only available if the overwrite option is used. OldObject interface{} // RefillIndexCaches if set to true then refills the in-memory index caches. RefillIndexCaches *bool // If the intention is to delete existing attributes with the update-insert command, set it to false. // This modifies the behavior of the patch command to remove top-level attributes and sub-attributes from // the existing document that are contained in the patch document with an attribute value of null // (but not attributes of objects that are nested inside of arrays). // This option controls the update-insert behavior only (CollectionDocumentCreateOverwriteModeUpdate). KeepNull *bool // Controls whether objects (not arrays) are merged if present in both, the existing and the update-insert document. // If set to false, the value in the patch document overwrites the existing document’s value. // If set to true, objects are merged. The default is true. 
// This option controls the update-insert behavior only (CollectionDocumentCreateOverwriteModeUpdate). MergeObjects *bool // By default, or if this is set to true, the _rev attributes in the given document are ignored. // If this is set to false, then the _rev attribute given in the body document is taken as a precondition. // The document is only removed if the current revision is the one specified. // This works only with multiple documents removal method CollectionDocumentDelete.DeleteDocumentsWithOptions IgnoreRevs *bool // IsRestore is used to make insert functions use the "isRestore=<value>" setting. // Note: This option is intended for internal (replication) use. // It is NOT intended to be used by normal clients. Use at your own risk! IsRestore *bool // Specify any top-level attribute to compare whether the version number is higher // than the currently stored one when updating or replacing documents. // // Only applicable if `Overwrite` is set to `true` or `OverwriteMode` is set to `update` or `replace`. VersionAttribute string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type CollectionDocumentCreateOverwriteMode ¶
type CollectionDocumentCreateOverwriteMode string
const ( CollectionDocumentCreateOverwriteModeIgnore CollectionDocumentCreateOverwriteMode = "ignore" CollectionDocumentCreateOverwriteModeReplace CollectionDocumentCreateOverwriteMode = "replace" CollectionDocumentCreateOverwriteModeUpdate CollectionDocumentCreateOverwriteMode = "update" CollectionDocumentCreateOverwriteModeConflict CollectionDocumentCreateOverwriteMode = "conflict" )
func (*CollectionDocumentCreateOverwriteMode) String ¶
func (c *CollectionDocumentCreateOverwriteMode) String() string
type CollectionDocumentCreateResponse ¶
type CollectionDocumentCreateResponse struct { DocumentMeta shared.ResponseStruct `json:",inline"` Old, New interface{} }
type CollectionDocumentCreateResponseReader ¶
type CollectionDocumentCreateResponseReader interface {
Read() (CollectionDocumentCreateResponse, error)
}
type CollectionDocumentDelete ¶
type CollectionDocumentDelete interface { // DeleteDocument removes a single document with given key from the collection. // The document metadata is returned. // If no document exists with given key, a NotFoundError is returned. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface DeleteDocument(ctx context.Context, key string) (CollectionDocumentDeleteResponse, error) // DeleteDocumentWithOptions removes a single document with given key from the collection. // The document metadata is returned. // If no document exists with given key, a NotFoundError is returned. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface DeleteDocumentWithOptions(ctx context.Context, key string, opts *CollectionDocumentDeleteOptions) (CollectionDocumentDeleteResponse, error) // DeleteDocuments removes multiple documents with given keys from the collection. // The document metadata are returned. // If no document exists with a given key, a NotFoundError is returned at its errors index. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface DeleteDocuments(ctx context.Context, keys []string) (CollectionDocumentDeleteResponseReader, error) // DeleteDocumentsWithOptions removes multiple documents with given keys from the collection. // The document metadata are returned. // If no document exists with a given key, a NotFoundError is returned at its errors index. // 'documents' must be a slice of structs with a `_key` field or a slice of keys. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface DeleteDocumentsWithOptions(ctx context.Context, documents interface{}, opts *CollectionDocumentDeleteOptions) (CollectionDocumentDeleteResponseReader, error) }
CollectionDocumentDelete removes document(s) with given key(s) from the collection https://docs.arangodb.com/stable/develop/http-api/documents/#remove-a-document
type CollectionDocumentDeleteOptions ¶
type CollectionDocumentDeleteOptions struct { // Conditionally delete a document based on a target revision id // IMPORTANT: This will work only for single document delete operations (CollectionDocumentDelete.DeleteDocument, // CollectionDocumentDelete.DeleteDocumentWithOptions) IfMatch string // By default, or if this is set to true, the _rev attributes in the given document are ignored. // If this is set to false, then the _rev attribute given in the body document is taken as a precondition. // The document is only removed if the current revision is the one specified. // This works only with multiple documents removal method CollectionDocumentDelete.DeleteDocumentsWithOptions IgnoreRevs *bool // Wait until the deletion operation has been synced to disk. WithWaitForSync *bool // Return additionally the complete previous revision of the changed document OldObject interface{} // If set to true, an empty object is returned as response if the document operation succeeds. // No meta-data is returned for the deleted document. If the operation raises an error, an error object is returned. // You can use this option to save network traffic. Silent *bool // RefillIndexCaches if set to true then refills the in-memory index caches. RefillIndexCaches *bool // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type CollectionDocumentDeleteResponse ¶
type CollectionDocumentDeleteResponse struct { DocumentMeta `json:",inline"` shared.ResponseStruct `json:",inline"` Old interface{} `json:"old,omitempty"` }
type CollectionDocumentDeleteResponseReader ¶
type CollectionDocumentDeleteResponseReader interface {
Read(i interface{}) (CollectionDocumentDeleteResponse, error)
}
type CollectionDocumentImport ¶ added in v2.1.4
type CollectionDocumentImport interface { // ImportDocuments imports one or more documents into the collection. // The document data is loaded from the given documents argument, statistics are returned. // The documents argument can be one of the following: // - An array of structs: All structs will be imported as individual documents. // - An array of maps: All maps will be imported as individual documents. // To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`. // To return details about documents that could not be imported, prepare a context with `WithImportDetails`. ImportDocuments(ctx context.Context, documents string, documentsType CollectionDocumentImportDocumentType) (CollectionDocumentImportResponse, error) ImportDocumentsWithOptions(ctx context.Context, documents string, documentsType CollectionDocumentImportDocumentType, options *CollectionDocumentImportOptions) (CollectionDocumentImportResponse, error) }
CollectionDocumentImport imports document(s) into the collection https://docs.arangodb.com/stable/develop/http-api/import/
type CollectionDocumentImportDocumentType ¶ added in v2.1.4
type CollectionDocumentImportDocumentType string
const ( // ImportDocumentTypeDocuments // Each line is expected to be one JSON object. // example : // {"_key":"john","name":"John Smith","age":35} // {"_key":"katie","name":"Katie Foster","age":28} ImportDocumentTypeDocuments CollectionDocumentImportDocumentType = CollectionDocumentImportDocumentType("documents") // ImportDocumentTypeArray // The request body is expected to be a JSON array of objects. // example : // [ // {"_key":"john","name":"John Smith","age":35}, // {"_key":"katie","name":"Katie Foster","age":28} // ] ImportDocumentTypeArray CollectionDocumentImportDocumentType = CollectionDocumentImportDocumentType("array") // ImportDocumentTypeAuto // Automatically determines the type: either documents (ImportDocumentTypeDocuments) or array (ImportDocumentTypeArray) ImportDocumentTypeAuto CollectionDocumentImportDocumentType = CollectionDocumentImportDocumentType("auto") // ImportDocumentTypeTabular // The first line is an array of strings that defines the attribute keys. The subsequent lines are arrays with the attribute values. // The keys and values are matched by the order of the array elements. // example: // ["_key","name","age"] // ["john","John Smith",35] // ["katie","Katie Foster",28] ImportDocumentTypeTabular CollectionDocumentImportDocumentType = CollectionDocumentImportDocumentType("") )
type CollectionDocumentImportOnDuplicate ¶ added in v2.1.4
type CollectionDocumentImportOnDuplicate string
const ( // ImportOnDuplicateError will not import the current document because of the unique key constraint violation. // This is the default setting. ImportOnDuplicateError CollectionDocumentImportOnDuplicate = CollectionDocumentImportOnDuplicate("error") // ImportOnDuplicateUpdate will update an existing document in the database with the data specified in the request. // Attributes of the existing document that are not present in the request will be preserved. ImportOnDuplicateUpdate CollectionDocumentImportOnDuplicate = CollectionDocumentImportOnDuplicate("update") // ImportOnDuplicateReplace will replace an existing document in the database with the data specified in the request. ImportOnDuplicateReplace CollectionDocumentImportOnDuplicate = CollectionDocumentImportOnDuplicate("replace") // ImportOnDuplicateIgnore will not update an existing document and simply ignore the error caused by a unique key constraint violation. ImportOnDuplicateIgnore CollectionDocumentImportOnDuplicate = CollectionDocumentImportOnDuplicate("ignore") )
type CollectionDocumentImportOptions ¶ added in v2.1.4
type CollectionDocumentImportOptions struct { // FromPrefix is an optional prefix for the values in _from attributes. If specified, the value is automatically // prepended to each _from input value. This allows specifying just the keys for _from. FromPrefix *string `json:"fromPrefix,omitempty"` // ToPrefix is an optional prefix for the values in _to attributes. If specified, the value is automatically // prepended to each _to input value. This allows specifying just the keys for _to. ToPrefix *string `json:"toPrefix,omitempty"` // Overwrite is a flag that if set, then all data in the collection will be removed prior to the import. // Note that any existing index definitions will be preserved. Overwrite *bool `json:"overwrite,omitempty"` // OnDuplicate controls what action is carried out in case of a unique key constraint violation. // Possible values are: // - ImportOnDuplicateError // - ImportOnDuplicateUpdate // - ImportOnDuplicateReplace // - ImportOnDuplicateIgnore OnDuplicate *CollectionDocumentImportOnDuplicate `json:"onDuplicate,omitempty"` // Complete is a flag that if set, will make the whole import fail if any error occurs. // Otherwise the import will continue even if some documents cannot be imported. Complete *bool `json:"complete,omitempty"` // Wait until the import operation has been synced to disk. WithWaitForSync *bool }
CollectionDocumentImportOptions holds optional options that control the import document process.
type CollectionDocumentImportRequest ¶ added in v2.1.4
type CollectionDocumentImportRequest struct { CollectionDocumentImportOptions `json:",inline"` Collection *string `json:"collection,inline"` Type *CollectionDocumentImportDocumentType `json:"type,inline"` }
CollectionDocumentImportRequest holds query parameters for /import.
type CollectionDocumentImportResponse ¶ added in v2.1.4
type CollectionDocumentImportResponse struct {
CollectionDocumentImportStatistics `json:",inline"`
}
type CollectionDocumentImportStatistics ¶ added in v2.1.4
type CollectionDocumentImportStatistics struct { // Created holds the number of documents imported. Created int64 `json:"created,omitempty"` // Errors holds the number of documents that were not imported due to an error. Errors int64 `json:"errors,omitempty"` // Empty holds the number of empty lines found in the input (will only contain a value greater zero for types documents or auto). Empty int64 `json:"empty,omitempty"` // Updated holds the number of updated/replaced documents (in case onDuplicate was set to either update or replace). Updated int64 `json:"updated,omitempty"` // Ignored holds the number of failed but ignored insert operations (in case onDuplicate was set to ignore). Ignored int64 `json:"ignored,omitempty"` // if query parameter details is set to true, the result will contain a details attribute which is an array // with more detailed information about which documents could not be inserted. Details []string }
CollectionDocumentImportStatistics holds statistics of an import action.
type CollectionDocumentRead ¶
type CollectionDocumentRead interface { // ReadDocument reads a single document with given key from the collection. // The document data is stored into result, the document metadata is returned. // If no document exists with given key, a NotFoundError is returned. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error) // ReadDocumentWithOptions reads a single document with given key from the collection. // The document data is stored into result, the document metadata is returned. // If no document exists with given key, a NotFoundError is returned. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface ReadDocumentWithOptions(ctx context.Context, key string, result interface{}, opts *CollectionDocumentReadOptions) (DocumentMeta, error) // ReadDocuments reads multiple documents with given keys from the collection. // The documents data is stored into elements of the given results slice, // the documents metadata is returned. // If no document exists with a given key, a NotFoundError is returned at its errors index. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface ReadDocuments(ctx context.Context, keys []string) (CollectionDocumentReadResponseReader, error) // ReadDocumentsWithOptions reads multiple documents with given keys from the collection. // The documents data is stored into elements of the given results slice and the documents metadata is returned. // If no document exists with a given key, a NotFoundError is returned at its errors index. // 'documents' must be a slice of structs with a `_key` field or a slice of keys. 
// SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface ReadDocumentsWithOptions(ctx context.Context, documents interface{}, opts *CollectionDocumentReadOptions) (CollectionDocumentReadResponseReader, error) }
CollectionDocumentRead contains methods for reading documents from a collection. https://docs.arangodb.com/stable/develop/http-api/documents/#get-a-document
type CollectionDocumentReadOptions ¶
type CollectionDocumentReadOptions struct { // If the “If-Match” header is given, then it must contain exactly one ETag (_rev). // The document is returned, if it has the same revision as the given ETag // IMPORTANT: This will work only for single document read operations (CollectionDocumentRead.ReadDocument, // CollectionDocumentRead.ReadDocumentWithOptions) IfMatch string // If the “If-None-Match” header is given, then it must contain exactly one ETag (_rev). // The document is returned, if it has a different revision than the given ETag // IMPORTANT: This will work only for single document read operations (CollectionDocumentRead.ReadDocument, // CollectionDocumentRead.ReadDocumentWithOptions) IfNoneMatch string // By default, or if this is set to true, the _rev attributes in the given document is ignored. // If this is set to false, then the _rev attribute given in the body document is taken as a precondition. // The document is only read if the current revision is the one specified. // This works only with the multiple documents read method CollectionDocumentRead.ReadDocumentsWithOptions IgnoreRevs *bool // Set this to true to allow the Coordinator to ask any shard replica for the data, not only the shard leader. // This may result in “dirty reads”. // This option is ignored if this operation is part of a DatabaseTransaction (TransactionID option). // The header set when creating the transaction decides about dirty reads for the entire transaction, // not the individual read operations. AllowDirtyReads *bool // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type CollectionDocumentReadResponse ¶
type CollectionDocumentReadResponse struct { DocumentMeta `json:",inline"` shared.ResponseStruct `json:",inline"` }
type CollectionDocumentReadResponseReader ¶
type CollectionDocumentReadResponseReader interface {
Read(i interface{}) (CollectionDocumentReadResponse, error)
}
type CollectionDocumentReplace ¶ added in v2.0.3
type CollectionDocumentReplace interface { // ReplaceDocument replaces a single document with given key in the collection. // If no document exists with given key, a NotFoundError is returned. // If `_id` field is present in the document body, it is always ignored. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface ReplaceDocument(ctx context.Context, key string, document interface{}) (CollectionDocumentReplaceResponse, error) // ReplaceDocumentWithOptions replaces a single document with given key in the collection. // If no document exists with given key, a NotFoundError is returned. // If `_id` field is present in the document body, it is always ignored. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface ReplaceDocumentWithOptions(ctx context.Context, key string, document interface{}, options *CollectionDocumentReplaceOptions) (CollectionDocumentReplaceResponse, error) // ReplaceDocuments replaces multiple document with given keys in the collection. // The replaces are loaded from the given replaces slice, the documents metadata are returned. // If no document exists with a given key, a NotFoundError is returned at its errors index. // Each element in the replaces slice must contain a `_key` field. // If `_id` field is present in the document body, it is always ignored. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface ReplaceDocuments(ctx context.Context, documents interface{}) (CollectionDocumentReplaceResponseReader, error) // ReplaceDocumentsWithOptions replaces multiple document with given keys in the collection. // The replaces are loaded from the given replaces slice, the documents metadata are returned. // If no document exists with a given key, a NotFoundError is returned at its errors index. // Each element in the replaces slice must contain a `_key` field. 
// If `_id` field is present in the document body, it is always ignored. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface ReplaceDocumentsWithOptions(ctx context.Context, documents interface{}, opts *CollectionDocumentReplaceOptions) (CollectionDocumentReplaceResponseReader, error) }
CollectionDocumentReplace replaces document(s) with given key(s) in the collection https://docs.arangodb.com/stable/develop/http-api/documents/#replace-a-document
type CollectionDocumentReplaceOptions ¶ added in v2.0.3
type CollectionDocumentReplaceOptions struct { // Conditionally replace a document based on a target revision id // IMPORTANT: This will work only for single document replace operations (CollectionDocumentReplace.ReplaceDocument, // CollectionDocumentReplace.ReplaceDocumentWithOptions) IfMatch string `json:"ifMatch,omitempty"` // By default, or if this is set to true, the _rev attributes in the given document is ignored. // If this is set to false, then the _rev attribute given in the body document is taken as a precondition. // The document is only replaced if the current revision is the one specified. IgnoreRevs *bool // Wait until document has been synced to disk. WithWaitForSync *bool // If set to true, an empty object is returned as response if the document operation succeeds. // No meta-data is returned for the created document. If the operation raises an error, an error object is returned. // You can use this option to save network traffic. Silent *bool // Additionally return the complete new document NewObject interface{} // Additionally return the complete old document under the attribute. // Only available if the overwrite option is used. OldObject interface{} // RefillIndexCaches if set to true then refills the in-memory index caches. RefillIndexCaches *bool // IsRestore is used to make insert functions use the "isRestore=<value>" setting. // Note: This option is intended for internal (replication) use. // It is NOT intended to be used by normal client. Use on your own risk! IsRestore *bool // Specify any top-level attribute to compare whether the version number is higher // than the currently stored one when updating or replacing documents. VersionAttribute string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type CollectionDocumentReplaceResponse ¶ added in v2.0.3
type CollectionDocumentReplaceResponse struct { DocumentMetaWithOldRev shared.ResponseStruct `json:",inline"` Old, New interface{} }
type CollectionDocumentReplaceResponseReader ¶ added in v2.0.3
type CollectionDocumentReplaceResponseReader interface {
Read() (CollectionDocumentReplaceResponse, error)
}
type CollectionDocumentUpdate ¶
type CollectionDocumentUpdate interface { // UpdateDocument updates a single document with a given key in the collection. // The document metadata is returned. // If no document exists with a given key, a NotFoundError is returned. // If `_id` field is present in the document body, it is always ignored. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface UpdateDocument(ctx context.Context, key string, document interface{}) (CollectionDocumentUpdateResponse, error) // UpdateDocumentWithOptions updates a single document with a given key in the collection. // The document metadata is returned. // If no document exists with a given key, a NotFoundError is returned. // If `_id` field is present in the document body, it is always ignored. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface UpdateDocumentWithOptions(ctx context.Context, key string, document interface{}, options *CollectionDocumentUpdateOptions) (CollectionDocumentUpdateResponse, error) // UpdateDocuments updates multiple documents // The updates are loaded from the given updates slice, the documents metadata are returned. // If no document exists with a given key, a NotFoundError is returned at its errors index. // Each element in the update slice must contain a `_key` field. // If `_id` field is present in the document body, it is always ignored. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface UpdateDocuments(ctx context.Context, documents interface{}) (CollectionDocumentUpdateResponseReader, error) // UpdateDocumentsWithOptions updates multiple documents // The updates are loaded from the given updates slice, the documents metadata are returned. // If no document exists with a given key, a NotFoundError is returned at its errors index. // Each element in the update slice must contain a `_key` field. 
// If `_id` field is present in the document body, it is always ignored. // SmartGraphs and EnterpriseGraphs cannot use existing collections and cannot use the document interface UpdateDocumentsWithOptions(ctx context.Context, documents interface{}, opts *CollectionDocumentUpdateOptions) (CollectionDocumentUpdateResponseReader, error) }
CollectionDocumentUpdate Partially updates document(s) with given key in the collection. https://docs.arangodb.com/stable/develop/http-api/documents/#update-a-document
type CollectionDocumentUpdateOptions ¶
type CollectionDocumentUpdateOptions struct { // Conditionally update a document based on a target revision id // IMPORTANT: This will work only for single document updates operations (CollectionDocumentUpdate.UpdateDocument, // CollectionDocumentUpdate.UpdateDocumentWithOptions) IfMatch string // By default, or if this is set to true, the _rev attributes in the given document is ignored. // If this is set to false, then the _rev attribute given in the body document is taken as a precondition. // The document is only updated if the current revision is the one specified. IgnoreRevs *bool // Wait until document has been synced to disk. WithWaitForSync *bool // If set to true, an empty object is returned as response if the document operation succeeds. // No meta-data is returned for the created document. If the operation raises an error, an error object is returned. // You can use this option to save network traffic. Silent *bool // Additionally return the complete new document NewObject interface{} // Additionally return the complete old document under the attribute. // Only available if the overwrite option is used. OldObject interface{} // RefillIndexCaches if set to true then refills the in-memory index caches. RefillIndexCaches *bool // If the intention is to delete existing attributes with the update-insert command, set it to false. // This modifies the behavior of the patch command to remove top-level attributes and sub-attributes from // the existing document that are contained in the patch document with an attribute value of null // (but not attributes of objects that are nested inside of arrays). // This option controls the update-insert behavior only. KeepNull *bool // Controls whether objects (not arrays) are merged if present in both, the existing and the update-insert document. // If set to false, the value in the patch document overwrites the existing document’s value. // If set to true, objects are merged. The default is true. 
This option controls the update-insert behavior only. MergeObjects *bool // Specify any top-level attribute to compare whether the version number is higher // than the currently stored one when updating or replacing documents. VersionAttribute string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type CollectionDocumentUpdateResponse ¶
type CollectionDocumentUpdateResponse struct { DocumentMetaWithOldRev shared.ResponseStruct `json:",inline"` Old, New interface{} }
type CollectionDocumentUpdateResponseReader ¶
type CollectionDocumentUpdateResponseReader interface {
Read() (CollectionDocumentUpdateResponse, error)
}
type CollectionDocuments ¶
type CollectionDocuments interface { // DocumentExists checks if a document with given key exists in the collection. DocumentExists(ctx context.Context, key string) (bool, error) CollectionDocumentCreate CollectionDocumentRead CollectionDocumentUpdate CollectionDocumentReplace CollectionDocumentDelete CollectionDocumentImport }
type CollectionExtendedInfo ¶
type CollectionExtendedInfo struct { CollectionInfo // CacheEnabled set cacheEnabled option in collection properties. CacheEnabled bool `json:"cacheEnabled,omitempty"` KeyOptions struct { // Type specifies the type of the key generator. The currently available generators are traditional and autoincrement. Type KeyGeneratorType `json:"type,omitempty"` // AllowUserKeys; if set to true, then it is allowed to supply own key values in the _key attribute of a document. // If set to false, then the key generator is solely responsible for generating keys and supplying own key values in // the _key attribute of documents is considered an error. AllowUserKeys bool `json:"allowUserKeys,omitempty"` // The initial value for the key generator. This is only used for autoincrement key generators. LastValue *uint64 `json:"lastValue,omitempty"` } `json:"keyOptions,omitempty"` // NumberOfShards is the number of shards of the collection. // Only available in cluster setup. NumberOfShards int `json:"numberOfShards,omitempty"` // This attribute specifies the name of the sharding strategy to use for the collection. // Can not be changed after creation. ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"` // ShardKeys contains the names of document attributes that are used to determine the target shard for documents. // Only available in cluster setup. ShardKeys []string `json:"shardKeys,omitempty"` // ReplicationFactor contains how many copies of each shard are kept on different DBServers. // Only available in cluster setup. ReplicationFactor ReplicationFactor `json:"replicationFactor,omitempty"` // WaitForSync; If true then creating, changing or removing documents will wait // until the data has been synchronized to disk. WaitForSync bool `json:"waitForSync,omitempty"` // WriteConcern contains how many copies must be available before a collection can be written. // It is required that 1 <= WriteConcern <= ReplicationFactor. // Default is 1. 
Not available for SatelliteCollections. // Available from 3.6 arangod version. WriteConcern int `json:"writeConcern,omitempty"` // Available from 3.9 ArangoD version. InternalValidatorType int `json:"internalValidatorType,omitempty"` // IsDisjoint set isDisjoint flag for Graph. Required ArangoDB 3.7+ IsDisjoint bool `json:"isDisjoint,omitempty"` // Available from 3.7 ArangoD version. IsSmartChild bool `json:"isSmartChild,omitempty"` // Set to create a smart edge or vertex collection. // This requires ArangoDB Enterprise Edition. IsSmart bool `json:"isSmart,omitempty"` // ComputedValues let configure collections to generate document attributes when documents are created or modified, using an AQL expression ComputedValues []ComputedValue `json:"computedValues,omitempty"` }
CollectionExtendedInfo contains extended information about a collection.
type CollectionFigures ¶ added in v2.1.5
type CollectionFigures struct { CollectionProperties CollectionStatistics }
type CollectionIndexes ¶
type CollectionIndexes interface { // Index opens a connection to an existing index within the collection. // If no index with given name exists, a NotFoundError is returned. Index(ctx context.Context, name string) (IndexResponse, error) // IndexExists returns true if an index with given name exists within the collection. IndexExists(ctx context.Context, name string) (bool, error) // Indexes returns a list of all indexes in the collection. Indexes(ctx context.Context) ([]IndexResponse, error) // EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist. // Fields is a slice of attribute paths. // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). // NOTE: 'hash' and 'skiplist' being mere aliases for the persistent index type nowadays EnsurePersistentIndex(ctx context.Context, fields []string, options *CreatePersistentIndexOptions) (IndexResponse, bool, error) // EnsureGeoIndex creates a geo index in the collection, if it does not already exist. // Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location, // then a geo-spatial index on all documents is created using location as path to the coordinates. // The value of the attribute must be a slice with at least two double values. The slice must contain the latitude (first value) // and the longitude (second value). All documents, which do not have the attribute path or with value that are not suitable, are ignored. // If it is a slice with two attribute paths latitude and longitude, then a geo-spatial index on all documents is created // using latitude and longitude as paths to the latitude and the longitude. The value of the attribute latitude and of the // attribute longitude must be a double. All documents, which do not have the attribute paths or which values are not suitable, are ignored. 
// The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). EnsureGeoIndex(ctx context.Context, fields []string, options *CreateGeoIndexOptions) (IndexResponse, bool, error) // EnsureTTLIndex creates a TTL index, if it does not already exist. // expireAfter is the time interval (in seconds) from the point in time stored in the fields attribute after which the documents count as expired. // Can be set to 0 to let documents expire as soon as the server time passes the point in time stored in the document attribute, or to a higher number to delay the expiration. // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). EnsureTTLIndex(ctx context.Context, fields []string, expireAfter int, options *CreateTTLIndexOptions) (IndexResponse, bool, error) // EnsureMDIIndex creates a multidimensional index for the collection, if it does not already exist. // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). // Available in ArangoDB 3.12 and later. EnsureMDIIndex(ctx context.Context, fields []string, options *CreateMDIIndexOptions) (IndexResponse, bool, error) // EnsureMDIPrefixedIndex creates an additional index variant of the mdi index that lets you specify additional // attributes for the index to narrow down the search space using equality checks. // Available in ArangoDB 3.12 and later. EnsureMDIPrefixedIndex(ctx context.Context, fields []string, options *CreateMDIPrefixedIndexOptions) (IndexResponse, bool, error) // EnsureInvertedIndex creates an inverted index in the collection, if it does not already exist. // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false). // Available in ArangoDB 3.10 and later. 
// InvertedIndexOptions is an obligatory parameter and must contain at least `Fields` field EnsureInvertedIndex(ctx context.Context, options *InvertedIndexOptions) (IndexResponse, bool, error) // DeleteIndex deletes an index from the collection. DeleteIndex(ctx context.Context, name string) error // DeleteIndexByID deletes an index from the collection. DeleteIndexByID(ctx context.Context, id string) error }
CollectionIndexes provides access to the indexes in a single collection.
type CollectionInfo ¶
type CollectionInfo struct { // The identifier of the collection. ID string `json:"id,omitempty"` // The name of the collection. Name string `json:"name,omitempty"` // The status of the collection Status CollectionStatus `json:"status,omitempty"` // StatusString represents status as a string. StatusString string `json:"statusString,omitempty"` // The type of the collection Type CollectionType `json:"type,omitempty"` // If true then the collection is a system collection. IsSystem bool `json:"isSystem,omitempty"` // Global unique name for the collection GloballyUniqueId string `json:"globallyUniqueId,omitempty"` }
CollectionInfo contains basic information about a collection.
type CollectionKeyOptions ¶
type CollectionKeyOptions struct { // If set to true, then it is allowed to supply own key values in the _key attribute of a document. // If set to false, then the key generator will solely be responsible for generating keys and supplying own // key values in the _key attribute of documents is considered an error. AllowUserKeysPtr *bool `json:"allowUserKeys,omitempty"` // Specifies the type of the key generator. The currently available generators are traditional and autoincrement. Type KeyGeneratorType `json:"type,omitempty"` // increment value for autoincrement key generator. Not used for other key generator types. Increment int `json:"increment,omitempty"` // Initial offset value for autoincrement key generator. Not used for other key generator types. Offset int `json:"offset,omitempty"` }
CollectionKeyOptions specifies ways for creating keys of a collection.
type CollectionProperties ¶
type CollectionProperties struct { CollectionExtendedInfo // JournalSize is the maximal size setting for journals / datafiles in bytes. JournalSize int64 `json:"journalSize,omitempty"` // SmartJoinAttribute // See documentation for SmartJoins. // This requires ArangoDB Enterprise Edition. SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"` // This field must be set to the attribute that will be used for sharding or SmartGraphs. // All vertices are required to have this attribute set. Edges derive the attribute from their connected vertices. // This requires ArangoDB Enterprise Edition. SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` // This attribute specifies that the sharding of a collection follows that of another // one. DistributeShardsLike string `json:"distributeShardsLike,omitempty"` // This attribute specifies if the new format introduced in 3.7 is used for this // collection. UsesRevisionsAsDocumentIds bool `json:"usesRevisionsAsDocumentIds,omitempty"` // The following attribute specifies if the new MerkleTree based sync protocol // can be used on the collection. SyncByRevision bool `json:"syncByRevision,omitempty"` // Schema for collection validation Schema *CollectionSchemaOptions `json:"schema,omitempty"` // The collection revision id as a string. Revision string `json:"revision,omitempty"` }
CollectionProperties contains extended information about a collection.
func (*CollectionProperties) IsSatellite ¶
func (p *CollectionProperties) IsSatellite() bool
IsSatellite returns true if the collection is a SatelliteCollection
type CollectionSchemaLevel ¶
type CollectionSchemaLevel string
const ( CollectionSchemaLevelNone CollectionSchemaLevel = "none" CollectionSchemaLevelNew CollectionSchemaLevel = "new" CollectionSchemaLevelModerate CollectionSchemaLevel = "moderate" CollectionSchemaLevelStrict CollectionSchemaLevel = "strict" )
type CollectionSchemaOptions ¶
type CollectionSchemaOptions struct { Rule interface{} `json:"rule,omitempty"` Level CollectionSchemaLevel `json:"level,omitempty"` Message string `json:"message,omitempty"` }
func (*CollectionSchemaOptions) LoadRule ¶
func (d *CollectionSchemaOptions) LoadRule(data []byte) error
type CollectionShards ¶
type CollectionShards struct { CollectionExtendedInfo // Shards is a list of shards that belong to the collection. // Each shard contains a list of DB servers where the first one is the leader and the rest are followers. Shards map[ShardID][]ServerID `json:"shards,omitempty"` }
CollectionShards contains shards information about a collection.
type CollectionStatistics ¶
type CollectionStatistics struct { // The number of documents currently present in the collection. Count int64 `json:"count,omitempty"` // The maximal size of a journal or datafile in bytes. JournalSize int64 `json:"journalSize,omitempty"` Figures struct { DataFiles struct { // The number of datafiles. Count int64 `json:"count,omitempty"` // The total filesize of datafiles (in bytes). FileSize int64 `json:"fileSize,omitempty"` } `json:"datafiles"` // The number of markers in the write-ahead log for this collection that have not been transferred to journals or datafiles. UncollectedLogfileEntries int64 `json:"uncollectedLogfileEntries,omitempty"` // The number of references to documents in datafiles that JavaScript code currently holds. This information can be used for debugging compaction and unload issues. DocumentReferences int64 `json:"documentReferences,omitempty"` CompactionStatus struct { // The action that was performed when the compaction was last run for the collection. This information can be used for debugging compaction issues. Message string `json:"message,omitempty"` // The point in time the compaction for the collection was last executed. This information can be used for debugging compaction issues. Time time.Time `json:"time,omitempty"` } `json:"compactionStatus"` Compactors struct { // The number of compactor files. Count int64 `json:"count,omitempty"` // The total filesize of all compactor files (in bytes). FileSize int64 `json:"fileSize,omitempty"` } `json:"compactors"` Dead struct { // The number of dead documents. This includes document versions that have been deleted or replaced by a newer version. Documents deleted or replaced that are contained in the write-ahead log only are not reported in this figure. Count int64 `json:"count,omitempty"` // The total number of deletion markers. Deletion markers only contained in the write-ahead log are not reported in this figure. 
Deletion int64 `json:"deletion,omitempty"` // The total size in bytes used by all dead documents. Size int64 `json:"size,omitempty"` } `json:"dead"` Indexes struct { // The total number of indexes defined for the collection, including the pre-defined indexes (e.g. primary index). Count int64 `json:"count,omitempty"` // The total memory allocated for indexes in bytes. Size int64 `json:"size,omitempty"` } `json:"indexes"` ReadCache struct { // The number of revisions of this collection stored in the document revisions cache. Count int64 `json:"count,omitempty"` // The memory used for storing the revisions of this collection in the document revisions cache (in bytes). This figure does not include the document data but only mappings from document revision ids to cache entry locations. Size int64 `json:"size,omitempty"` } `json:"readcache"` // An optional string value that contains information about which object type is at the head of the collection's cleanup queue. This information can be used for debugging compaction and unload issues. WaitingFor string `json:"waitingFor,omitempty"` Alive struct { // The number of currently active documents in all datafiles and journals of the collection. Documents that are contained in the write-ahead log only are not reported in this figure. Count int64 `json:"count,omitempty"` // The total size in bytes used by all active documents of the collection. Documents that are contained in the write-ahead log only are not reported in this figure. Size int64 `json:"size,omitempty"` } `json:"alive"` // The tick of the last marker that was stored in a journal of the collection. This might be 0 if the collection does not yet have a journal. LastTick int64 `json:"lastTick,omitempty"` Journals struct { // The number of journal files. Count int64 `json:"count,omitempty"` // The total filesize of all journal files (in bytes). 
FileSize int64 `json:"fileSize,omitempty"` } `json:"journals"` Revisions struct { // The number of revisions of this collection managed by the storage engine. Count int64 `json:"count,omitempty"` // The memory used for storing the revisions of this collection in the storage engine (in bytes). This figure does not include the document data but only mappings from document revision ids to storage engine datafile positions. Size int64 `json:"size,omitempty"` } `json:"revisions"` DocumentsSize int64 `json:"documentsSize,omitempty"` // RocksDB cache statistics CacheInUse *bool `json:"cacheInUse,omitempty"` CacheSize *int64 `json:"cacheSize,omitempty"` CacheUsage *int64 `json:"cacheUsage,omitempty"` } `json:"figures"` }
CollectionStatistics contains the number of documents and additional statistical information about a collection.
type CollectionStatus ¶
type CollectionStatus int
CollectionStatus indicates the status of a collection.
type CommitTransactionOptions ¶
type CommitTransactionOptions struct{}
CommitTransactionOptions provides options for CommitTransaction. Currently unused
type CommonFoxxServiceFields ¶ added in v2.1.5
type CommonFoxxServiceFields struct { // Mount is the mount path of the Foxx service in the database (e.g., "/my-service"). // This determines the URL path at which the service can be accessed. Mount *string `json:"mount"` // Development indicates whether the service is in development mode. // When true, the service is not cached and changes are applied immediately. Development *bool `json:"development"` // Legacy indicates whether the service uses a legacy format or API. // This may be used for backward compatibility checks. Legacy *bool `json:"legacy"` // Name is the name of the Foxx service (optional). // This may be defined in the service manifest (manifest.json). Name *string `json:"name,omitempty"` // Version is the version of the Foxx service (optional). // This is useful for managing service upgrades or deployments. Version *string `json:"version,omitempty"` }
type ComputedValue ¶ added in v2.0.3
type ComputedValue struct { // The name of the target attribute. Can only be a top-level attribute, but you // may return a nested object. Cannot be `_key`, `_id`, `_rev`, `_from`, `_to`, // or a shard key attribute. Name string `json:"name"` // An AQL `RETURN` operation with an expression that computes the desired value. Expression string `json:"expression"` // An array of strings to define on which write operations the value shall be // computed. The possible values are `"insert"`, `"update"`, and `"replace"`. // The default is `["insert", "update", "replace"]`. ComputeOn []ComputeOn `json:"computeOn,omitempty"` // Whether the computed value shall take precedence over a user-provided or existing attribute. Overwrite bool `json:"overwrite"` // Whether to let the write operation fail if the expression produces a warning. The default is false. FailOnWarning *bool `json:"failOnWarning,omitempty"` // Whether the result of the expression shall be stored if it evaluates to `null`. // This can be used to skip the value computation if any pre-conditions are not met. KeepNull *bool `json:"keepNull,omitempty"` }
type ConsolidationPolicy ¶
type ConsolidationPolicy struct { // Type returns the type of the ConsolidationPolicy. This interface can then be casted to the corresponding ConsolidationPolicy struct. Type ConsolidationPolicyType `json:"type,omitempty"` ConsolidationPolicyBytesAccum ConsolidationPolicyTier }
ConsolidationPolicy holds threshold values specifying when to consolidate view data. Semantics of the values depend on where they are used.
type ConsolidationPolicyBytesAccum ¶
type ConsolidationPolicyBytesAccum struct { // Threshold, see ConsolidationTypeBytesAccum Threshold *float64 `json:"threshold,omitempty"` }
ConsolidationPolicyBytesAccum contains fields used for ConsolidationPolicyTypeBytesAccum
type ConsolidationPolicyTier ¶
type ConsolidationPolicyTier struct { // MinScore Filter out consolidation candidates with a score less than this. Default: 0 MinScore *int64 `json:"minScore,omitempty"` // SegmentsMin The minimum number of segments that are evaluated as candidates for consolidation. Default: 1 SegmentsMin *int64 `json:"segmentsMin,omitempty"` // SegmentsMax The maximum number of segments that are evaluated as candidates for consolidation. Default: 10 SegmentsMax *int64 `json:"segmentsMax,omitempty"` // SegmentsBytesMax The maximum allowed size of all consolidated segments in bytes. Default: 5368709120 SegmentsBytesMax *int64 `json:"segmentsBytesMax,omitempty"` // SegmentsBytesFloor Defines the value (in bytes) to treat all smaller segments as equal for consolidation selection. Default: 2097152 SegmentsBytesFloor *int64 `json:"segmentsBytesFloor,omitempty"` }
ConsolidationPolicyTier contains fields used for ConsolidationPolicyTypeTier
type ConsolidationPolicyType ¶
type ConsolidationPolicyType string
ConsolidationPolicyType strings for consolidation types
const ( // ConsolidationPolicyTypeTier consolidate based on segment byte size and live document count as dictated by the customization attributes. ConsolidationPolicyTypeTier ConsolidationPolicyType = "tier" // ConsolidationPolicyTypeBytesAccum consolidate if and only if ({threshold} range [0.0, 1.0]) // {threshold} > (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes, // i.e. the sum of all candidate segment's byte size is less than the total segment byte size multiplied by the {threshold}. ConsolidationPolicyTypeBytesAccum ConsolidationPolicyType = "bytes_accum" )
type Contributor ¶ added in v2.1.5
type Contributor struct { // Name is the contributor's name. Name *string `json:"name,omitempty"` // Email is the contributor's contact email. Email *string `json:"email,omitempty"` }
Contributor represents a person who contributed to the Foxx service.
type CreateCollectionOptions ¶
type CreateCollectionOptions struct { // EnforceReplicationFactor the default is true, which means the server checks if there are enough replicas available // at creation time and bail out otherwise. Set it to false to disable this extra check. EnforceReplicationFactor *bool }
CreateCollectionOptions specifies additional options to be provided while creating collection
type CreateCollectionPropertiesV2 ¶ added in v2.1.4
type CreateCollectionPropertiesV2 struct { // CacheEnabled set cacheEnabled option in collection properties CacheEnabled *bool `json:"cacheEnabled,omitempty"` // This field is used for internal purposes only. DO NOT USE. DistributeShardsLike *string `json:"distributeShardsLike,omitempty"` // DoCompact checks if the collection will be compacted (default is true) DoCompact *bool `json:"doCompact,omitempty"` // The number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power // of 2 and less than or equal to 1024. For very large collections one should increase this to avoid long pauses when the hash // table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel. // For example, 64 might be a sensible value for a collection with 100 000 000 documents. // Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions. // Changes are applied when the collection is loaded the next time. IndexBuckets *int `json:"indexBuckets,omitempty"` // Available from 3.9 ArangoD version. InternalValidatorType *int `json:"internalValidatorType,omitempty"` // IsDisjoint set isDisjoint flag for Graph. Required ArangoDB 3.7+ IsDisjoint *bool `json:"isDisjoint,omitempty"` // Set to create a smart edge or vertex collection. // This requires ArangoDB Enterprise Edition. IsSmart *bool `json:"isSmart,omitempty"` // If true, create a system collection. In this case collection-name should start with an underscore. // End users should normally create non-system collections only. API implementors may be required to create system // collections in very special occasions, but normally a regular collection will do. (The default is false) IsSystem *bool `json:"isSystem,omitempty"` // If true then the collection data is kept in-memory only and not made persistent. 
// Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also // cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster // than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any // CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only, // and not for data that cannot be re-created otherwise. (The default is false) IsVolatile *bool `json:"isVolatile,omitempty"` // The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MiB). (The default is a configuration parameter) JournalSize *int64 `json:"journalSize,omitempty"` // Specifies how keys in the collection are created. KeyOptions *CollectionKeyOptions `json:"keyOptions,omitempty"` // In a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless. (default is 1) NumberOfShards *int `json:"numberOfShards,omitempty"` // ReplicationFactor in a cluster (default is 1), this attribute determines how many copies of each shard are kept on different DBServers. // The value 1 means that only one copy (no synchronous replication) is kept. // A value of k means that k-1 replicas are kept. Any two copies reside on different DBServers. // Replication between them is synchronous, that is, every write operation to the "leader" copy will be replicated to all "follower" replicas, // before the write operation is reported successful. If a server fails, this is detected automatically // and one of the servers holding copies take over, usually without an error being reported. 
ReplicationFactor *ReplicationFactor `json:"replicationFactor,omitempty"` // Schema for collection validation Schema *CollectionSchemaOptions `json:"schema,omitempty"` // This attribute specifies the name of the sharding strategy to use for the collection. // Must be one of ShardingStrategy* values. ShardingStrategy *ShardingStrategy `json:"shardingStrategy,omitempty"` // In a cluster, this attribute determines which document attributes are used to // determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes. // The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard. // Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup. // The default is []string{"_key"}. ShardKeys *[]string `json:"shardKeys,omitempty"` // This field must be set to the attribute that will be used for sharding or SmartGraphs. // All vertices are required to have this attribute set. Edges derive the attribute from their connected vertices. // This requires ArangoDB Enterprise Edition. SmartGraphAttribute *string `json:"smartGraphAttribute,omitempty"` // SmartJoinAttribute // In the specific case that the two collections have the same number of shards, the data of the two collections can // be co-located on the same server for the same shard key values. In this case the extra hop via the coordinator will not be necessary. // See documentation for SmartJoins. // This requires ArangoDB Enterprise Edition. SmartJoinAttribute *string `json:"smartJoinAttribute,omitempty"` // Available from 3.7 ArangoDB version SyncByRevision *bool `json:"syncByRevision,omitempty"` // The type of the collection to create. (default is CollectionTypeDocument) Type *CollectionType `json:"type,omitempty"` // If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. 
(default: false) WaitForSync *bool `json:"waitForSync,omitempty"` // WriteConcern contains how many copies must be available before a collection can be written. // It is required that 1 <= WriteConcern <= ReplicationFactor. // Default is 1. Not available for SatelliteCollections. // Available from 3.6 ArangoDB version. WriteConcern *int `json:"writeConcern,omitempty"` // ComputedValues let configure collections to generate document attributes when documents are created or modified, using an AQL expression ComputedValues *[]ComputedValue `json:"computedValues,omitempty"` }
CreateCollectionPropertiesV2 contains options that customize the creation of a collection.
func (*CreateCollectionPropertiesV2) Init ¶ added in v2.1.4
func (c *CreateCollectionPropertiesV2) Init()
Init translates deprecated fields into current ones for backward compatibility
type CreateDatabaseDefaultOptions ¶
type CreateDatabaseDefaultOptions struct { // Default replication factor for collections in database ReplicationFactor ReplicationFactor `json:"replicationFactor,omitempty"` // Default write concern for collections in database WriteConcern int `json:"writeConcern,omitempty"` // Default sharding for collections in database Sharding DatabaseSharding `json:"sharding,omitempty"` // Replication version to use for this database // Available since ArangoDB version 3.11 ReplicationVersion DatabaseReplicationVersion `json:"replicationVersion,omitempty"` }
CreateDatabaseDefaultOptions contains options that change defaults for collections
type CreateDatabaseOptions ¶
type CreateDatabaseOptions struct { // List of users to initially create for the new database. User information will not be changed for users that already exist. // If users is not specified or does not contain any users, a default user root will be created with an empty string password. // This ensures that the new database will be accessible after it is created. Users []CreateDatabaseUserOptions `json:"users,omitempty"` // Options database defaults Options CreateDatabaseDefaultOptions `json:"options,omitempty"` }
CreateDatabaseOptions contains options that customize the creating of a database.
type CreateDatabaseUserOptions ¶
type CreateDatabaseUserOptions struct { // Login name of the user to be created UserName string `json:"user,omitempty"` // The user password as a string. If not specified, it will default to an empty string. Password string `json:"passwd,omitempty"` // A flag indicating whether the user account should be activated or not. The default value is true. If set to false, the user won't be able to log into the database. Active *bool `json:"active,omitempty"` // A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB. Extra interface{} `json:"extra,omitempty"` }
CreateDatabaseUserOptions contains options for creating a single user for a database.
type CreateEdgeDefinitionOptions ¶ added in v2.1.0
type CreateEdgeDefinitionOptions struct { // An array of collection names that is used to create SatelliteCollections for a (Disjoint) SmartGraph // using SatelliteCollections (Enterprise Edition only). // Each array element must be a string and a valid collection name. The collection type cannot be modified later. Satellites []string `json:"satellites,omitempty"` }
type CreateEdgeDefinitionResponse ¶ added in v2.1.0
type CreateEdgeDefinitionResponse struct { shared.ResponseStruct `json:",inline"` // GraphDefinition contains the updated graph definition GraphDefinition *GraphDefinition `json:"graph,omitempty"` Edge }
type CreateEdgeOptions ¶ added in v2.1.0
type CreateEdgeOptions struct { // Define if the request should wait until synced to disk. WaitForSync *bool `json:"waitForSync,omitempty"` // Define if the response should contain the complete new version of the document. NewObject interface{} // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type CreateGeoIndexOptions ¶
type CreateGeoIndexOptions struct { // Name optional user defined name used for hints in AQL queries Name string `json:"name,omitempty"` // If a geo-spatial index on a location is constructed and GeoJSON is true, then the order within the array // is longitude followed by latitude. This corresponds to the format described in http://geojson.org/geojson-spec.html#positions GeoJSON *bool `json:"geoJson,omitempty"` // LegacyPolygons determines if the to-be-created index should use legacy polygons or not. // It is relevant for those that have geoJson set to true only. // Old geo indexes from versions from below 3.10 will always implicitly have the legacyPolygons option set to true. // Newly generated geo indexes from 3.10 on will have the legacyPolygons option by default set to false, // however, it can still be explicitly overwritten with true to create a legacy index but is not recommended. LegacyPolygons *bool `json:"legacyPolygons,omitempty"` // InBackground You can set this option to true to create the index in the background, // which will not write-lock the underlying collection for as long as if the index is built in the foreground. // The default value is false. InBackground *bool `json:"inBackground,omitempty"` }
CreateGeoIndexOptions contains specific options for creating a geo index.
type CreateGraphOptions ¶ added in v2.1.0
type CreateGraphOptions struct { // Satellites An array of collection names that is used to create SatelliteCollections for a (Disjoint) SmartGraph // using SatelliteCollections (Enterprise Edition only). Each array element must be a string and a valid // collection name. The collection type cannot be modified later. Satellites []string `json:"satellites,omitempty"` }
type CreateMDIIndexOptions ¶ added in v2.1.0
type CreateMDIIndexOptions struct { // Name optional user defined name used for hints in AQL queries Name string `json:"name,omitempty"` // FieldValueTypes is required and the only allowed value is "double". //Future extensions of the index will allow other types. FieldValueTypes MDIFieldType `json:"fieldValueTypes,required"` // Unique if true, then create a unique index. Unique *bool `json:"unique,omitempty"` // Sparse If `true`, then create a sparse index to exclude documents from the index that do not have the defined // attributes or are explicitly set to `null` values. If a non-value is set, it still needs to be numeric. Sparse *bool `json:"sparse,omitempty"` // InBackground You can set this option to true to create the index in the background, // which will not write-lock the underlying collection for as long as if the index is built in the foreground. // The default value is false. InBackground *bool `json:"inBackground,omitempty"` // StoredValues The optional `storedValues` attribute can contain an array of paths to additional attributes to // store in the index. These additional attributes cannot be used for index lookups or for sorting, but they can // be used for projections. This allows an index to fully cover more queries and avoid extra document lookups. // The maximum number of attributes in `storedValues` is 32. // // Attributes in `storedValues` cannot overlap with attributes specified in `prefixFields` but you can have // the attributes in both `storedValues` and `fields`. StoredValues []string `json:"storedValues,omitempty"` }
CreateMDIIndexOptions provides specific options for creating an MDI index
type CreateMDIPrefixedIndexOptions ¶ added in v2.1.0
type CreateMDIPrefixedIndexOptions struct { CreateMDIIndexOptions `json:",inline"` // PrefixFields is required and contains an array of attribute names used as search prefix. // Array expansions are not allowed. PrefixFields []string `json:"prefixFields,required"` }
type CreatePersistentIndexOptions ¶
type CreatePersistentIndexOptions struct { // Name optional user defined name used for hints in AQL queries Name string `json:"name,omitempty"` // CacheEnabled if true, then the index will be cached in memory. Caching is turned off by default. CacheEnabled *bool `json:"cacheEnabled,omitempty"` // StoreValues if true, then the additional attributes will be included. // These additional attributes cannot be used for index lookups or sorts, but they can be used for projections. // There must be no overlap of attribute paths between `fields` and `storedValues`. The maximum number of values is 32. StoredValues []string `json:"storedValues,omitempty"` // Sparse You can control the sparsity for persistent indexes. // The inverted, fulltext, and geo index types are sparse by definition. Sparse *bool `json:"sparse,omitempty"` // Unique is supported by persistent indexes. By default, all user-defined indexes are non-unique. // Only the attributes in fields are checked for uniqueness. // Any attributes in from storedValues are not checked for their uniqueness. Unique *bool `json:"unique,omitempty"` // Deduplicate is supported by array indexes of type persistent. It controls whether inserting duplicate index // values from the same document into a unique array index will lead to a unique constraint error or not. // The default value is true, so only a single instance of each non-unique index value will be inserted into // the index per document. // Trying to insert a value into the index that already exists in the index will always fail, // regardless of the value of this attribute. Deduplicate *bool `json:"deduplicate,omitempty"` // Estimates determines if the to-be-created index should maintain selectivity estimates or not. // Is supported by indexes of type persistent // This attribute controls whether index selectivity estimates are maintained for the index. // Not maintaining index selectivity estimates can have a slightly positive impact on write performance. 
// The downside of turning off index selectivity estimates will be that the query optimizer will not be able // to determine the usefulness of different competing indexes in AQL queries when there are multiple candidate // indexes to choose from. The estimates attribute is optional and defaults to true if not set. // It will have no effect on indexes other than persistent (with hash and skiplist being mere aliases for the persistent index type nowadays). Estimates *bool `json:"estimates,omitempty"` // InBackground You can set this option to true to create the index in the background, // which will not write-lock the underlying collection for as long as if the index is built in the foreground. // The default value is false. InBackground *bool `json:"inBackground,omitempty"` }
CreatePersistentIndexOptions contains specific options for creating a persistent index. Note: "hash" and "skiplist" are only aliases for "persistent" with the RocksDB storage engine which is the only storage engine since 3.7
type CreateTTLIndexOptions ¶
type CreateTTLIndexOptions struct { // Name optional user defined name used for hints in AQL queries Name string `json:"name,omitempty"` // InBackground You can set this option to true to create the index in the background, // which will not write-lock the underlying collection for as long as if the index is built in the foreground. // The default value is false. InBackground *bool `json:"inBackground,omitempty"` }
CreateTTLIndexOptions provides specific options for creating a TTL index
type CreateVertexCollectionOptions ¶ added in v2.1.0
type CreateVertexCollectionOptions struct { // Satellites contain an array of collection names that will be used to create SatelliteCollections for // a (Disjoint) SmartGraph using SatelliteCollections (Enterprise Edition only) Satellites []string `json:"satellites,omitempty"` }
type CreateVertexCollectionResponse ¶ added in v2.1.0
type CreateVertexCollectionResponse struct { shared.ResponseStruct `json:",inline"` // GraphDefinition contains the updated graph definition GraphDefinition *GraphDefinition `json:"graph,omitempty"` VertexCollection }
type CreateVertexOptions ¶ added in v2.1.0
type CreateVertexOptions struct { // Define if the request should wait until synced to disk. WaitForSync *bool `json:"waitForSync,omitempty"` // Define if the response should contain the complete new version of the document. NewObject interface{} // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type Cursor ¶
type Cursor interface { io.Closer // CloseWithContext run Close with specified Context CloseWithContext(ctx context.Context) error // HasMore returns true if the next call to ReadDocument does not return a NoMoreDocuments error. HasMore() bool // ReadDocument reads the next document from the cursor. // The document data is stored into result, the document meta data is returned. // If the cursor has no more documents, a NoMoreDocuments error is returned. // Note: If the query (resulting in this cursor) does not return documents, // then the returned DocumentMeta will be empty. ReadDocument(ctx context.Context, result interface{}) (DocumentMeta, error) // Count returns the total number of result documents available. // A valid return value is only available when the cursor has been created with `Count` and not with `Stream`. Count() int64 // Statistics returns the query execution statistics for this cursor. // This might not be valid if the cursor has been created with `Stream` Statistics() CursorStats // Plan returns the query execution plan for this cursor. Plan() CursorPlan }
Cursor is returned from a query, used to iterate over a list of documents. Note that a Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed.
type CursorBatch ¶
type CursorBatch interface { io.Closer // CloseWithContext run Close with specified Context CloseWithContext(ctx context.Context) error // HasMoreBatches returns true if the next call to ReadNextBatch does not return a NoMoreDocuments error. HasMoreBatches() bool // ReadNextBatch reads the next batch of documents from the cursor. // The result must be a pointer to a slice of documents. // E.g. `var result []MyStruct{}`. ReadNextBatch(ctx context.Context, result interface{}) error // RetryReadBatch retries the last batch read made by ReadNextBatch. // The result must be a pointer to a slice of documents. // E.g. `var result []MyStruct{}`. RetryReadBatch(ctx context.Context, result interface{}) error // ReadNextRawBatch reads the next batch of documents from the cursor. // The result must be a pointer to a byte array *[]bytes. ReadNextRawBatch(ctx context.Context, result *connection.RawObject) error // RetryReadRawBatch retries the last batch read made by ReadNextRawBatch. // The result must be a pointer to a byte array *[]bytes. RetryReadRawBatch(ctx context.Context, result *connection.RawObject) error // Count returns the total number of result documents available. // A valid return value is only available when the cursor has been created with `Count` and not with `Stream`. Count() int64 // Statistics returns the query execution statistics for this cursor. // This might not be valid if the cursor has been created with `Stream` Statistics() CursorStats // Plan returns the query execution plan for this cursor. Plan() CursorPlan }
CursorBatch is returned from a query, used to iterate over a list of documents. In contrast to Cursor, CursorBatch does not load all documents into memory, but returns them in batches and allows for retries in case of errors. Note that a Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed.
type CursorPlan ¶
type CursorPlan struct { // Nodes describes a nested list of the execution plan nodes. Nodes []CursorPlanNodes `json:"nodes,omitempty"` // Rules describes a list with the names of the applied optimizer rules. Rules []string `json:"rules,omitempty"` // Collections describes list of the collections involved in the query. Collections []CursorPlanCollection `json:"collections,omitempty"` // Variables describes list of variables involved in the query. Variables []CursorPlanVariable `json:"variables,omitempty"` // EstimatedCost is an estimated cost of the query. EstimatedCost float64 `json:"estimatedCost,omitempty"` // EstimatedNrItems is an estimated number of results. EstimatedNrItems int `json:"estimatedNrItems,omitempty"` // IsModificationQuery describes whether the query contains write operations. IsModificationQuery bool `json:"isModificationQuery,omitempty"` }
CursorPlan describes execution plan for a query.
type CursorPlanCollection ¶
type CursorPlanCollection struct { // Name is a name of collection. Name string `json:"name"` // Type describes how the collection is used: read, write or exclusive. Type string `json:"type"` }
CursorPlanCollection describes a collection involved in the query.
type CursorPlanNodes ¶
type CursorPlanNodes map[string]interface{}
CursorPlanNodes describes map of nodes which take part in the execution.
type CursorPlanVariable ¶
type CursorPlanVariable struct { // ID is a variable's id. ID int `json:"id"` // Name is a variable's name. Name string `json:"name"` // IsDataFromCollection is set to true when data comes from a collection. IsDataFromCollection bool `json:"isDataFromCollection"` // IsFullDocumentFromCollection is set to true when all data comes from a collection. IsFullDocumentFromCollection bool `json:"isFullDocumentFromCollection"` }
CursorPlanVariable describes variable's settings.
type CursorStats ¶
type CursorStats struct { // The total number of data-modification operations successfully executed. WritesExecutedInt uint64 `json:"writesExecuted,omitempty"` // The total number of data-modification operations that were unsuccessful WritesIgnoredInt uint64 `json:"writesIgnored,omitempty"` // The total number of documents iterated over when scanning a collection without an index. ScannedFullInt uint64 `json:"scannedFull,omitempty"` // The total number of documents iterated over when scanning a collection using an index. ScannedIndexInt uint64 `json:"scannedIndex,omitempty"` // The total number of documents that were removed after executing a filter condition in a FilterNode FilteredInt uint64 `json:"filtered,omitempty"` // The total number of documents that matched the search condition if the query's final LIMIT statement were not present. FullCountInt uint64 `json:"fullCount,omitempty"` // Query execution time (wall-clock time). value will be set from the outside ExecutionTimeInt float64 `json:"executionTime,omitempty"` HTTPRequests uint64 `json:"httpRequests,omitempty"` PeakMemoryUsage uint64 `json:"peakMemoryUsage,omitempty"` // CursorsCreated the total number of cursor objects created during query execution. Cursor objects are created for index lookups. CursorsCreated uint64 `json:"cursorsCreated,omitempty"` // CursorsRearmed the total number of times an existing cursor object was repurposed. // Repurposing an existing cursor object is normally more efficient compared to destroying an existing cursor object // and creating a new one from scratch. CursorsRearmed uint64 `json:"cursorsRearmed,omitempty"` // CacheHits the total number of index entries read from in-memory caches for indexes of type edge or persistent. // This value will only be non-zero when reading from indexes that have an in-memory cache enabled, // and when the query allows using the in-memory cache (i.e. using equality lookups on all index attributes). 
CacheHits uint64 `json:"cacheHits,omitempty"` // CacheMisses the total number of cache read attempts for index entries that could not be served from in-memory caches for indexes of type edge or persistent. // This value will only be non-zero when reading from indexes that have an in-memory cache enabled, // the query allows using the in-memory cache (i.e. using equality lookups on all index attributes) and the looked up values are not present in the cache. CacheMisses uint64 `json:"cacheMisses,omitempty"` }
type Database ¶
type Database interface { // Name returns the name of the database. Name() string // Info fetches information about the database. Info(ctx context.Context) (DatabaseInfo, error) // Remove removes the entire database. // If the database does not exist, a NotFoundError is returned. Remove(ctx context.Context) error // TransactionJS performs a javascript transaction. The result of the transaction function is returned. TransactionJS(ctx context.Context, options TransactionJSOptions) (interface{}, error) // Returns the available key generators for collections. KeyGenerators(ctx context.Context) (KeyGeneratorsResponse, error) DatabaseCollection DatabaseTransaction DatabaseQuery DatabaseView DatabaseAnalyzer DatabaseGraph }
type DatabaseAnalyzer ¶ added in v2.0.3
type DatabaseAnalyzer interface { // EnsureCreatedAnalyzer creates an Analyzer for the database, if it does not already exist. // It returns the Analyzer object together with a boolean indicating if the Analyzer was newly created (true) or pre-existing (false). EnsureCreatedAnalyzer(ctx context.Context, analyzer *AnalyzerDefinition) (Analyzer, bool, error) // Analyzer returns the analyzer definition for the given analyzer Analyzer(ctx context.Context, name string) (Analyzer, error) // Analyzers return an iterator to read all analyzers Analyzers(ctx context.Context) (AnalyzersResponseReader, error) }
type DatabaseCollection ¶
type DatabaseCollection interface { // GetCollection opens a connection to an existing collection within the database. // If no collection with given name exists, an NotFoundError is returned. GetCollection(ctx context.Context, name string, options *GetCollectionOptions) (Collection, error) // CollectionExists returns true if a collection with given name exists within the database. CollectionExists(ctx context.Context, name string) (bool, error) // Collections returns a list of all collections in the database. Collections(ctx context.Context) ([]Collection, error) // CreateCollection creates a new collection with given name and options, and opens a connection to it. // If a collection with given name already exists within the database, a DuplicateError is returned. CreateCollectionV2(ctx context.Context, name string, props *CreateCollectionPropertiesV2) (Collection, error) // CreateCollectionWithOptions creates a new collection with given name and options, and opens a connection to it. // If a collection with given name already exists within the database, a DuplicateError is returned. CreateCollectionWithOptionsV2(ctx context.Context, name string, props *CreateCollectionPropertiesV2, options *CreateCollectionOptions) (Collection, error) }
type DatabaseGraph ¶ added in v2.1.0
type DatabaseGraph interface { // GetEdges returns inbound and outbound edge documents of a given vertex. // Requires Edge collection name and vertex ID GetEdges(ctx context.Context, name, vertex string, options *GetEdgesOptions) ([]EdgeDetails, error) // Graph opens a connection to an existing graph within the database. // If no graph with given name exists, an NotFoundError is returned. Graph(ctx context.Context, name string, options *GetGraphOptions) (Graph, error) // GraphExists returns true if a graph with given name exists within the database. GraphExists(ctx context.Context, name string) (bool, error) // Graphs return a list of all graphs in the database. Graphs(ctx context.Context) (GraphsResponseReader, error) // CreateGraph creates a new graph with given name and options, and opens a connection to it. // If a graph with given name already exists within the database, a DuplicateError is returned. CreateGraph(ctx context.Context, name string, graph *GraphDefinition, options *CreateGraphOptions) (Graph, error) }
type DatabaseInfo ¶
type DatabaseInfo struct { // The identifier of the database. ID string `json:"id,omitempty"` // The name of the database. Name string `json:"name,omitempty"` // The filesystem path of the database. Path string `json:"path,omitempty"` // If true then the database is the _system database. IsSystem bool `json:"isSystem,omitempty"` // Default replication factor for collections in database ReplicationFactor ReplicationFactor `json:"replicationFactor,omitempty"` // Default write concern for collections in database WriteConcern int `json:"writeConcern,omitempty"` // Default sharding for collections in database Sharding DatabaseSharding `json:"sharding,omitempty"` // Replication version used for this database ReplicationVersion DatabaseReplicationVersion `json:"replicationVersion,omitempty"` }
DatabaseInfo contains information about a database
type DatabaseInventory ¶ added in v2.1.0
type DatabaseInventory struct { Info DatabaseInfo `json:"properties,omitempty"` Collections []InventoryCollection `json:"collections,omitempty"` Views []InventoryView `json:"views,omitempty"` State ServerStatus `json:"state,omitempty"` Tick string `json:"tick,omitempty"` }
func (DatabaseInventory) CollectionByName ¶ added in v2.1.0
func (i DatabaseInventory) CollectionByName(name string) (InventoryCollection, bool)
CollectionByName returns the InventoryCollection with given name. Returns false if not found.
func (DatabaseInventory) ViewByName ¶ added in v2.1.0
func (i DatabaseInventory) ViewByName(name string) (InventoryView, bool)
ViewByName returns the InventoryView with given name. Returns false if not found.
type DatabasePermissions ¶ added in v2.1.0
type DatabaseQuery ¶
type DatabaseQuery interface { // Query performs an AQL query, returning a cursor used to iterate over the returned documents. // Note that the returned Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed. Query(ctx context.Context, query string, opts *QueryOptions) (Cursor, error) // QueryBatch performs an AQL query, returning a cursor used to iterate over the returned documents in batches. // In contrast to Query, QueryBatch does not load all documents into memory, but returns them in batches and allows for retries in case of errors. // Note that the returned Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed QueryBatch(ctx context.Context, query string, opts *QueryOptions, result interface{}) (CursorBatch, error) // ValidateQuery validates an AQL query. // When the query is valid, nil returned, otherwise an error is returned. // The query is not executed. ValidateQuery(ctx context.Context, query string) error // ExplainQuery explains an AQL query and return information about it. ExplainQuery(ctx context.Context, query string, bindVars map[string]interface{}, opts *ExplainQueryOptions) (ExplainQueryResult, error) // GetQueryProperties returns the properties of the query system. GetQueryProperties(ctx context.Context) (QueryProperties, error) // UpdateQueryProperties updates the properties of the query system. // The properties are updated with the provided options. // The updated properties are returned. UpdateQueryProperties(ctx context.Context, options QueryProperties) (QueryProperties, error) // ListOfRunningAQLQueries returns a list of currently running AQL queries. // If the all parameter is set to true, it returns all queries, otherwise only the queries that are currently running. // The result is a list of RunningAQLQuery objects. 
ListOfRunningAQLQueries(ctx context.Context, all *bool) ([]RunningAQLQuery, error) // ListOfSlowAQLQueries returns a list of slow AQL queries. // If the all parameter is set to true, it returns all slow queries, otherwise only the queries that are currently running. // The result is a list of RunningAQLQuery objects. // Slow queries are defined as queries that have been running longer than the configured slow query threshold. // The slow query threshold can be configured in the query properties. // The result is a list of RunningAQLQuery objects. ListOfSlowAQLQueries(ctx context.Context, all *bool) ([]RunningAQLQuery, error) // ClearSlowAQLQueries clears the list of slow AQL queries. // If the all parameter is set to true, it clears all slow queries, otherwise only // the queries that are currently running. ClearSlowAQLQueries(ctx context.Context, all *bool) error // KillAQLQuery kills a running AQL query. // The queryId is the unique identifier of the query KillAQLQuery(ctx context.Context, queryId string, all *bool) error // GetAllOptimizerRules returns all optimizer rules available in the database. // The result is a list of OptimizerRule objects. GetAllOptimizerRules(ctx context.Context) ([]OptimizerRules, error) // GetQueryPlanCache returns a list of cached query plans. // The result is a list of QueryPlanCacheRespObject objects. GetQueryPlanCache(ctx context.Context) ([]QueryPlanCacheRespObject, error) // ClearQueryPlanCache clears the query plan cache. ClearQueryPlanCache(ctx context.Context) error // GetQueryEntriesCache returns a list of cached query entries. // The result is a list of QueryCacheEntriesRespObject objects. GetQueryEntriesCache(ctx context.Context) ([]QueryCacheEntriesRespObject, error) // ClearQueryCache clears the query cache. // This will remove all cached query entries. ClearQueryCache(ctx context.Context) error // GetQueryCacheProperties returns the properties of the query cache. // The result is a QueryCacheProperties object. 
GetQueryCacheProperties(ctx context.Context) (QueryCacheProperties, error) // SetQueryCacheProperties sets the properties of the query cache. // The properties are updated with the provided options. SetQueryCacheProperties(ctx context.Context, options QueryCacheProperties) (QueryCacheProperties, error) // CreateUserDefinedFunction creates a user-defined function in the database. // The function is created with the provided options. // The function is created in the system collection `_aqlfunctions`. // The function is created with the provided code and name. // If the function already exists, it will be updated with the new code. CreateUserDefinedFunction(ctx context.Context, options UserDefinedFunctionObject) (bool, error) // DeleteUserDefinedFunction removes a user-defined AQL function from the current database. // If group is true, all functions with the given name as a namespace prefix will be deleted. // If group is false, only the function with the fully qualified name will be removed. // It returns the number of functions deleted. DeleteUserDefinedFunction(ctx context.Context, name *string, group *bool) (*int, error) // GetUserDefinedFunctions retrieves all user-defined AQL functions registered in the current database. // It returns a list of UserDefinedFunctionObject, each containing the function's name, code, and isDeterministic. // The returned list may be empty array if no user-defined functions are registered. GetUserDefinedFunctions(ctx context.Context) ([]UserDefinedFunctionObject, error) }
type DatabaseReplicationVersion ¶
type DatabaseReplicationVersion string
DatabaseReplicationVersion defines replication protocol version to use for this database Available since ArangoDB version 3.11 Note: this feature is still considered experimental and should not be used in production
const ( DatabaseReplicationVersionOne DatabaseReplicationVersion = "1" DatabaseReplicationVersionTwo DatabaseReplicationVersion = "2" )
type DatabaseSharding ¶
type DatabaseSharding string
const ( DatabaseShardingSingle DatabaseSharding = "single" DatabaseShardingNone DatabaseSharding = "" )
type DatabaseTransaction ¶
type DatabaseTransaction interface { ListTransactions(ctx context.Context) ([]Transaction, error) ListTransactionsWithStatuses(ctx context.Context, statuses ...TransactionStatus) ([]Transaction, error) BeginTransaction(ctx context.Context, cols TransactionCollections, opts *BeginTransactionOptions) (Transaction, error) Transaction(ctx context.Context, id TransactionID) (Transaction, error) WithTransaction(ctx context.Context, cols TransactionCollections, opts *BeginTransactionOptions, commitOptions *CommitTransactionOptions, abortOptions *AbortTransactionOptions, w TransactionWrap) error }
DatabaseTransaction contains Streaming Transactions functions https://docs.arangodb.com/stable/develop/http-api/transactions/stream-transactions/
type DatabaseView ¶ added in v2.0.3
type DatabaseView interface { // View opens a connection to an existing view within the database. // If no view with given name exists, an NotFoundError is returned. View(ctx context.Context, name string) (View, error) // ViewExists returns true if a view with given name exists within the database. ViewExists(ctx context.Context, name string) (bool, error) // Views returns a reader to iterate over all views in the database Views(ctx context.Context) (ViewsResponseReader, error) // ViewsAll returns all views in the database ViewsAll(ctx context.Context) ([]View, error) // CreateArangoSearchView creates a new view of type ArangoSearch, // with given name and options, and opens a connection to it. // If a view with given name already exists within the database, a ConflictError is returned. CreateArangoSearchView(ctx context.Context, name string, options *ArangoSearchViewProperties) (ArangoSearchView, error) // CreateArangoSearchAliasView creates ArangoSearch alias view with given name and options, and opens a connection to it. // If a view with given name already exists within the database, a ConflictError is returned. CreateArangoSearchAliasView(ctx context.Context, name string, options *ArangoSearchAliasViewProperties) (ArangoSearchViewAlias, error) }
type DeleteEdgeDefinitionOptions ¶ added in v2.1.0
type DeleteEdgeDefinitionResponse ¶ added in v2.1.0
type DeleteEdgeDefinitionResponse struct { shared.ResponseStruct `json:",inline"` // GraphDefinition contains the updated graph definition GraphDefinition *GraphDefinition `json:"graph,omitempty"` }
type DeleteEdgeOptions ¶ added in v2.1.0
type DeleteEdgeOptions struct { // Define if the request should wait until synced to disk. WaitForSync *bool `json:"waitForSync,omitempty"` // Define if a presentation of the deleted document should be returned within the response object. OldObject interface{} // Conditionally delete an Edge based on a target revision id // If the “If-Match” header is given, then it must contain exactly one ETag (_rev). IfMatch string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type DeleteVertexCollectionOptions ¶ added in v2.1.0
type DeleteVertexCollectionOptions struct { // Drop the collection as well. The collection is only dropped if it is not used in other graphs. DropCollection *bool }
type DeleteVertexCollectionResponse ¶ added in v2.1.0
type DeleteVertexCollectionResponse struct { shared.ResponseStruct `json:",inline"` // GraphDefinition contains the updated graph definition GraphDefinition *GraphDefinition `json:"graph,omitempty"` }
type DeleteVertexOptions ¶ added in v2.1.0
type DeleteVertexOptions struct { // Define if the request should wait until synced to disk. WaitForSync *bool `json:"waitForSync,omitempty"` // Define if a presentation of the deleted document should be returned within the response object. OldObject interface{} // Conditionally delete a vertex based on a target revision id // If the “If-Match” header is given, then it must contain exactly one ETag (_rev). IfMatch string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type DeployFoxxServiceRequest ¶ added in v2.1.5
type DeployFoxxServiceRequest struct {
FoxxDeploymentOptions `json:",inline"`
}
DeployFoxxServiceRequest holds the deployment options for installing a Foxx service.
type DocumentID ¶
type DocumentID string
type DocumentMeta ¶
type DocumentMeta struct { Key string `json:"_key,omitempty"` ID DocumentID `json:"_id,omitempty"` Rev string `json:"_rev,omitempty"` }
DocumentMeta contains all meta data used to identify a document.
type DocumentMetaSlice ¶
type DocumentMetaSlice []DocumentMeta
DocumentMetaSlice is a slice of DocumentMeta elements
func (DocumentMetaSlice) IDs ¶
func (l DocumentMetaSlice) IDs() []DocumentID
IDs returns the ID's of all elements.
func (DocumentMetaSlice) Keys ¶
func (l DocumentMetaSlice) Keys() []string
Keys returns the keys of all elements.
func (DocumentMetaSlice) Revs ¶
func (l DocumentMetaSlice) Revs() []string
Revs returns the revisions of all elements.
type DocumentMetaWithOldRev ¶ added in v2.1.3
type DocumentMetaWithOldRev struct { DocumentMeta OldRev string `json:"_oldRev,omitempty"` }
type Edge ¶ added in v2.1.0
type Edge interface { GraphCollection // Name returns the name of the Edge collection Name() string // GetEdge Gets an Edge from the given collection. // To get _key and _rev values, embed the DocumentMeta struct in your result struct. GetEdge(ctx context.Context, key string, result interface{}, opts *GetEdgeOptions) error // CreateEdge Creates a new edge in the specified collection. // Within the body the edge has to contain a '_from' and '_to' value referencing to valid vertices in the graph. // Furthermore, the edge has to be valid according to the edge definitions. // To get _key and _rev values, embed the DocumentMeta struct in your result struct and pass to EdgeCreateResponse.New. CreateEdge(ctx context.Context, Edge interface{}, opts *CreateEdgeOptions) (EdgeCreateResponse, error) // UpdateEdge Partially modify the data of the specific edge in the collection. UpdateEdge(ctx context.Context, key string, newValue interface{}, opts *EdgeUpdateOptions) (EdgeUpdateResponse, error) // ReplaceEdge Replaces the data of an Edge in the collection. ReplaceEdge(ctx context.Context, key string, newValue interface{}, opts *EdgeReplaceOptions) (EdgeReplaceResponse, error) // DeleteEdge Removes an Edge from the collection. DeleteEdge(ctx context.Context, key string, opts *DeleteEdgeOptions) (EdgeDeleteResponse, error) }
type EdgeCreateResponse ¶ added in v2.1.0
type EdgeCreateResponse struct { DocumentMeta shared.ResponseStruct `json:",inline"` New interface{} }
type EdgeDefinition ¶ added in v2.1.0
type EdgeDefinition struct { // Name of the edge collection, where the edges are stored in. Collection string `json:"collection"` // List of vertex collection names. // Edges in a collection can only be inserted if their _to is in any of the collections here. To []string `json:"to"` // List of vertex collection names. // Edges in a collection can only be inserted if their _from is in any of the collections here. From []string `json:"from"` }
type EdgeDeleteResponse ¶ added in v2.1.0
type EdgeDeleteResponse struct { shared.ResponseStruct `json:",inline"` Old interface{} }
type EdgeDetails ¶ added in v2.1.1
type EdgeDetails struct { DocumentMeta From string `json:"_from"` To string `json:"_to"` Label string `json:"$label"` }
type EdgeDirection ¶ added in v2.1.1
type EdgeDirection string
const ( // EdgeDirectionIn selects inbound edges EdgeDirectionIn EdgeDirection = "in" // EdgeDirectionOut selects outbound edges EdgeDirectionOut EdgeDirection = "out" )
type EdgeReplaceOptions ¶ added in v2.1.0
type EdgeReplaceOptions struct { // Define if the request should wait until synced to disk. WaitForSync *bool // Define if a presentation of the new document should be returned within the response object. NewObject interface{} // Define if a presentation of the deleted document should be returned within the response object. OldObject interface{} // Define if values set to null should be stored. By default (true), the given documents attribute(s) // are set to null. If this parameter is set to false, top-level attribute and sub-attributes with a null value // in the request are removed from the document (but not attributes of objects that are nested inside of arrays). KeepNull *bool // Conditionally replace an Edge based on a target revision id // If the “If-Match” header is given, then it must contain exactly one ETag (_rev). IfMatch string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type EdgeReplaceResponse ¶ added in v2.1.0
type EdgeReplaceResponse struct { DocumentMeta shared.ResponseStruct `json:",inline"` Old, New interface{} }
type EdgeUpdateOptions ¶ added in v2.1.0
type EdgeUpdateOptions struct { // Define if the request should wait until synced to disk. WaitForSync *bool // Define if a presentation of the new document should be returned within the response object. NewObject interface{} // Define if a presentation of the deleted document should be returned within the response object. OldObject interface{} // Define if values set to null should be stored. By default (true), the given documents attribute(s) // are set to null. If this parameter is set to false, top-level attribute and sub-attributes with a null value // in the request are removed from the document (but not attributes of objects that are nested inside of arrays). KeepNull *bool // Conditionally update an Edge based on a target revision id // If the “If-Match” header is given, then it must contain exactly one ETag (_rev). IfMatch string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type EdgeUpdateResponse ¶ added in v2.1.0
type EdgeUpdateResponse struct { DocumentMeta shared.ResponseStruct `json:",inline"` Old, New interface{} }
type EngineInfo ¶
type EngineInfo struct {
Type EngineType `json:"name"`
}
EngineInfo contains information about the database engine being used.
type EngineType ¶
type EngineType string
EngineType indicates type of database engine being used.
func (EngineType) String ¶
func (t EngineType) String() string
type Engines ¶ added in v2.1.5
type Engines struct { // Arangodb specifies the required ArangoDB version range (semver format). Arangodb *string `json:"arangodb,omitempty"` }
Engines specifies the ArangoDB engine requirements for the Foxx service.
type ExplainQueryOptimizerOptions ¶
type ExplainQueryOptimizerOptions struct { // A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, // telling the optimizer to include or exclude specific rules. // To disable a rule, prefix its name with a "-", to enable a rule, prefix it with a "+". // There is also a pseudo-rule "all", which matches all optimizer rules. "-all" disables all rules. Rules []string `json:"rules,omitempty"` }
type ExplainQueryOptions ¶
type ExplainQueryOptions struct { // If set to true, all possible execution plans will be returned. // The default is false, meaning only the optimal plan will be returned. AllPlans bool `json:"allPlans,omitempty"` // An optional maximum number of plans that the optimizer is allowed to generate. // Setting this attribute to a low value allows to put a cap on the amount of work the optimizer does. MaxNumberOfPlans *int `json:"maxNumberOfPlans,omitempty"` // Options related to the query optimizer. Optimizer ExplainQueryOptimizerOptions `json:"optimizer,omitempty"` }
type ExplainQueryResult ¶
type ExplainQueryResult struct { Plan ExplainQueryResultPlan `json:"plan,omitempty"` Plans []ExplainQueryResultPlan `json:"plans,omitempty"` // List of warnings that occurred during optimization or execution plan creation Warnings []string `json:"warnings,omitempty"` // Info about optimizer statistics Stats ExplainQueryResultExecutionStats `json:"stats,omitempty"` // Cacheable states whether the query results can be cached on the server if the query result cache were used. // This attribute is not present when allPlans is set to true. Cacheable *bool `json:"cacheable,omitempty"` }
type ExplainQueryResultExecutionNodeRaw ¶
type ExplainQueryResultExecutionNodeRaw map[string]interface{}
type ExplainQueryResultExecutionStats ¶
type ExplainQueryResultExecutionStats struct { RulesExecuted int `json:"rulesExecuted,omitempty"` RulesSkipped int `json:"rulesSkipped,omitempty"` PlansCreated int `json:"plansCreated,omitempty"` PeakMemoryUsage uint64 `json:"peakMemoryUsage,omitempty"` ExecutionTime float64 `json:"executionTime,omitempty"` }
type ExplainQueryResultPlan ¶
type ExplainQueryResultPlan struct { // Execution nodes of the plan. NodesRaw []ExplainQueryResultExecutionNodeRaw `json:"nodes,omitempty"` // List of rules the optimizer applied Rules []string `json:"rules,omitempty"` // List of collections used in the query Collections []ExplainQueryResultExecutionCollection `json:"collections,omitempty"` // List of variables used in the query (note: this may contain internal variables created by the optimizer) Variables []ExplainQueryResultExecutionVariable `json:"variables,omitempty"` // The total estimated cost for the plan. If there are multiple plans, the optimizer will choose the plan with the lowest total cost EstimatedCost float64 `json:"estimatedCost,omitempty"` // The estimated number of results. EstimatedNrItems int `json:"estimatedNrItems,omitempty"` }
type Flags ¶ added in v2.1.5
type Flags struct { // CanBeDisabled indicates whether the query can be disabled. CanBeDisabled *bool `json:"canBeDisabled,omitempty"` // CanCreateAdditionalPlans indicates whether additional query execution plans can be created. CanCreateAdditionalPlans *bool `json:"canCreateAdditionalPlans,omitempty"` // ClusterOnly indicates whether the query is only available in a cluster environment. ClusterOnly *bool `json:"clusterOnly,omitempty"` // DisabledByDefault indicates whether the query is disabled by default. // This means that the query is not executed unless explicitly enabled. DisabledByDefault *bool `json:"disabledByDefault,omitempty"` // EnterpriseOnly indicates whether the query is only available in the Enterprise Edition. EnterpriseOnly *bool `json:"enterpriseOnly,omitempty"` // Hidden indicates whether the query is hidden from the user. Hidden *bool `json:"hidden,omitempty"` }
type FoxxDeleteOptions ¶ added in v2.1.4
type FoxxDeploymentOptions ¶ added in v2.1.5
type FoxxDeploymentOptions struct {
Mount *string
}
type FoxxServiceListItem ¶ added in v2.1.5
type FoxxServiceListItem struct { CommonFoxxServiceFields // Provides lists the capabilities or interfaces the service provides. // This is a flexible map that may contain metadata like API contracts or service roles. Provides map[string]interface{} `json:"provides"` }
FoxxServiceListItem represents a single Foxx service installed in an ArangoDB database.
type FoxxServiceObject ¶ added in v2.1.5
type FoxxServiceObject struct { // Common fields for all Foxx services. CommonFoxxServiceFields // Path is the local filesystem path where the service is installed. Path *string `json:"path,omitempty"` // Manifest contains the normalized manifest.json of the service. Manifest *Manifest `json:"manifest,omitempty"` // Options contains optional runtime options defined for the service. Options map[string]interface{} `json:"options,omitempty"` }
FoxxServiceObject is the top-level response object for a Foxx service details request.
type FoxxTestOptions ¶ added in v2.1.5
type FoxxTestOptions struct { FoxxDeploymentOptions Reporter *string `json:"reporter,omitempty"` Idiomatic *bool `json:"idiomatic,omitempty"` Filter *string `json:"filter,omitempty"` }
type GetCollectionOptions ¶ added in v2.1.0
type GetCollectionOptions struct { // SkipExistCheck skips checking if collection exists SkipExistCheck bool `json:"skipExistCheck,omitempty"` }
type GetDatabaseOptions ¶ added in v2.1.0
type GetDatabaseOptions struct { // SkipExistCheck skips checking if database exists SkipExistCheck bool `json:"skipExistCheck,omitempty"` }
GetDatabaseOptions contains options that customize the getting of a database.
type GetEdgeOptions ¶ added in v2.1.0
type GetEdgeOptions struct { // Must contain a revision. If this is set, a document is only returned if it has exactly this revision. // Also see if-match header as an alternative to this. Rev string `json:"rev,omitempty"` // If the “If-Match” header is given, then it must contain exactly one ETag (_rev). // The document is returned, if it has the same revision as the given ETag IfMatch string // If the “If-None-Match” header is given, then it must contain exactly one ETag (_rev). // The document is returned, if it has a different revision than the given ETag IfNoneMatch string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type GetEdgesOptions ¶ added in v2.1.1
type GetEdgesOptions struct { // The direction of the edges. Allowed values are "in" and "out". If not set, edges in both directions are returned. Direction EdgeDirection `json:"direction,omitempty"` // Set this to true to allow the Coordinator to ask any shard replica for the data, not only the shard leader. // This may result in “dirty reads”. AllowDirtyReads *bool `json:"-"` }
type GetGraphOptions ¶ added in v2.1.0
type GetGraphOptions struct { // SkipExistCheck skips checking if graph exists SkipExistCheck bool `json:"skipExistCheck,omitempty"` }
type GetVersionOptions ¶ added in v2.0.3
type GetVersionOptions struct { // If true, additional details will be returned in response // Default false Details *bool }
type GetVertexOptions ¶ added in v2.1.0
type GetVertexOptions struct { // Must contain a revision. If this is set, a document is only returned if it has exactly this revision. // Also see if-match header as an alternative to this. Rev string `json:"rev,omitempty"` // If the “If-Match” header is given, then it must contain exactly one ETag (_rev). // The document is returned, if it has the same revision as the given ETag IfMatch string // If the “If-None-Match” header is given, then it must contain exactly one ETag (_rev). // The document is returned, if it has a different revision than the given ETag IfNoneMatch string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type Grant ¶ added in v2.1.0
type Grant string
Grant specifies access rights for an object
const ( // GrantReadWrite indicates read/write access to an object GrantReadWrite Grant = "rw" // GrantReadOnly indicates read-only access to an object GrantReadOnly Grant = "ro" // GrantNone indicates no access to an object GrantNone Grant = "none" // GrantUndefined indicates undefined access to an object (read-only operation) GrantUndefined Grant = "undefined" )
type Graph ¶ added in v2.1.0
type Graph interface { // Name returns the name of the graph. Name() string // IsSmart Whether the graph is a SmartGraph (Enterprise Edition only). IsSmart() bool // IsSatellite Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not. IsSatellite() bool // IsDisjoint Whether the graph is a Disjoint SmartGraph (Enterprise Edition only). IsDisjoint() bool // EdgeDefinitions returns the edge definitions of the graph. EdgeDefinitions() []EdgeDefinition // SmartGraphAttribute of the sharding attribute in the SmartGraph case (Enterprise Edition only). SmartGraphAttribute() string // NumberOfShards Number of shards created for every new collection in the graph. NumberOfShards() *int // OrphanCollections An array of additional vertex collections. // Documents in these collections do not have edges within this graph. OrphanCollections() []string // ReplicationFactor The replication factor used for every new collection in the graph. // For SatelliteGraphs, it is the string "satellite" (Enterprise Edition only). ReplicationFactor() int // WriteConcern The default write concern for new collections in the graph. It determines how many copies of each shard // are required to be in sync on the different DB-Servers. If there are less than these many copies in the cluster, // a shard refuses to write. Writes to shards with enough up-to-date copies succeed at the same time, however. // The value of writeConcern cannot be greater than replicationFactor. For SatelliteGraphs, the writeConcern is // automatically controlled to equal the number of DB-Servers and the attribute is not available. (cluster only) WriteConcern() *int // Remove the entire graph with options. Remove(ctx context.Context, opts *RemoveGraphOptions) error // GraphVertexCollections - Vertex collection functions GraphVertexCollections // GraphEdgesDefinition - Edge collection functions GraphEdgesDefinition }
Graph provides access to all edge & vertex collections of a single graph in a database.
type GraphCollection ¶ added in v2.1.3
type GraphCollection interface { Name() string Database() Database // Count fetches the number of document in the collection. Count(ctx context.Context) (int64, error) CollectionDocuments CollectionIndexes }
type GraphDefinition ¶ added in v2.1.0
type GraphDefinition struct { Name string `json:"name"` // IsSmart Whether the graph is a SmartGraph (Enterprise Edition only). IsSmart bool `json:"isSmart"` // IsSatellite Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not. IsSatellite bool `json:"isSatellite"` // IsDisjoint Whether the graph is a Disjoint SmartGraph (Enterprise Edition only). IsDisjoint bool `json:"isDisjoint,omitempty"` // EdgeDefinitions An array of definitions for the relations of the graph EdgeDefinitions []EdgeDefinition `json:"edgeDefinitions,omitempty"` // NumberOfShards Number of shards created for every new collection in the graph. // For Satellite Graphs, it has to be set to 1 NumberOfShards *int `json:"numberOfShards,omitempty"` // OrphanCollections An array of additional vertex collections. // Documents in these collections do not have edges within this graph. OrphanCollections []string `json:"orphanCollections,omitempty"` // WriteConcern The default write concern for new collections in the graph. It determines how many copies of each shard // are required to be in sync on the different DB-Servers. If there are less than these many copies in the cluster, // a shard refuses to write. Writes to shards with enough up-to-date copies succeed at the same time, however. // The value of writeConcern cannot be greater than replicationFactor. For SatelliteGraphs, the writeConcern is // automatically controlled to equal the number of DB-Servers and the attribute is not available. (cluster only) WriteConcern *int `json:"writeConcern,omitempty"` // ReplicationFactor The replication factor used for every new collection in the graph. // For SatelliteGraphs, it is the string "satellite" (Enterprise Edition only). ReplicationFactor graphReplicationFactor `json:"replicationFactor,omitempty"` // SmartGraphAttribute of the sharding attribute in the SmartGraph case (Enterprise Edition only). SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"` }
type GraphEdgesDefinition ¶ added in v2.1.0
type GraphEdgesDefinition interface { // EdgeDefinition opens a connection to an existing Edge collection within the graph. // If no Edge collection with given name exists, an NotFoundError is returned. // Note: When calling Remove on the returned Collection, the collection is removed from the graph. Not from the database. EdgeDefinition(ctx context.Context, collection string) (Edge, error) // EdgeDefinitionExists returns true if an Edge collection with given name exists within the graph. EdgeDefinitionExists(ctx context.Context, collection string) (bool, error) // GetEdgeDefinitions returns all Edge collections of this graph // Note: When calling Remove on any of the returned Collection's, the collection is removed from the graph. Not from the database. GetEdgeDefinitions(ctx context.Context) ([]Edge, error) // CreateEdgeDefinition creates an Edge collection in the graph // This edge definition has to contain a 'collection' and an array of each 'from' and 'to' vertex collections. // An edge definition can only be added if this definition is either not used in any other graph, or it is used // with exactly the same definition. // For example, it is not possible to store a definition “e” from “v1” to “v2” in one graph, // and “e” from “v2” to “v1” in another graph, but both can have “e” from “v1” to “v2”. CreateEdgeDefinition(ctx context.Context, collection string, from, to []string, opts *CreateEdgeDefinitionOptions) (CreateEdgeDefinitionResponse, error) // ReplaceEdgeDefinition Change one specific edge definition. // This modifies all occurrences of this definition in all graphs known to your database. ReplaceEdgeDefinition(ctx context.Context, collection string, from, to []string, opts *ReplaceEdgeOptions) (ReplaceEdgeDefinitionResponse, error) // DeleteEdgeDefinition Remove one edge definition from the graph. // This only removes the edge collection from the graph definition. 
// The vertex collections of the edge definition become orphan collections, // but otherwise remain untouched and can still be used in your queries. DeleteEdgeDefinition(ctx context.Context, collection string, opts *DeleteEdgeDefinitionOptions) (DeleteEdgeDefinitionResponse, error) }
type GraphVertexCollections ¶ added in v2.1.0
type GraphVertexCollections interface { // VertexCollection opens a connection to an existing vertex-collection within the graph. // If no vertex-collection with given name exists, an NotFoundError is returned. // Note: When calling Remove on the returned Collection, the collection is removed from the graph. Not from the database. VertexCollection(ctx context.Context, name string) (VertexCollection, error) // VertexCollectionExists returns true if a vertex-collection with given name exists within the graph. VertexCollectionExists(ctx context.Context, name string) (bool, error) // VertexCollections returns all vertex collections of this graph // Note: When calling Remove on any of the returned Collection's, the collection is removed from the graph. Not from the database. VertexCollections(ctx context.Context) ([]VertexCollection, error) // CreateVertexCollection creates a vertex collection in the graph CreateVertexCollection(ctx context.Context, name string, opts *CreateVertexCollectionOptions) (CreateVertexCollectionResponse, error) // DeleteVertexCollection Removes a vertex collection from the list of the graph’s orphan collections. // It can optionally delete the collection if it is not used in any other graph. // You cannot remove vertex collections that are used in one of the edge definitions of the graph. // You need to modify or remove the edge definition first to fully remove a vertex collection from the graph. DeleteVertexCollection(ctx context.Context, name string, opts *DeleteVertexCollectionOptions) (DeleteVertexCollectionResponse, error) }
type GraphsResponseReader ¶ added in v2.1.0
type IndexOptions ¶
type IndexOptions struct { // Fields returns a list of attributes of this index. Fields []string `json:"fields,omitempty"` // Estimates determines if the to-be-created index should maintain selectivity estimates or not - PersistentIndex only Estimates *bool `json:"estimates,omitempty"` // SelectivityEstimate determines the selectivity estimate value of the index - PersistentIndex only SelectivityEstimate float64 `json:"selectivityEstimate,omitempty"` // MinLength returns min length for this index if set. MinLength *int `json:"minLength,omitempty"` // Deduplicate returns deduplicate setting of this index. Deduplicate *bool `json:"deduplicate,omitempty"` // ExpireAfter returns an expiry after for this index if set. ExpireAfter *int `json:"expireAfter,omitempty"` // CacheEnabled if true, then the index will be cached in memory. Caching is turned off by default. CacheEnabled *bool `json:"cacheEnabled,omitempty"` // StoredValues returns a list of stored values for this index - PersistentIndex only StoredValues []string `json:"storedValues,omitempty"` // GeoJSON returns if geo json was set for this index or not. GeoJSON *bool `json:"geoJson,omitempty"` // LegacyPolygons returns if legacy polygons was set for this index or not before 3.10 - GeoIndex only LegacyPolygons *bool `json:"legacyPolygons,omitempty"` }
IndexOptions contains the information about a regular index type
type IndexResponse ¶
type IndexResponse struct { // Name optional user defined name used for hints in AQL queries Name string `json:"name,omitempty"` // Type returns the type of the index Type IndexType `json:"type"` // RegularIndex is the regular index object. It is empty for the InvertedIndex type. RegularIndex *IndexOptions `json:"indexes"` // InvertedIndex is the inverted index object. It is not empty only for InvertedIndex type. InvertedIndex *InvertedIndexOptions `json:"invertedIndexes"` }
IndexResponse is the response from the Index list method
func (*IndexResponse) UnmarshalJSON ¶
func (i *IndexResponse) UnmarshalJSON(data []byte) error
type IndexSharedOptions ¶
type IndexSharedOptions struct { ID string `json:"id,omitempty"` // Only the attributes in fields are checked for uniqueness. // Any attributes from storedValues are not checked for their uniqueness. Unique *bool `json:"unique,omitempty"` // The inverted, fulltext, and geo index types are sparse by definition. Sparse *bool `json:"sparse,omitempty"` IsNewlyCreated *bool `json:"isNewlyCreated,omitempty"` }
IndexSharedOptions contains options that are shared between all index types
type InventoryCollection ¶ added in v2.1.0
type InventoryCollection struct { Parameters InventoryCollectionParameters `json:"parameters"` Indexes []InventoryIndex `json:"indexes,omitempty"` PlanVersion int64 `json:"planVersion,omitempty"` IsReady bool `json:"isReady,omitempty"` AllInSync bool `json:"allInSync,omitempty"` }
type InventoryCollectionParameters ¶ added in v2.1.0
type InventoryCollectionParameters struct { Deleted bool `json:"deleted,omitempty"` Shards map[ShardID][]ServerID `json:"shards,omitempty"` PlanID string `json:"planId,omitempty"` CollectionProperties }
type InventoryIndex ¶ added in v2.1.0
type InventoryIndex struct { ID string `json:"id,omitempty"` Type string `json:"type,omitempty"` Fields []string `json:"fields,omitempty"` Unique bool `json:"unique"` Sparse bool `json:"sparse"` Deduplicate bool `json:"deduplicate"` MinLength int `json:"minLength,omitempty"` GeoJSON bool `json:"geoJson,omitempty"` Name string `json:"name,omitempty"` ExpireAfter int `json:"expireAfter,omitempty"` Estimates bool `json:"estimates,omitempty"` FieldValueTypes string `json:"fieldValueTypes,omitempty"` CacheEnabled *bool `json:"cacheEnabled,omitempty"` }
type InventoryView ¶ added in v2.1.0
type InvertedIndexField ¶
type InvertedIndexField struct { // Name (Required) An attribute path. The '.' character denotes sub-attributes. Name string `json:"name"` // Analyzer indicating the name of an analyzer instance // Default: the value defined by the top-level analyzer option, or if not set, the default identity Analyzer. Analyzer string `json:"analyzer,omitempty"` // Features is a list of Analyzer features to use for this field. They define what features are enabled for the analyzer Features []ArangoSearchFeature `json:"features,omitempty"` // IncludeAllFields This option only applies if you use the inverted index in a search-alias Views. // If set to true, then all sub-attributes of this field are indexed, excluding any sub-attributes that are configured separately by other elements in the fields array (and their sub-attributes). The analyzer and features properties apply to the sub-attributes. // If set to false, then sub-attributes are ignored. The default value is defined by the top-level includeAllFields option, or false if not set. IncludeAllFields *bool `json:"includeAllFields,omitempty"` // SearchField This option only applies if you use the inverted index in a search-alias Views. // You can set the option to true to get the same behavior as with arangosearch Views regarding the indexing of array values for this field. If enabled, both, array and primitive values (strings, numbers, etc.) are accepted. Every element of an array is indexed according to the trackListPositions option. // If set to false, it depends on the attribute path. If it explicitly expand an array ([*]), then the elements are indexed separately. Otherwise, the array is indexed as a whole, but only geopoint and aql Analyzers accept array inputs. You cannot use an array expansion if searchField is enabled. // Default: the value defined by the top-level searchField option, or false if not set. 
SearchField *bool `json:"searchField,omitempty"` // TrackListPositions This option only applies if you use the inverted index in a search-alias Views. // If set to true, then track the value position in arrays for array values. For example, when querying a document like { attr: [ "valueX", "valueY", "valueZ" ] }, you need to specify the array element, e.g. doc.attr[1] == "valueY". // If set to false, all values in an array are treated as equal alternatives. You don’t specify an array element in queries, e.g. doc.attr == "valueY", and all elements are searched for a match. // Default: the value defined by the top-level trackListPositions option, or false if not set. TrackListPositions bool `json:"trackListPositions,omitempty"` // Cache - Enable this option to always cache the field normalization values in memory for this specific field // Default: the value defined by the top-level 'cache' option. Cache *bool `json:"cache,omitempty"` // Nested Index the specified sub-objects that are stored in an array. // Other than with the fields property, the values get indexed in a way that lets you query for co-occurring values. // For example, you can search the sub-objects and all the conditions need to be met by a single sub-object instead of across all of them. // Enterprise-only feature Nested []InvertedIndexNestedField `json:"nested,omitempty"` }
InvertedIndexField contains configuration for indexing of the field
type InvertedIndexNestedField ¶
type InvertedIndexNestedField struct { // Name An attribute path. The . character denotes sub-attributes. Name string `json:"name"` // Analyzer indicating the name of an analyzer instance // Default: the value defined by the top-level analyzer option, or if not set, the default identity Analyzer. Analyzer string `json:"analyzer,omitempty"` // Features is a list of Analyzer features to use for this field. They define what features are enabled for the analyzer Features []ArangoSearchFeature `json:"features,omitempty"` // SearchField This option only applies if you use the inverted index in a search-alias Views. // You can set the option to true to get the same behavior as with arangosearch Views regarding the indexing of array values for this field. If enabled, both, array and primitive values (strings, numbers, etc.) are accepted. Every element of an array is indexed according to the trackListPositions option. // If set to false, it depends on the attribute path. If it explicitly expand an array ([*]), then the elements are indexed separately. Otherwise, the array is indexed as a whole, but only geopoint and aql Analyzers accept array inputs. You cannot use an array expansion if searchField is enabled. // Default: the value defined by the top-level searchField option, or false if not set. SearchField *bool `json:"searchField,omitempty"` // Cache - Enable this option to always cache the field normalization values in memory for this specific field // Default: the value defined by the top-level 'cache' option. Cache *bool `json:"cache,omitempty"` // Nested - Index the specified sub-objects that are stored in an array. // Other than with the fields property, the values get indexed in a way that lets you query for co-occurring values. // For example, you can search the sub-objects and all the conditions need to be met by a single sub-object instead of across all of them. // Enterprise-only feature Nested []InvertedIndexNestedField `json:"nested,omitempty"` }
InvertedIndexNestedField contains sub-object configuration for indexing of the field
type InvertedIndexOptions ¶
type InvertedIndexOptions struct { // Name optional user defined name used for hints in AQL queries Name string `json:"name,omitempty"` // Fields contains the properties for individual fields of the element. // The key of the map are field names. // Required: true Fields []InvertedIndexField `json:"fields,omitempty"` // SearchField This option only applies if you use the inverted index in a search-alias Views. // You can set the option to true to get the same behavior as with arangosearch Views regarding the indexing of array values as the default. // If enabled, both, array and primitive values (strings, numbers, etc.) are accepted. Every element of an array is indexed according to the trackListPositions option. // If set to false, it depends on the attribute path. If it explicitly expand an array ([*]), then the elements are indexed separately. // Otherwise, the array is indexed as a whole, but only geopoint and aql Analyzers accept array inputs. // You cannot use an array expansion if searchField is enabled. SearchField *bool `json:"searchField,omitempty"` // Cache - Enable this option to always cache the field normalization values in memory for all fields by default. Cache *bool `json:"cache,omitempty"` // StoredValues The optional storedValues attribute can contain an array of paths to additional attributes to store in the index. // These additional attributes cannot be used for index lookups or for sorting, but they can be used for projections. // This allows an index to fully cover more queries and avoid extra document lookups. StoredValues []StoredValue `json:"storedValues,omitempty"` // PrimarySort You can define a primary sort order to enable an AQL optimization. // If a query iterates over all documents of a collection, wants to sort them by attribute values, and the (left-most) fields to sort by, // as well as their sorting direction, match with the primarySort definition, then the SORT operation is optimized away. 
PrimarySort *PrimarySort `json:"primarySort,omitempty"` // PrimaryKeyCache Enable this option to always cache the primary key column in memory. // This can improve the performance of queries that return many documents. PrimaryKeyCache *bool `json:"primaryKeyCache,omitempty"` // Analyzer The name of an Analyzer to use by default. This Analyzer is applied to the values of the indexed // fields for which you don’t define Analyzers explicitly. Analyzer string `json:"analyzer,omitempty"` // Features list of analyzer features. You can set this option to overwrite what features are enabled for the default analyzer Features []ArangoSearchFeature `json:"features,omitempty"` // IncludeAllFields If set to true, all fields of this element will be indexed. Defaults to false. // Warning: Using includeAllFields for a lot of attributes in combination with complex Analyzers // may significantly slow down the indexing process. IncludeAllFields *bool `json:"includeAllFields,omitempty"` // TrackListPositions track the value position in arrays for array values. TrackListPositions bool `json:"trackListPositions,omitempty"` // Parallelism - The number of threads to use for indexing the fields. Default: 2 Parallelism *int `json:"parallelism,omitempty"` // CleanupIntervalStep Wait at least this many commits between removing unused files in the ArangoSearch data directory // (default: 2, to disable use: 0). CleanupIntervalStep *int64 `json:"cleanupIntervalStep,omitempty"` // CommitIntervalMsec Wait at least this many milliseconds between committing View data store changes and making // documents visible to queries (default: 1000, to disable use: 0). CommitIntervalMsec *int64 `json:"commitIntervalMsec,omitempty"` // ConsolidationIntervalMsec Wait at least this many milliseconds between applying ‘consolidationPolicy’ to consolidate View data store // and possibly release space on the filesystem (default: 1000, to disable use: 0). 
ConsolidationIntervalMsec *int64 `json:"consolidationIntervalMsec,omitempty"` // ConsolidationPolicy The consolidation policy to apply for selecting which segments should be merged (default: {}). ConsolidationPolicy *ConsolidationPolicy `json:"consolidationPolicy,omitempty"` // WriteBufferIdle Maximum number of writers (segments) cached in the pool (default: 64, use 0 to disable) WriteBufferIdle *int64 `json:"writebufferIdle,omitempty"` // WriteBufferActive Maximum number of concurrent active writers (segments) that perform a transaction. // Other writers (segments) wait till current active writers (segments) finish (default: 0, use 0 to disable) WriteBufferActive *int64 `json:"writebufferActive,omitempty"` // WriteBufferSizeMax Maximum memory byte size per writer (segment) before a writer (segment) flush is triggered. // 0 value turns off this limit for any writer (buffer) and data will be flushed periodically based on the value defined for the flush thread (ArangoDB server startup option). // 0 value should be used carefully due to high potential memory consumption (default: 33554432, use 0 to disable) WriteBufferSizeMax *int64 `json:"writebufferSizeMax,omitempty"` // OptimizeTopK is an array of strings defining optimized sort expressions. // Introduced in v3.11.0, Enterprise Edition only. OptimizeTopK []string `json:"optimizeTopK,omitempty"` // InBackground You can set this option to true to create the index in the background, // which will not write-lock the underlying collection for as long as if the index is built in the foreground. // The default value is false. InBackground *bool `json:"inBackground,omitempty"` }
InvertedIndexOptions provides specific options for creating an inverted index
type KeyGeneratorType ¶
type KeyGeneratorType string
KeyGeneratorType is a type of key generated, used in `CollectionKeyOptions`.
type KeyGeneratorsResponse ¶ added in v2.1.5
type KeyGeneratorsResponse struct {
KeyGenerators []string `json:"keyGenerators"`
}
type License ¶ added in v2.0.3
type License struct { // Features describe properties of the license. Features LicenseFeatures `json:"features"` // License is an encrypted license key in Base64 encoding. License string `json:"license,omitempty"` // Status is a status of a license. Status LicenseStatus `json:"status,omitempty"` // Version is a version of a license. Version int `json:"version"` // Hash The hash value of the license. Hash string `json:"hash,omitempty"` }
License describes license information.
type LicenseFeatures ¶ added in v2.0.3
type LicenseFeatures struct { // Expires is expiry date as Unix timestamp (seconds since January 1st, 1970 UTC). Expires int `json:"expires"` }
LicenseFeatures describes license's features.
type LicenseStatus ¶ added in v2.0.3
type LicenseStatus string
LicenseStatus describes license's status.
const ( // LicenseStatusGood - The license is valid for more than 2 weeks. LicenseStatusGood LicenseStatus = "good" // LicenseStatusExpired - The license has expired. In this situation, no new Enterprise Edition features can be utilized. LicenseStatusExpired LicenseStatus = "expired" // LicenseStatusExpiring - The license is valid for less than 2 weeks. LicenseStatusExpiring LicenseStatus = "expiring" // LicenseStatusReadOnly - The license has been expired for more than 2 weeks. The instance is now restricted to read-only mode. LicenseStatusReadOnly LicenseStatus = "read-only" )
type ListBackupsResponse ¶ added in v2.1.0
type ListBackupsResponse struct { Server string `json:"server,omitempty"` Backups map[string]BackupMeta `json:"list,omitempty"` }
type LogLevelsGetOptions ¶
type LogLevelsGetOptions struct { // ServerID describes log levels for a specific server ID. ServerID ServerID }
LogLevelsGetOptions describes log levels get options.
type LogLevelsSetOptions ¶
type LogLevelsSetOptions struct { // ServerID describes log levels for a specific server ID. ServerID ServerID }
LogLevelsSetOptions describes log levels set options.
type MDIFieldType ¶ added in v2.1.0
type MDIFieldType string
const MDIDoubleFieldType MDIFieldType = "double"
type Manifest ¶ added in v2.1.5
type Manifest struct { // Schema is the JSON schema URL for the manifest structure. Schema *string `json:"$schema,omitempty"` // Name is the name of the Foxx service. Name *string `json:"name,omitempty"` // Version is the service's semantic version. Version *string `json:"version,omitempty"` // License is the license identifier (e.g., "Apache-2.0"). License *string `json:"license,omitempty"` // Repository contains details about the service's source repository. Repository *Repository `json:"repository,omitempty"` // Author is the main author of the service. Author *string `json:"author,omitempty"` // Contributors is a list of people who contributed to the service. Contributors []*Contributor `json:"contributors,omitempty"` // Description provides a human-readable explanation of the service. Description *string `json:"description,omitempty"` // Engines specifies the engine requirements for running the service. Engines *Engines `json:"engines,omitempty"` // DefaultDocument specifies the default document to serve (e.g., "index.html"). DefaultDocument *string `json:"defaultDocument,omitempty"` // Main specifies the main entry point JavaScript file of the service. Main *string `json:"main,omitempty"` // Configuration contains service-specific configuration options. Configuration map[string]interface{} `json:"configuration,omitempty"` // Dependencies defines other services or packages this service depends on. Dependencies map[string]interface{} `json:"dependencies,omitempty"` // Files maps URL paths to static files or directories included in the service. Files map[string]interface{} `json:"files,omitempty"` // Scripts contains script definitions for service lifecycle hooks or tasks. Scripts map[string]interface{} `json:"scripts,omitempty"` }
Manifest represents the normalized manifest.json of the Foxx service.
type NumberOfServersResponse ¶ added in v2.1.0
type OptimizerRules ¶ added in v2.1.5
type PrimarySort ¶
type PrimarySort struct { // Fields (Required) - An array of the fields to sort the index by and the direction to sort each field in. Fields []PrimarySortEntry `json:"fields,omitempty"` // Compression Defines how to compress the primary sort data Compression PrimarySortCompression `json:"compression,omitempty"` // Cache - Enable this option to always cache the primary sort columns in memory. // This can improve the performance of queries that utilize the primary sort order. Cache *bool `json:"cache,omitempty"` }
PrimarySort defines compression and list of fields to be sorted
type PrimarySortCompression ¶
type PrimarySortCompression string
PrimarySortCompression Defines how to compress the primary sort data (introduced in v3.7.1)
const ( // PrimarySortCompressionLz4 (default): use LZ4 fast compression. PrimarySortCompressionLz4 PrimarySortCompression = "lz4" // PrimarySortCompressionNone disable compression to trade space for speed. PrimarySortCompressionNone PrimarySortCompression = "none" )
type PrimarySortEntry ¶
type PrimarySortEntry struct { // Field An attribute path. The . character denotes sub-attributes. Field string `json:"field,required"` // Ascending The sorting direction Ascending bool `json:"asc,required"` }
PrimarySortEntry field to sort the index by and the direction
type QueryCacheEntriesRespObject ¶ added in v2.1.5
type QueryCacheEntriesRespObject struct { CacheRespObject `json:",inline"` // Results is the number of documents in the query result. Results *uint32 `json:"results,omitempty"` // RunTime is the time it took to execute the query in seconds. RunTime string `json:"runTime,omitempty"` // Size is the size of the query result in bytes. Size *uint64 `json:"size,omitempty"` // Started is the date and time at which the query result has been added to the cache. Started *string `json:"started,omitempty"` }
type QueryCacheProperties ¶ added in v2.1.5
type QueryCacheProperties struct { // IncludeSystem indicates whether the query cache includes system collections. IncludeSystem *bool `json:"includeSystem,omitempty"` // MaxEntrySize is the maximum size of a single query cache entry in bytes. MaxEntrySize *uint64 `json:"maxEntrySize,omitempty"` // MaxResults is the maximum number of results that can be stored in the query cache. MaxResults *uint16 `json:"maxResults,omitempty"` // MaxResultsSize is the maximum size of the query cache in bytes. MaxResultsSize *uint64 `json:"maxResultsSize,omitempty"` // Mode is the query cache mode. // The mode can be one of the following values: // "on" - the query cache is enabled and will be used for all queries. // "off" - the query cache is disabled and will not be used for any queries. // "demand" - the query cache is enabled, but will only be used for queries that explicitly request it. Mode *string `json:"mode,omitempty"` }
type QueryOptions ¶
type QueryOptions struct { // Set this to true to allow the Coordinator to ask any shard replica for the data, not only the shard leader. // This may result in “dirty reads”. // This option is ignored if this operation is part of a DatabaseTransaction (TransactionID option). // The header set when creating the transaction decides about dirty reads for the entire transaction, // not the individual read operations. AllowDirtyReads *bool `json:"-"` // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string `json:"-"` // Indicates whether the number of documents in the result set should be returned in the "count" attribute of the result. // Calculating the "count" attribute might have a performance impact for some queries in the future so this option is // turned off by default, and "count" is only returned when requested. Count bool `json:"count,omitempty"` // maximum number of result documents to be transferred from the server to the client in one roundtrip. // If this attribute is not set, a server-controlled default value will be used. A batchSize value of 0 is disallowed. BatchSize int `json:"batchSize,omitempty"` // flag to determine whether the AQL query cache shall be used. If set to false, then any query cache lookup // will be skipped for the query. If set to true, it will lead to the query cache being checked for the query // if the query cache mode is either on or demand. Cache bool `json:"cache,omitempty"` // the maximum number of memory (measured in bytes) that the query is allowed to use. If set, then the query will fail // with error "resource limit exceeded" in case it allocates too much memory. A value of 0 indicates that there is no memory limit. MemoryLimit int64 `json:"memoryLimit,omitempty"` // The time-to-live for the cursor (in seconds). 
The cursor will be removed on the server automatically after the specified // amount of time. This is useful to ensure garbage collection of cursors that are not fully fetched by clients. // If not set, a server-defined value will be used. TTL float64 `json:"ttl,omitempty"` // key/value pairs representing the bind parameters. BindVars map[string]interface{} `json:"bindVars,omitempty"` Options QuerySubOptions `json:"options,omitempty"` }
type QueryPlanCacheRespObject ¶ added in v2.1.5
type QueryPlanCacheRespObject struct { CacheRespObject `json:",inline"` // QueryHash is the hash of the AQL query string. QueryHash *uint32 `json:"queryHash,omitempty"` // FullCount indicates whether the query result contains the full count of documents. FullCount *bool `json:"fullCount,omitempty"` // Created is the time when the query plan has been added to the cache. Created *string `json:"created,omitempty"` // MemoryUsage is the memory usage of the cached plan in bytes. // This is the amount of memory used by the cached plan on the server. MemoryUsage *uint64 `json:"memoryUsage,omitempty"` }
type QueryProperties ¶ added in v2.1.5
type QueryProperties struct { Enabled *bool `json:"enabled"` TrackSlowQueries *bool `json:"trackSlowQueries"` TrackBindVars *bool `json:"trackBindVars"` MaxSlowQueries *int `json:"maxSlowQueries"` SlowQueryThreshold *float64 `json:"slowQueryThreshold"` MaxQueryStringLength *int `json:"maxQueryStringLength"` }
type QueryRequest ¶
type QueryRequest struct {
Query string `json:"query"`
}
type QuerySubOptions ¶
type QuerySubOptions struct { // If you set this option to true and execute the query against a cluster deployment, then the Coordinator is // allowed to read from any shard replica and not only from the leader. // You may observe data inconsistencies (dirty reads) when reading from followers, namely obsolete revisions of // documents because changes have not yet been replicated to the follower, as well as changes to documents before // they are officially committed on the leader. // //This feature is only available in the Enterprise Edition. AllowDirtyReads bool `json:"allowDirtyReads,omitempty"` // AllowRetry If set to `true`, ArangoDB will store cursor results in such a way // that batch reads can be retried in the case of a communication error. AllowRetry bool `json:"allowRetry,omitempty"` // When set to true, the query will throw an exception and abort instead of producing a warning. // This option should be used during development to catch potential issues early. // When the attribute is set to false, warnings will not be propagated to exceptions and will be returned // with the query result. There is also a server configuration option --query.fail-on-warning for setting // the default value for failOnWarning so it does not need to be set on a per-query level. FailOnWarning *bool `json:"failOnWarning,omitempty"` // If set to true or not specified, this will make the query store the data it reads via the RocksDB storage engine // in the RocksDB block cache. This is usually the desired behavior. The option can be set to false for queries that // are known to either read a lot of data which would thrash the block cache, or for queries that read data which // are known to be outside of the hot set. By setting the option to false, data read by the query will not make it // into the RocksDB block cache if not already in there, thus leaving more room for the actual hot set. 
FillBlockCache bool `json:"fillBlockCache,omitempty"` // if set to true and the query contains a LIMIT clause, then the result will have an extra attribute with the sub-attributes // stats and fullCount, { ... , "extra": { "stats": { "fullCount": 123 } } }. The fullCount attribute will contain the number // of documents in the result before the last LIMIT in the query was applied. It can be used to count the number of documents // that match certain filter criteria, but only return a subset of them, in one go. It is thus similar to MySQL's SQL_CALC_FOUND_ROWS hint. // Note that setting the option will disable a few LIMIT optimizations and may lead to more documents being processed, and // thus make queries run longer. Note that the fullCount attribute will only be present in the result if the query has a LIMIT clause // and the LIMIT clause is actually used in the query. FullCount bool `json:"fullCount,omitempty"` // The maximum number of operations after which an intermediate commit is performed automatically. IntermediateCommitCount *int `json:"intermediateCommitCount,omitempty"` // The maximum total size of operations after which an intermediate commit is performed automatically. IntermediateCommitSize *int `json:"intermediateCommitSize,omitempty"` // A threshold for the maximum number of OR sub-nodes in the internal representation of an AQL FILTER condition. // Yon can use this option to limit the computation time and memory usage when converting complex AQL FILTER // conditions into the internal DNF (disjunctive normal form) format. FILTER conditions with a lot of logical // branches (AND, OR, NOT) can take a large amount of processing time and memory. This query option limits // the computation time and memory usage for such conditions. 
// // Once the threshold value is reached during the DNF conversion of a FILTER condition, the conversion is aborted, // and the query continues with a simplified internal representation of the condition, // which cannot be used for index lookups. // // You can set the threshold globally instead of per query with the --query.max-dnf-condition-members startup option. MaxDNFConditionMembers *int `json:"maxDNFConditionMembers,omitempty"` // The number of execution nodes in the query plan after that stack splitting is performed to avoid a potential // stack overflow. Defaults to the configured value of the startup option `--query.max-nodes-per-callstack`. // This option is only useful for testing and debugging and normally does not need any adjustment. MaxNodesPerCallstack *int `json:"maxNodesPerCallstack,omitempty"` // Limits the maximum number of plans that are created by the AQL query optimizer. MaxNumberOfPlans *int `json:"maxNumberOfPlans,omitempty"` // MaxRuntime specify the timeout which can be used to kill a query on the server after the specified // amount in time. The timeout value is specified in seconds. A value of 0 means no timeout will be enforced. MaxRuntime float64 `json:"maxRuntime,omitempty"` // The transaction size limit in bytes. MaxTransactionSize *int `json:"maxTransactionSize,omitempty"` // Limits the maximum number of warnings a query will return. The number of warnings a query will return is limited // to 10 by default, but that number can be increased or decreased by setting this attribute. MaxWarningCount *int `json:"maxWarningCount,omitempty"` // Optimizer contains options related to the query optimizer. Optimizer QuerySubOptionsOptimizer `json:"optimizer,omitempty"` // Profile If set to 1, then the additional query profiling information is returned in the profile sub-attribute // of the extra return attribute, unless the query result is served from the query cache. 
// If set to 2, the query includes execution stats per query plan node in stats.nodes // sub-attribute of the extra return attribute. // Additionally, the query plan is returned in the extra.plan sub-attribute. Profile uint `json:"profile,omitempty"` // This Enterprise Edition parameter allows to configure how long a DBServer will have time to bring the SatelliteCollections // involved in the query into sync. The default value is 60.0 (seconds). When the max time has been reached the query will be stopped. SatelliteSyncWait float64 `json:"satelliteSyncWait,omitempty"` // Let AQL queries (especially graph traversals) treat collection to which a user has no access rights for as if // these collections are empty. Instead of returning a forbidden access error, your queries execute normally. // This is intended to help with certain use-cases: A graph contains several collections and different users // execute AQL queries on that graph. You can naturally limit the accessible results by changing the access rights // of users on collections. // // This feature is only available in the Enterprise Edition. SkipInaccessibleCollections *bool `json:"skipInaccessibleCollections,omitempty"` // This option allows queries to store intermediate and final results temporarily on disk if the amount of memory // used (in bytes) exceeds the specified value. This is used for decreasing the memory usage during the query execution. // // This option only has an effect on queries that use the SORT operation but without a LIMIT, and if you enable //the spillover feature by setting a path for the directory to store the temporary data in with // the --temp.intermediate-results-path startup option. // // Default value: 128MB. SpillOverThresholdMemoryUsage *int `json:"spillOverThresholdMemoryUsage,omitempty"` // This option allows queries to store intermediate and final results temporarily on disk if the number of rows // produced by the query exceeds the specified value. 
This is used for decreasing the memory usage during the query // execution. In a query that iterates over a collection that contains documents, each row is a document, and in // a query that iterates over temporary values (i.e. FOR i IN 1..100), each row is one of such temporary values. // // This option only has an effect on queries that use the SORT operation but without a LIMIT, and if you enable // the spillover feature by setting a path for the directory to store the temporary data in with // the --temp.intermediate-results-path startup option. // // Default value: 5000000 rows. SpillOverThresholdNumRows *int `json:"spillOverThresholdNumRows,omitempty"` // Specify true and the query will be executed in a streaming fashion. The query result is not stored on // the server, but calculated on the fly. Beware: long-running queries will need to hold the collection // locks for as long as the query cursor exists. When set to false a query will be executed right away in // its entirety. Stream bool `json:"stream,omitempty"` // [unofficial] Limits the maximum number of plans that are created by the AQL query optimizer. MaxPlans int `json:"maxPlans,omitempty"` // [unofficial] ShardId query option ShardIds []string `json:"shardIds,omitempty"` // [unofficial] This query option can be used in complex queries in case the query optimizer cannot // automatically detect that the query can be limited to only a single server (e.g. in a Disjoint SmartGraph case). ForceOneShardAttributeValue *string `json:"forceOneShardAttributeValue,omitempty"` }
type QuerySubOptionsOptimizer ¶
type QuerySubOptionsOptimizer struct { // A list of to-be-included or to-be-excluded optimizer rules can be put into this attribute, // telling the optimizer to include or exclude specific rules. // To disable a rule, prefix its name with a -, to enable a rule, prefix it with a +. // There is also a pseudo-rule all, which will match all optimizer rules. Rules []string `json:"rules,omitempty"` }
QuerySubOptionsOptimizer describes optimization's settings for AQL queries.
type RemoveCollectionOptions ¶ added in v2.0.3
type RemoveCollectionOptions struct { // IsSystem when set to true allows to remove system collections. // Use on your own risk! IsSystem *bool }
type RemoveGraphOptions ¶ added in v2.1.0
type RemoveGraphOptions struct { // Drop the collections of this graph as well. Collections are only dropped if they are not used in other graphs. DropCollections bool }
type RemoveViewOptions ¶ added in v2.0.3
type RemoveViewOptions struct { // IsSystem when set to true allows to remove system views. // Use on your own risk! IsSystem *bool }
type RenameCollectionRequest ¶ added in v2.1.5
type RenameCollectionRequest struct {
Name string `json:"name"`
}
type ReplaceEdgeDefinitionResponse ¶ added in v2.1.0
type ReplaceEdgeDefinitionResponse struct { shared.ResponseStruct `json:",inline"` // GraphDefinition contains the updated graph definition GraphDefinition *GraphDefinition `json:"graph,omitempty"` Edge }
type ReplaceEdgeOptions ¶ added in v2.1.0
type ReplaceEdgeOptions struct { // An array of collection names that is used to create SatelliteCollections for a (Disjoint) SmartGraph // using SatelliteCollections (Enterprise Edition only). // Each array element must be a string and a valid collection name. The collection type cannot be modified later. Satellites []string `json:"satellites,omitempty"` // Define if the request should wait until synced to disk. WaitForSync *bool `json:"-"` // Drop the collection as well. The collection is only dropped if it is not used in other graphs. DropCollection *bool `json:"-"` }
type ReplicationFactor ¶
type ReplicationFactor int
const ( // ReplicationFactorSatellite represents a SatelliteCollection's replication factor ReplicationFactorSatellite ReplicationFactor = -1 )
func (ReplicationFactor) MarshalJSON ¶
func (r ReplicationFactor) MarshalJSON() ([]byte, error)
MarshalJSON marshals ReplicationFactor to its ArangoDB JSON representation
func (*ReplicationFactor) UnmarshalJSON ¶
func (r *ReplicationFactor) UnmarshalJSON(d []byte) error
UnmarshalJSON unmarshals ReplicationFactor from its ArangoDB JSON representation
type Repository ¶ added in v2.1.5
type Repository struct { // Type is the type of repository (e.g., "git"). Type *string `json:"type,omitempty"` // URL is the link to the repository. URL *string `json:"url,omitempty"` }
Repository describes the version control repository for the Foxx service.
type Requests ¶
type Requests interface { Get(ctx context.Context, output interface{}, urlParts ...string) (connection.Response, error) Post(ctx context.Context, output, input interface{}, urlParts ...string) (connection.Response, error) Put(ctx context.Context, output, input interface{}, urlParts ...string) (connection.Response, error) Delete(ctx context.Context, output interface{}, urlParts ...string) (connection.Response, error) Head(ctx context.Context, output interface{}, urlParts ...string) (connection.Response, error) Patch(ctx context.Context, output, input interface{}, urlParts ...string) (connection.Response, error) }
func NewRequests ¶
func NewRequests(connection connection.Connection, urlParts ...string) Requests
type ResponsibleShardRequest ¶ added in v2.1.5
type ResponsibleShardRequest struct { // Fill with shard key fields expected Key string `json:"_key,omitempty"` }
type RunningAQLQuery ¶ added in v2.1.5
type RunningAQLQuery struct { // The unique identifier of the query. Id *string `json:"id,omitempty"` // The database in which the query is running. Database *string `json:"database,omitempty"` // The user who executed the query. // This is the user who executed the query, not the user who is currently running the query. User *string `json:"user,omitempty"` // The query string. // This is the AQL query string that was executed. Query *string `json:"query,omitempty"` // The bind variables used in the query. BindVars *map[string]interface{} `json:"bindVars,omitempty"` // The time when the query started executing. // This is the time when the query started executing on the server. Started *string `json:"started,omitempty"` // The elapsed runtime of the query in seconds. RunTime *float64 `json:"runTime,omitempty"` // The PeakMemoryUsage is the peak memory usage of the query in bytes. PeakMemoryUsage *uint64 `json:"peakMemoryUsage,omitempty"` // The State of the query. // This is the current state of the query, e.g. "running", "finished", "executing", etc. State *string `json:"state,omitempty"` // The stream option indicates whether the query is executed in streaming mode. Stream *bool `json:"stream,omitempty"` }
type ServerHealth ¶
type ServerHealth struct { Endpoint string `json:"Endpoint"` LastHeartbeatAcked time.Time `json:"LastHeartbeatAcked"` LastHeartbeatSent time.Time `json:"LastHeartbeatSent"` LastHeartbeatStatus string `json:"LastHeartbeatStatus"` Role ServerRole `json:"Role"` ShortName string `json:"ShortName"` Status ServerStatus `json:"Status"` CanBeDeleted bool `json:"CanBeDeleted"` HostID string `json:"Host,omitempty"` Version Version `json:"Version,omitempty"` Engine EngineType `json:"Engine,omitempty"` SyncStatus ServerSyncStatus `json:"SyncStatus,omitempty"` // Only for Coordinators AdvertisedEndpoint *string `json:"AdvertisedEndpoint,omitempty"` // Only for Agents Leader *string `json:"Leader,omitempty"` Leading *bool `json:"Leading,omitempty"` }
ServerHealth contains health information of a single server in a cluster.
type ServerMode ¶ added in v2.0.3
type ServerMode string
const ( // ServerModeDefault is the normal mode of the database in which read and write requests // are allowed. ServerModeDefault ServerMode = "default" // ServerModeReadOnly is the mode in which all modifications to the database are blocked. // Behavior is the same as user that has read-only access to all databases & collections. ServerModeReadOnly ServerMode = "readonly" )
type ServerRole ¶
type ServerRole string
ServerRole is the role of an arangod server
const ( // ServerRoleSingle indicates that the server is a single-server instance ServerRoleSingle ServerRole = "Single" // ServerRoleSingleActive indicates that the server is the leader of a single-server resilient pair ServerRoleSingleActive ServerRole = "SingleActive" // ServerRoleSinglePassive indicates that the server is a follower of a single-server resilient pair ServerRoleSinglePassive ServerRole = "SinglePassive" // ServerRoleDBServer indicates that the server is a dbserver within a cluster ServerRoleDBServer ServerRole = "DBServer" // ServerRoleCoordinator indicates that the server is a coordinator within a cluster ServerRoleCoordinator ServerRole = "Coordinator" // ServerRoleAgent indicates that the server is an agent within a cluster ServerRoleAgent ServerRole = "Agent" // ServerRoleUndefined indicates that the role of the server cannot be determined ServerRoleUndefined ServerRole = "Undefined" )
func ConvertServerRole ¶
func ConvertServerRole(arangoDBRole string) ServerRole
ConvertServerRole returns go-driver server role based on ArangoDB role.
type ServerStatus ¶
type ServerStatus string
ServerStatus describes the health status of a server
const ( // ServerStatusGood indicates server is in good state ServerStatusGood ServerStatus = "GOOD" // ServerStatusBad indicates server has missed 1 heartbeat ServerStatusBad ServerStatus = "BAD" // ServerStatusFailed indicates server has been declared failed by the supervision, this happens after about 15s being bad. ServerStatusFailed ServerStatus = "FAILED" )
type ServerSyncStatus ¶
type ServerSyncStatus string
ServerSyncStatus describes the server's sync status
const ( ServerSyncStatusUnknown ServerSyncStatus = "UNKNOWN" ServerSyncStatusUndefined ServerSyncStatus = "UNDEFINED" ServerSyncStatusStartup ServerSyncStatus = "STARTUP" ServerSyncStatusStopping ServerSyncStatus = "STOPPING" ServerSyncStatusStopped ServerSyncStatus = "STOPPED" ServerSyncStatusServing ServerSyncStatus = "SERVING" ServerSyncStatusShutdown ServerSyncStatus = "SHUTDOWN" )
type SetCollectionPropertiesOptionsV2 ¶ added in v2.1.4
type SetCollectionPropertiesOptionsV2 struct { // If true then creating or changing a document will wait until the data has been synchronized to disk. WaitForSync *bool `json:"waitForSync,omitempty"` // The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MB). Note that when changing the journalSize value, it will only have an effect for additional journals or datafiles that are created. Already existing journals or datafiles will not be affected. JournalSize *int64 `json:"journalSize,omitempty"` // ReplicationFactor contains how many copies of each shard are kept on different DBServers. // Only available in cluster setup. ReplicationFactor *ReplicationFactor `json:"replicationFactor,omitempty"` // WriteConcern contains how many copies must be available before a collection can be written. // Available from 3.6 arangod version. WriteConcern *int `json:"writeConcern,omitempty"` // CacheEnabled set cacheEnabled option in collection properties CacheEnabled *bool `json:"cacheEnabled,omitempty"` // Schema for collection validation Schema *CollectionSchemaOptions `json:"schema,omitempty"` // ComputedValues let configure collections to generate document attributes when documents are created or modified, using an AQL expression ComputedValues *[]ComputedValue `json:"computedValues,omitempty"` }
SetCollectionPropertiesOptionsV2 contains data for Collection.SetProperties.
type ShardingStrategy ¶
type ShardingStrategy string
ShardingStrategy describes the sharding strategy of a collection
const ( ShardingStrategyCommunityCompat ShardingStrategy = "community-compat" ShardingStrategyEnterpriseCompat ShardingStrategy = "enterprise-compat" ShardingStrategyEnterpriseSmartEdgeCompat ShardingStrategy = "enterprise-smart-edge-compat" ShardingStrategyHash ShardingStrategy = "hash" ShardingStrategyEnterpriseHashSmartEdge ShardingStrategy = "enterprise-hash-smart-edge" )
type SortDirection ¶
type SortDirection string
SortDirection describes the sorting direction
const ( // SortDirectionAsc sort ascending SortDirectionAsc SortDirection = "asc" // SortDirectionDesc sort descending SortDirectionDesc SortDirection = "desc" )
type StoredValue ¶
type StoredValue struct { // Fields A list of attribute paths. The . character denotes sub-attributes. Fields []string `json:"fields,omitempty"` // Compression Defines how to compress the attribute values. Compression PrimarySortCompression `json:"compression,omitempty"` // Cache attribute allows you to always cache stored values in memory // Introduced in v3.9.5, Enterprise Edition only Cache *bool `json:"cache,omitempty"` }
StoredValue defines the value stored in the index
type SwaggerInfo ¶ added in v2.1.5
type SwaggerInfo struct { // Title is the title of the API. Title *string `json:"title,omitempty"` // Description provides a short description of the API. Description *string `json:"description,omitempty"` // Version specifies the version of the API. Version *string `json:"version,omitempty"` // License provides licensing information for the API. License map[string]interface{} `json:"license,omitempty"` }
SwaggerInfo contains general metadata about the API, typically displayed in tools like Swagger UI.
type SwaggerResponse ¶ added in v2.1.5
type SwaggerResponse struct { // Swagger specifies the Swagger specification version (e.g., "2.0"). Swagger *string `json:"swagger,omitempty"` // BasePath defines the base path on which the API is served, relative to the host. BasePath *string `json:"basePath,omitempty"` // Paths holds the available endpoints and their supported operations. Paths map[string]interface{} `json:"paths,omitempty"` // Info provides metadata about the API, such as title, version, and license. Info *SwaggerInfo `json:"info,omitempty"` }
SwaggerResponse represents the root object of a Swagger (OpenAPI 2.0) specification. It contains metadata, versioning information, available API paths, and additional details.
type Task ¶ added in v2.1.5
type Task interface { // ID returns the ID of the task. ID() *string // Name returns the name of the task. Name() *string // Command returns the JavaScript code of the task. Command() *string // Params returns the parameters of the task. Params(result interface{}) error // Period returns the period (in seconds) of the task. Period() *int64 // Offset returns the offset (in milliseconds) of the task. Offset() *float64 }
Task provides access to a single task on the server.
type TaskOptions ¶ added in v2.1.5
type TaskOptions struct { // ID is an optional identifier for the task. ID *string `json:"id,omitempty"` // Name is an optional name for the task. Name *string `json:"name,omitempty"` // Command is the JavaScript code to be executed. Command *string `json:"command"` // Params are optional parameters passed to the command. Params interface{} `json:"params,omitempty"` // Period is the interval (in seconds) at which the task runs periodically. // If zero, the task runs once after the offset. Period *int64 `json:"period,omitempty"` // Offset is the delay (in milliseconds) before the task is first executed. Offset *float64 `json:"offset,omitempty"` }
TaskOptions contains options for creating a new task.
type Transaction ¶
type Transaction interface { ID() TransactionID Status(ctx context.Context) (TransactionStatusRecord, error) Commit(ctx context.Context, opts *CommitTransactionOptions) error Abort(ctx context.Context, opts *AbortTransactionOptions) error DatabaseCollection DatabaseQuery }
type TransactionCollections ¶
type TransactionCollections struct { // Collections that the transaction reads from. Read []string `json:"read,omitempty"` // Collections that the transaction writes to. Write []string `json:"write,omitempty"` // Collections that the transaction writes exclusively to. Exclusive []string `json:"exclusive,omitempty"` }
TransactionCollections is used to specify which collections are accessed by a transaction and how
type TransactionJSOptions ¶ added in v2.0.3
type TransactionJSOptions struct { // The actual transaction operations to be executed, in the form of stringified JavaScript code Action string `json:"action"` // An optional boolean flag that, if set, will force the transaction to write // all data to disk before returning. WaitForSync *bool `json:"waitForSync,omitempty"` // Allow reading from undeclared collections. AllowImplicit *bool `json:"allowImplicit,omitempty"` // An optional numeric value that can be used to set a timeout for waiting on collection locks. // If not specified, a default value will be used. // Setting lockTimeout to 0 will make ArangoDB not time out waiting for a lock. LockTimeout *int `json:"lockTimeout,omitempty"` // Optional arguments passed to action. Params []string `json:"params,omitempty"` // Transaction size limit in bytes. Honored by the RocksDB storage engine only. MaxTransactionSize *int `json:"maxTransactionSize,omitempty"` Collections TransactionCollections `json:"collections"` }
TransactionJSOptions contains options that customize the JavaScript transaction
type TransactionStatus ¶
type TransactionStatus string
TransactionStatus describes the status of a transaction
const ( TransactionRunning TransactionStatus = "running" TransactionCommitted TransactionStatus = "committed" TransactionAborted TransactionStatus = "aborted" )
type TransactionStatusRecord ¶
type TransactionStatusRecord struct {
Status TransactionStatus
}
TransactionStatusRecord provides insight about the status of transaction
type TransactionStatuses ¶
type TransactionStatuses []TransactionStatus
TransactionStatuses list of transaction statuses
func (TransactionStatuses) Contains ¶
func (t TransactionStatuses) Contains(status TransactionStatus) bool
type TransactionWrap ¶
type TransactionWrap func(ctx context.Context, t Transaction) error
type TransferMonitor ¶ added in v2.1.0
type TransferType ¶ added in v2.1.0
type TransferType string
const ( TransferTypeUpload TransferType = "upload" TransferTypeDownload TransferType = "download" )
type UninstallFoxxServiceRequest ¶ added in v2.1.4
type UninstallFoxxServiceRequest struct {
FoxxDeleteOptions `json:",inline"`
}
type Unmarshal ¶ added in v2.1.3
type Unmarshal[C, T any] struct { Current *C Object T }
func (*Unmarshal[C, T]) UnmarshalJSON ¶ added in v2.1.3
type UnmarshalData ¶ added in v2.1.3
type UnmarshalData []byte
func (*UnmarshalData) Extract ¶ added in v2.1.3
func (u *UnmarshalData) Extract(key string) Unmarshaler
func (*UnmarshalData) Inject ¶ added in v2.1.3
func (u *UnmarshalData) Inject(object any) error
func (*UnmarshalData) UnmarshalJSON ¶ added in v2.1.3
func (u *UnmarshalData) UnmarshalJSON(bytes []byte) error
type UnmarshalInto ¶
type UnmarshalInto struct {
// contains filtered or unexported fields
}
func (*UnmarshalInto) UnmarshalJSON ¶
func (u *UnmarshalInto) UnmarshalJSON(d []byte) error
type Unmarshaler ¶ added in v2.1.3
type Unmarshaler interface { json.Unmarshaler Inject(object any) error Extract(key string) Unmarshaler }
type User ¶ added in v2.1.0
type User interface { // Name returns the name of the user. Name() string // IsActive returns whether the user is active. IsActive() bool // Extra information about this user that was passed during its creation/update/replacement Extra(result interface{}) error UserPermissions }
User provides access to a single user of a single server / cluster of servers.
type UserDefinedFunctionObject ¶ added in v2.1.5
type UserDefinedFunctionObject struct { // Code is the JavaScript function body as a string. Code *string `json:"code"` // Name is the fully qualified name of the user-defined function, including namespace. Name *string `json:"name"` // IsDeterministic indicates whether the function always produces the same output for identical input. IsDeterministic *bool `json:"isDeterministic"` }
type UserOptions ¶ added in v2.1.0
type UserOptions struct { // The user password as a string. If not specified, it will default to an empty string. Password string `json:"passwd,omitempty"` // An optional flag that specifies whether the user is active. If not specified, this will default to true. Active *bool `json:"active,omitempty"` // A JSON object with extra user information. // The data contained in extra will be stored for the user but not be interpreted further by ArangoDB. Extra interface{} `json:"extra,omitempty"` }
UserOptions contains options for creating a new user, updating or replacing a user.
type UserPermissions ¶ added in v2.1.0
type UserPermissions interface { // AccessibleDatabases returns a list of all databases that can be accessed (read/write or read-only) by this user. AccessibleDatabases(ctx context.Context) (map[string]Grant, error) // AccessibleDatabasesFull return the full set of access levels for all databases and all collections. AccessibleDatabasesFull(ctx context.Context) (map[string]DatabasePermissions, error) // GetDatabaseAccess fetch the database access level for a specific database GetDatabaseAccess(ctx context.Context, db string) (Grant, error) // GetCollectionAccess returns the collection access level for a specific collection GetCollectionAccess(ctx context.Context, db, col string) (Grant, error) // SetDatabaseAccess sets the access this user has to the given database. // Pass a `nil` database to set the default access this user has to any new database. // You need the Administrate server access level SetDatabaseAccess(ctx context.Context, db string, access Grant) error // SetCollectionAccess sets the access this user has to a collection. // You need the Administrate server access level SetCollectionAccess(ctx context.Context, db, col string, access Grant) error // RemoveDatabaseAccess removes the access this user has to the given database. // As a consequence, the default database access level is used. // If there is no defined default database access level, it defaults to No access. // You need a write permissions (Administrate access level) for the '_system' database RemoveDatabaseAccess(ctx context.Context, db string) error // RemoveCollectionAccess removes the access this user has to a collection. // As a consequence, the default collection access level is used. // If there is no defined default collection access level, it defaults to No access. RemoveCollectionAccess(ctx context.Context, db, col string) error }
type Version ¶
type Version string
Version holds a server version string. The string has the format "major.minor.sub". Major and minor will be numeric, and sub may contain a number or a textual version.
func (Version) CompareTo ¶
CompareTo returns an integer comparing two versions. The result will be 0 if v==other, -1 if v < other, and +1 if v > other. If major & minor parts are equal and sub part is not a number, the sub part will be compared using lexicographical string comparison.
type VersionInfo ¶
type VersionInfo struct { // This will always contain "arango" Server string `json:"server,omitempty"` // The server version string. The string has the format "major.minor.sub". // Major and minor will be numeric, and sub may contain a number or a textual version. Version Version `json:"version,omitempty"` // Type of license of the server License string `json:"license,omitempty"` // Optional additional details. This is returned only if details were requested Details map[string]interface{} `json:"details,omitempty"` }
VersionInfo describes the version of a database server.
func (VersionInfo) IsEnterprise ¶
func (v VersionInfo) IsEnterprise() bool
func (VersionInfo) String ¶
func (v VersionInfo) String() string
String creates a string representation of the given VersionInfo.
type VertexCollection ¶ added in v2.1.0
type VertexCollection interface { GraphCollection // Name returns the name of the vertex collection Name() string // GetVertex Gets a vertex from the given collection. // To get _key and _rev values, embed the DocumentMeta struct in your result struct. GetVertex(ctx context.Context, key string, result interface{}, opts *GetVertexOptions) error // CreateVertex Adds a vertex to the given collection. // To get _key and _rev values, embed the DocumentMeta struct in your result struct and pass to VertexCreateResponse.New. CreateVertex(ctx context.Context, vertex interface{}, opts *CreateVertexOptions) (VertexCreateResponse, error) // UpdateVertex Updates the data of the specific vertex in the collection. UpdateVertex(ctx context.Context, key string, newValue interface{}, opts *VertexUpdateOptions) (VertexUpdateResponse, error) // ReplaceVertex Replaces the data of a vertex in the collection. ReplaceVertex(ctx context.Context, key string, newValue interface{}, opts *VertexReplaceOptions) (VertexReplaceResponse, error) // DeleteVertex Removes a vertex from the collection. DeleteVertex(ctx context.Context, key string, opts *DeleteVertexOptions) (VertexDeleteResponse, error) }
type VertexCreateResponse ¶ added in v2.1.0
type VertexCreateResponse struct { DocumentMeta shared.ResponseStruct `json:",inline"` New interface{} }
type VertexDeleteResponse ¶ added in v2.1.0
type VertexDeleteResponse struct { shared.ResponseStruct `json:",inline"` Old interface{} }
type VertexReplaceOptions ¶ added in v2.1.0
type VertexReplaceOptions struct { // Define if the request should wait until synced to disk. WaitForSync *bool // Define if a presentation of the new document should be returned within the response object. NewObject interface{} // Define if a presentation of the deleted document should be returned within the response object. OldObject interface{} // Define if values set to null should be stored. By default (true), the given documents attribute(s) // are set to null. If this parameter is set to false, top-level attribute and sub-attributes with a null value // in the request are removed from the document (but not attributes of objects that are nested inside of arrays). KeepNull *bool // Conditionally replace a vertex based on a target revision id // If the “If-Match” header is given, then it must contain exactly one ETag (_rev). IfMatch string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type VertexReplaceResponse ¶ added in v2.1.0
type VertexReplaceResponse struct { DocumentMeta shared.ResponseStruct `json:",inline"` Old, New interface{} }
type VertexUpdateOptions ¶ added in v2.1.0
type VertexUpdateOptions struct { // Define if the request should wait until synced to disk. WaitForSync *bool // Define if a presentation of the new document should be returned within the response object. NewObject interface{} // Define if a presentation of the deleted document should be returned within the response object. OldObject interface{} // Define if values set to null should be stored. By default (true), the given documents attribute(s) // are set to null. If this parameter is set to false, top-level attribute and sub-attributes with a null value // in the request are removed from the document (but not attributes of objects that are nested inside of arrays). KeepNull *bool // Conditionally update a vertex based on a target revision id // If the “If-Match” header is given, then it must contain exactly one ETag (_rev). IfMatch string // To make this operation a part of a Stream Transaction, set this header to the transaction ID returned by the // DatabaseTransaction.BeginTransaction() method. TransactionID string }
type VertexUpdateResponse ¶ added in v2.1.0
type VertexUpdateResponse struct { DocumentMeta shared.ResponseStruct `json:",inline"` Old, New interface{} }
type View ¶ added in v2.0.3
type View interface { // Name returns the name of the view. Name() string // Type returns the type of this view. Type() ViewType // ArangoSearchView returns this view as an ArangoSearch view. // When the type of the view is not ArangoSearch, an error is returned. ArangoSearchView() (ArangoSearchView, error) // ArangoSearchViewAlias returns this view as an ArangoSearch view alias. // When the type of the view is not ArangoSearch alias, an error is returned. ArangoSearchViewAlias() (ArangoSearchViewAlias, error) // Database returns the database containing the view. Database() Database // Rename renames the view (SINGLE server only). Rename(ctx context.Context, newName string) error // Remove removes the entire view. // If the view does not exist, a NotFoundError is returned. Remove(ctx context.Context) error // RemoveWithOptions removes the entire view. // If the view does not exist, a NotFoundError is returned. RemoveWithOptions(ctx context.Context, opts *RemoveViewOptions) error }
type ViewsResponseReader ¶ added in v2.0.3
Source Files
¶
- analyzer.go
- analyzer_impl.go
- arango_search.go
- asyncjob.go
- asyncjob_impl.go
- client.go
- client_admin.go
- client_admin_backup.go
- client_admin_backup_impl.go
- client_admin_cluster.go
- client_admin_cluster_impl.go
- client_admin_impl.go
- client_admin_license_impl.go
- client_admin_log_impl.go
- client_database.go
- client_database_impl.go
- client_foxx.go
- client_foxx_impl.go
- client_impl.go
- client_server_info.go
- client_server_info_impl.go
- collection.go
- collection_documents.go
- collection_documents_create.go
- collection_documents_create_impl.go
- collection_documents_delete.go
- collection_documents_delete_impl.go
- collection_documents_impl.go
- collection_documents_import.go
- collection_documents_import_impl.go
- collection_documents_read.go
- collection_documents_read_impl.go
- collection_documents_replace.go
- collection_documents_replace_impl.go
- collection_documents_update.go
- collection_documents_update_impl.go
- collection_impl.go
- collection_indexe_inverted.go
- collection_indexes.go
- collection_indexes_impl.go
- collection_opts.go
- collection_opts_schema.go
- cursor.go
- cursor_impl.go
- database.go
- database_analyzer.go
- database_analyzer_impl.go
- database_collection.go
- database_collection_impl.go
- database_collection_opts.go
- database_graph.go
- database_graph_impl.go
- database_impl.go
- database_opts.go
- database_query.go
- database_query_impl.go
- database_transaction.go
- database_transaction_impl.go
- database_transaction_opts.go
- database_view.go
- database_view_impl.go
- graph.go
- graph_collection.go
- graph_edge_definitions.go
- graph_edge_definitions_edges.go
- graph_edge_definitions_edges_impl.go
- graph_edge_definitions_impl.go
- graph_impl.go
- graph_vertex_collections.go
- graph_vertex_collections_impl.go
- graph_vertex_collections_vertices.go
- graph_vertex_collections_vertices_impl.go
- meta.go
- requests.go
- server_mode.go
- shared.go
- tasks.go
- tasks_impl.go
- transaction.go
- transaction_impl.go
- unmarshaller.go
- users.go
- users_impl.go
- users_permissions.go
- users_permissions_impl.go
- utils.go
- version.go
- view.go
- view_arango_search.go
- view_arango_search_impl.go
- view_impl.go
- view_search_alias.go
- view_search_alias_impl.go