provisioning

package
v0.95.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Dec 17, 2025 License: Apache-2.0 Imports: 9 Imported by: 10

Documentation

Overview

These APIs allow you to manage Credentials, Encryption Keys, Networks, Private Access, Storage, Vpc Endpoints, Workspaces, etc.

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type AwsCredentials

// AwsCredentials carries the AWS credential information attached to a
// credential configuration object, currently only an STS role.
type AwsCredentials struct {
	// The STS role for this credential configuration — presumably the
	// cross-account IAM role; see StsRole for details.
	StsRole *StsRole `json:"sts_role,omitempty"`
}

type AwsKeyInfo

// AwsKeyInfo describes an AWS KMS key used in a customer-managed key
// configuration (alias, ARN, region, and EBS reuse behavior).
type AwsKeyInfo struct {
	// The AWS KMS key alias.
	KeyAlias string `json:"key_alias,omitempty"`
	// The AWS KMS key's Amazon Resource Name (ARN).
	KeyArn string `json:"key_arn"`
	// The AWS KMS key region.
	KeyRegion string `json:"key_region"`
	// This field applies only if the `use_cases` property includes `STORAGE`.
	// If this is set to true or omitted, the key is also used to encrypt
	// cluster EBS volumes. If you do not want to use this key for encrypting
	// EBS volumes, set to false.
	ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"`

	// Names of fields to serialize even when they hold their zero value;
	// excluded from JSON and URL encoding themselves.
	ForceSendFields []string `json:"-" url:"-"`
}

func (AwsKeyInfo) MarshalJSON added in v0.23.0

func (s AwsKeyInfo) MarshalJSON() ([]byte, error)

func (*AwsKeyInfo) UnmarshalJSON added in v0.23.0

func (s *AwsKeyInfo) UnmarshalJSON(b []byte) error

type AzureKeyInfo added in v0.86.0

// AzureKeyInfo describes an Azure Key Vault key used in a customer-managed
// key configuration, including optional Disk Encryption Set and managed
// identity access information.
type AzureKeyInfo struct {
	// The Disk Encryption Set id that is used to represent the key info used
	// for Managed Disk BYOK use case
	DiskEncryptionSetId string `json:"disk_encryption_set_id,omitempty"`
	// The structure to store key access credential This is set if the Managed
	// Identity is being used to access the Azure Key Vault key.
	KeyAccessConfiguration *KeyAccessConfiguration `json:"key_access_configuration,omitempty"`
	// The name of the key in KeyVault.
	KeyName string `json:"key_name,omitempty"`
	// The base URI of the KeyVault.
	KeyVaultUri string `json:"key_vault_uri,omitempty"`
	// The tenant id where the KeyVault lives.
	TenantId string `json:"tenant_id,omitempty"`
	// The current key version.
	Version string `json:"version,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (AzureKeyInfo) MarshalJSON added in v0.86.0

func (s AzureKeyInfo) MarshalJSON() ([]byte, error)

func (*AzureKeyInfo) UnmarshalJSON added in v0.86.0

func (s *AzureKeyInfo) UnmarshalJSON(b []byte) error

type AzureWorkspaceInfo added in v0.28.0

// AzureWorkspaceInfo identifies where an Azure workspace lives: its resource
// group and subscription.
type AzureWorkspaceInfo struct {
	// Azure Resource Group name
	ResourceGroup string `json:"resource_group,omitempty"`
	// Azure Subscription ID
	SubscriptionId string `json:"subscription_id,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (AzureWorkspaceInfo) MarshalJSON added in v0.28.0

func (s AzureWorkspaceInfo) MarshalJSON() ([]byte, error)

func (*AzureWorkspaceInfo) UnmarshalJSON added in v0.28.0

func (s *AzureWorkspaceInfo) UnmarshalJSON(b []byte) error

type CloudResourceContainer

// CloudResourceContainer holds cloud-provider-specific resource container
// information; only a GCP variant is defined here.
type CloudResourceContainer struct {
	// GCP resource container information (the customer's GCP project).
	Gcp *CustomerFacingGcpCloudResourceContainer `json:"gcp,omitempty"`
}

type CreateAwsKeyInfo

// CreateAwsKeyInfo is the AWS KMS key information supplied when creating a
// customer-managed key configuration. Unlike AwsKeyInfo, key_region is
// optional here (tagged omitempty).
type CreateAwsKeyInfo struct {
	// The AWS KMS key alias.
	KeyAlias string `json:"key_alias,omitempty"`
	// The AWS KMS key's Amazon Resource Name (ARN).
	KeyArn string `json:"key_arn"`
	// The AWS KMS key region.
	KeyRegion string `json:"key_region,omitempty"`
	// This field applies only if the `use_cases` property includes `STORAGE`.
	// If this is set to true or omitted, the key is also used to encrypt
	// cluster EBS volumes. If you do not want to use this key for encrypting
	// EBS volumes, set to false.
	ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (CreateAwsKeyInfo) MarshalJSON added in v0.23.0

func (s CreateAwsKeyInfo) MarshalJSON() ([]byte, error)

func (*CreateAwsKeyInfo) UnmarshalJSON added in v0.23.0

func (s *CreateAwsKeyInfo) UnmarshalJSON(b []byte) error

type CreateCredentialAwsCredentials

// CreateCredentialAwsCredentials carries the AWS credential information
// supplied when creating a credential configuration.
type CreateCredentialAwsCredentials struct {
	// The cross-account STS role to register; see CreateCredentialStsRole.
	StsRole *CreateCredentialStsRole `json:"sts_role,omitempty"`
}

type CreateCredentialRequest

// CreateCredentialRequest is the request body for creating a credential
// configuration object.
type CreateCredentialRequest struct {
	// The AWS credential (STS role) information to register.
	AwsCredentials CreateCredentialAwsCredentials `json:"aws_credentials"`
	// The human-readable name of the credential configuration object.
	CredentialsName string `json:"credentials_name"`
}

type CreateCredentialStsRole

// CreateCredentialStsRole identifies the cross-account IAM role used when
// creating a credential configuration.
type CreateCredentialStsRole struct {
	// The Amazon Resource Name (ARN) of the cross account IAM role.
	RoleArn string `json:"role_arn,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (CreateCredentialStsRole) MarshalJSON added in v0.23.0

func (s CreateCredentialStsRole) MarshalJSON() ([]byte, error)

func (*CreateCredentialStsRole) UnmarshalJSON added in v0.23.0

func (s *CreateCredentialStsRole) UnmarshalJSON(b []byte) error

type CreateCustomerManagedKeyRequest

// CreateCustomerManagedKeyRequest is the request body for creating an
// encryption key configuration. Exactly which of the cloud-specific key-info
// fields applies depends on the cloud — NOTE(review): confirm whether AWS and
// GCP key info are mutually exclusive.
type CreateCustomerManagedKeyRequest struct {
	// AWS KMS key information; see CreateAwsKeyInfo.
	AwsKeyInfo *CreateAwsKeyInfo `json:"aws_key_info,omitempty"`

	// GCP KMS key information; see CreateGcpKeyInfo.
	GcpKeyInfo *CreateGcpKeyInfo `json:"gcp_key_info,omitempty"`
	// The cases that the key can be used for.
	UseCases []KeyUseCase `json:"use_cases"`
}

type CreateGcpKeyInfo added in v0.9.0

// CreateGcpKeyInfo is the GCP KMS key information supplied when creating a
// customer-managed key configuration.
type CreateGcpKeyInfo struct {
	// Globally unique service account email that has access to the KMS key. The
	// service account exists within the Databricks CP project.
	GcpServiceAccount *GcpServiceAccount `json:"gcp_service_account,omitempty"`
	// Globally unique kms key resource id of the form
	// projects/testProjectId/locations/us-east4/keyRings/gcpCmkKeyRing/cryptoKeys/cmk-eastus4
	KmsKeyId string `json:"kms_key_id"`
}

type CreateNetworkRequest

// CreateNetworkRequest is the request body for registering a network
// configuration (VPC, subnets, security groups, and optional VPC endpoints).
type CreateNetworkRequest struct {
	// GCP-specific network information; see GcpNetworkInfo.
	GcpNetworkInfo *GcpNetworkInfo `json:"gcp_network_info,omitempty"`
	// The human-readable name of the network configuration.
	NetworkName string `json:"network_name,omitempty"`
	// IDs of one to five security groups associated with this network. Security
	// group IDs **cannot** be used in multiple network configurations.
	SecurityGroupIds []string `json:"security_group_ids,omitempty"`
	// IDs of at least two subnets associated with this network. Subnet IDs
	// **cannot** be used in multiple network configurations.
	SubnetIds []string `json:"subnet_ids,omitempty"`

	// VPC endpoints to associate with this network; see NetworkVpcEndpoints.
	VpcEndpoints *NetworkVpcEndpoints `json:"vpc_endpoints,omitempty"`
	// The ID of the VPC associated with this network configuration. VPC IDs can
	// be used in multiple networks.
	VpcId string `json:"vpc_id,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (CreateNetworkRequest) MarshalJSON added in v0.23.0

func (s CreateNetworkRequest) MarshalJSON() ([]byte, error)

func (*CreateNetworkRequest) UnmarshalJSON added in v0.23.0

func (s *CreateNetworkRequest) UnmarshalJSON(b []byte) error

type CreatePrivateAccessSettingsRequest added in v0.79.0

// CreatePrivateAccessSettingsRequest is the request body for creating a
// private access settings object, which controls PrivateLink access to
// workspaces that attach it.
type CreatePrivateAccessSettingsRequest struct {
	// An array of Databricks VPC endpoint IDs. This is the Databricks ID
	// returned when registering the VPC endpoint configuration in your
	// Databricks account. This is not the ID of the VPC endpoint in AWS. Only
	// used when private_access_level is set to ENDPOINT. This is an allow list
	// of VPC endpoints registered in your Databricks account that can connect
	// to your workspace over AWS PrivateLink. Note: If hybrid access to your
	// workspace is enabled by setting public_access_enabled to true, this
	// control only works for PrivateLink connections. To control how your
	// workspace is accessed via public internet, see IP access lists.
	AllowedVpcEndpointIds []string `json:"allowed_vpc_endpoint_ids,omitempty"`
	// The private access level controls which VPC endpoints can connect to the
	// UI or API of any workspace that attaches this private access settings
	// object. `ACCOUNT` level access (the default) allows only VPC endpoints
	// that are registered in your Databricks account connect to your workspace.
	// `ENDPOINT` level access allows only specified VPC endpoints connect to
	// your workspace. For details, see allowed_vpc_endpoint_ids.
	PrivateAccessLevel PrivateAccessLevel `json:"private_access_level,omitempty"`
	// The human-readable name of the private access settings object.
	PrivateAccessSettingsName string `json:"private_access_settings_name,omitempty"`
	// Determines if the workspace can be accessed over public internet. For
	// fully private workspaces, you can optionally specify false, but only if
	// you implement both the front-end and the back-end PrivateLink
	// connections. Otherwise, specify true, which means that public access is
	// enabled.
	PublicAccessEnabled bool `json:"public_access_enabled,omitempty"`
	// The AWS region for workspaces attached to this private access settings
	// object.
	Region string `json:"region,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (CreatePrivateAccessSettingsRequest) MarshalJSON added in v0.79.0

func (s CreatePrivateAccessSettingsRequest) MarshalJSON() ([]byte, error)

func (*CreatePrivateAccessSettingsRequest) UnmarshalJSON added in v0.79.0

func (s *CreatePrivateAccessSettingsRequest) UnmarshalJSON(b []byte) error

type CreateStorageConfigurationRequest

// CreateStorageConfigurationRequest is the request body for creating a
// storage configuration (root S3 bucket plus optional workspace-catalog IAM
// role).
type CreateStorageConfigurationRequest struct {
	// Optional IAM role that is used to access the workspace catalog which is
	// created during workspace creation for UC by Default. If a storage
	// configuration with this field populated is used to create a workspace,
	// then a workspace catalog is created together with the workspace. The
	// workspace catalog shares the root bucket with internal workspace storage
	// (including DBFS root) but uses a dedicated bucket path prefix.
	RoleArn string `json:"role_arn,omitempty"`
	// Root S3 bucket information.
	RootBucketInfo RootBucketInfo `json:"root_bucket_info"`
	// The human-readable name of the storage configuration.
	StorageConfigurationName string `json:"storage_configuration_name"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (CreateStorageConfigurationRequest) MarshalJSON added in v0.73.0

func (s CreateStorageConfigurationRequest) MarshalJSON() ([]byte, error)

func (*CreateStorageConfigurationRequest) UnmarshalJSON added in v0.73.0

func (s *CreateStorageConfigurationRequest) UnmarshalJSON(b []byte) error

type CreateVpcEndpointRequest

// CreateVpcEndpointRequest is the request body for registering a VPC endpoint
// configuration.
type CreateVpcEndpointRequest struct {
	// The ID of the VPC endpoint object in AWS.
	AwsVpcEndpointId string `json:"aws_vpc_endpoint_id,omitempty"`
	// The cloud info of this vpc endpoint.
	GcpVpcEndpointInfo *GcpVpcEndpointInfo `json:"gcp_vpc_endpoint_info,omitempty"`
	// The region in which this VPC endpoint object exists.
	Region string `json:"region,omitempty"`
	// The human-readable name of the VPC endpoint configuration.
	// (Original comment said "storage configuration" — a copy-paste error.)
	VpcEndpointName string `json:"vpc_endpoint_name,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (CreateVpcEndpointRequest) MarshalJSON added in v0.23.0

func (s CreateVpcEndpointRequest) MarshalJSON() ([]byte, error)

func (*CreateVpcEndpointRequest) UnmarshalJSON added in v0.23.0

func (s *CreateVpcEndpointRequest) UnmarshalJSON(b []byte) error

type CreateWorkspaceRequest

// CreateWorkspaceRequest is the request body for creating a workspace. It
// mixes AWS- and GCP-specific fields; which subset applies depends on the
// cloud the account is provisioned on.
type CreateWorkspaceRequest struct {
	// The AWS region of the workspace. NOTE(review): the Cloud field below is
	// documented as always `gcp`, which conflicts with this AWS field —
	// confirm against the Account API docs.
	AwsRegion string `json:"aws_region,omitempty"`
	// The cloud name. This field always has the value `gcp`.
	Cloud string `json:"cloud,omitempty"`

	// Cloud-specific resource container (GCP project) information.
	CloudResourceContainer *CloudResourceContainer `json:"cloud_resource_container,omitempty"`
	// If the compute mode is `SERVERLESS`, a serverless workspace is created
	// that comes pre-configured with serverless compute and default storage,
	// providing a fully-managed, enterprise-ready SaaS experience. This means
	// you don't need to provide any resources managed by you, such as
	// credentials, storage, or network. If the compute mode is `HYBRID` (which
	// is the default option), a classic workspace is created that uses
	// customer-managed resources.
	ComputeMode CustomerFacingComputeMode `json:"compute_mode,omitempty"`
	// ID of the workspace's credential configuration object.
	CredentialsId string `json:"credentials_id,omitempty"`
	// The custom tags key-value pairing that is attached to this workspace. The
	// key-value pair is a string of utf-8 characters. The value can be an empty
	// string, with maximum length of 255 characters. The key can be of maximum
	// length of 127 characters, and cannot be empty.
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// The deployment name defines part of the subdomain for the workspace. The
	// workspace URL for the web application and REST APIs is
	// <workspace-deployment-name>.cloud.databricks.com. For example, if the
	// deployment name is abcsales, your workspace URL will be
	// https://abcsales.cloud.databricks.com. Hyphens are allowed. This property
	// supports only the set of characters that are allowed in a subdomain. To
	// set this value, you must have a deployment name prefix. Contact your
	// Databricks account team to add an account deployment name prefix to your
	// account. Workspace deployment names follow the account prefix and a
	// hyphen. For example, if your account's deployment prefix is acme and the
	// workspace deployment name is workspace-1, the JSON response for the
	// deployment_name field becomes acme-workspace-1. The workspace URL would
	// be acme-workspace-1.cloud.databricks.com. You can also set the
	// deployment_name to the reserved keyword EMPTY if you want the deployment
	// name to only include the deployment prefix. For example, if your
	// account's deployment prefix is acme and the workspace deployment name is
	// EMPTY, the deployment_name becomes acme only and the workspace URL is
	// acme.cloud.databricks.com. This value must be unique across all
	// non-deleted deployments across all AWS regions. If a new workspace omits
	// this property, the server generates a unique deployment name for you with
	// the pattern dbc-xxxxxxxx-xxxx.
	DeploymentName string `json:"deployment_name,omitempty"`

	// GCP-managed network configuration; see GcpManagedNetworkConfig.
	GcpManagedNetworkConfig *GcpManagedNetworkConfig `json:"gcp_managed_network_config,omitempty"`

	// GKE configuration for the workspace; see GkeConfig.
	GkeConfig *GkeConfig `json:"gke_config,omitempty"`
	// The Google Cloud region of the workspace data plane in your Google
	// account (for example, `us-east4`).
	Location string `json:"location,omitempty"`
	// The ID of the workspace's managed services encryption key configuration
	// object. This is used to help protect and control access to the
	// workspace's notebooks, secrets, Databricks SQL queries, and query
	// history. The provided key configuration object property use_cases must
	// contain MANAGED_SERVICES.
	ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"`
	// The object ID of network connectivity config. Once assigned, the
	// workspace serverless compute resources use the same set of stable IP CIDR
	// blocks and optional private link to access your resources.
	NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"`
	// The ID of the workspace's network configuration object. To use AWS
	// PrivateLink, this field is required.
	NetworkId string `json:"network_id,omitempty"`

	// Pricing tier of the workspace; see PricingTier.
	PricingTier PricingTier `json:"pricing_tier,omitempty"`
	// ID of the workspace's private access settings object. Only used for
	// PrivateLink. You must specify this ID if you are using [AWS PrivateLink]
	// for either front-end (user-to-workspace connection), back-end (data plane
	// to control plane connection), or both connection types. Before
	// configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink/
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"`
	// ID of the workspace's storage configuration object.
	StorageConfigurationId string `json:"storage_configuration_id,omitempty"`
	// The ID of the workspace's storage encryption key configuration object.
	// This is used to encrypt the workspace's root S3 bucket (root DBFS and
	// system data) and, optionally, cluster EBS volumes. The provided key
	// configuration object property use_cases must contain STORAGE.
	StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"`
	// The human-readable name of the workspace.
	WorkspaceName string `json:"workspace_name,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (CreateWorkspaceRequest) MarshalJSON added in v0.23.0

func (s CreateWorkspaceRequest) MarshalJSON() ([]byte, error)

func (*CreateWorkspaceRequest) UnmarshalJSON added in v0.23.0

func (s *CreateWorkspaceRequest) UnmarshalJSON(b []byte) error

type Credential

// Credential is a credential configuration object as returned by the
// Account API (IDs, name, creation time, and the registered AWS role).
type Credential struct {
	// The Databricks account ID that hosts the credential.
	AccountId string `json:"account_id,omitempty"`

	// The registered AWS credential (STS role) information.
	AwsCredentials *AwsCredentials `json:"aws_credentials,omitempty"`
	// Time in epoch milliseconds when the credential was created.
	CreationTime int64 `json:"creation_time,omitempty"`
	// Databricks credential configuration ID.
	CredentialsId string `json:"credentials_id,omitempty"`
	// The human-readable name of the credential configuration object.
	CredentialsName string `json:"credentials_name,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (Credential) MarshalJSON added in v0.23.0

func (s Credential) MarshalJSON() ([]byte, error)

func (*Credential) UnmarshalJSON added in v0.23.0

func (s *Credential) UnmarshalJSON(b []byte) error

type CredentialsAPI

// CredentialsAPI provides the credential-configuration operations of the
// Account API; construct it with NewCredentials.
type CredentialsAPI struct {
	// contains filtered or unexported fields
}

These APIs manage credential configurations for this workspace. Databricks needs access to a cross-account service IAM role in your AWS account so that Databricks can deploy clusters in the appropriate VPC for the new workspace. A credential configuration encapsulates this role information, and its ID is used when creating a new workspace.

func NewCredentials

func NewCredentials(client *client.DatabricksClient) *CredentialsAPI

func (*CredentialsAPI) Create

func (a *CredentialsAPI) Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error)
Example (Credentials)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

// cleanup

_, err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
Example (LogDelivery)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

creds, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_LOGDELIVERY_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", creds)

// cleanup

_, err = a.Credentials.DeleteByCredentialsId(ctx, creds.CredentialsId)
if err != nil {
	panic(err)
}
Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

// cleanup

_, err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}

func (*CredentialsAPI) CredentialCredentialsNameToCredentialsIdMap

func (a *CredentialsAPI) CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error)

CredentialCredentialsNameToCredentialsIdMap calls CredentialsAPI.List and creates a map of results with Credential.CredentialsName as key and Credential.CredentialsId as value.

Returns an error if there's more than one Credential with the same .CredentialsName.

Note: All Credential instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*CredentialsAPI) Delete

func (a *CredentialsAPI) Delete(ctx context.Context, request DeleteCredentialRequest) (*Credential, error)

func (*CredentialsAPI) DeleteByCredentialsId

func (a *CredentialsAPI) DeleteByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error)

Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any workspace.

func (*CredentialsAPI) Get

func (a *CredentialsAPI) Get(ctx context.Context, request GetCredentialRequest) (*Credential, error)
Example (Credentials)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

byId, err := a.Credentials.GetByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

_, err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}

func (*CredentialsAPI) GetByCredentialsId

func (a *CredentialsAPI) GetByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error)

Gets a Databricks credential configuration object for an account, both specified by ID.

func (*CredentialsAPI) GetByCredentialsName

func (a *CredentialsAPI) GetByCredentialsName(ctx context.Context, name string) (*Credential, error)

GetByCredentialsName calls CredentialsAPI.CredentialCredentialsNameToCredentialsIdMap and returns a single Credential.

Returns an error if there's more than one Credential with the same .CredentialsName.

Note: All Credential instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*CredentialsAPI) List

func (a *CredentialsAPI) List(ctx context.Context) ([]Credential, error)
Example (Credentials)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

configs, err := a.Credentials.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", configs)

type CredentialsInterface added in v0.29.0

// CredentialsInterface is the full surface of CredentialsAPI, including the
// generated convenience lookups, suitable for mocking in tests.
type CredentialsInterface interface {

	// Creates a Databricks credential configuration that represents cloud
	// cross-account credentials for a specified account. Databricks uses this to
	// set up network infrastructure properly to host Databricks clusters. For your
	// AWS IAM role, you need to trust the External ID (the Databricks Account API
	// account ID) in the returned credential object, and configure the required
	// access policy.
	//
	// Save the response's `credentials_id` field, which is the ID for your new
	// credential configuration object.
	//
	// For information about how to create a new workspace with this API, see
	// [Create a new workspace using the Account API]
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error)

	// Deletes a Databricks credential configuration object for an account, both
	// specified by ID. You cannot delete a credential that is associated with any
	// workspace.
	Delete(ctx context.Context, request DeleteCredentialRequest) (*Credential, error)

	// Deletes a Databricks credential configuration object for an account, both
	// specified by ID. You cannot delete a credential that is associated with any
	// workspace.
	DeleteByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error)

	// Gets a Databricks credential configuration object for an account, both
	// specified by ID.
	Get(ctx context.Context, request GetCredentialRequest) (*Credential, error)

	// Gets a Databricks credential configuration object for an account, both
	// specified by ID.
	GetByCredentialsId(ctx context.Context, credentialsId string) (*Credential, error)

	// List Databricks credential configuration objects for an account, specified by
	// ID.
	List(ctx context.Context) ([]Credential, error)

	// CredentialCredentialsNameToCredentialsIdMap calls [CredentialsAPI.List] and creates a map of results with [Credential].CredentialsName as key and [Credential].CredentialsId as value.
	//
	// Returns an error if there's more than one [Credential] with the same .CredentialsName.
	//
	// Note: All [Credential] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	CredentialCredentialsNameToCredentialsIdMap(ctx context.Context) (map[string]string, error)

	// GetByCredentialsName calls [CredentialsAPI.CredentialCredentialsNameToCredentialsIdMap] and returns a single [Credential].
	//
	// Returns an error if there's more than one [Credential] with the same .CredentialsName.
	//
	// Note: All [Credential] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByCredentialsName(ctx context.Context, name string) (*Credential, error)
}

type CredentialsService deprecated

// CredentialsService is the low-level service interface for credential
// configurations (Create/Delete/Get/List only, no convenience lookups).
// Deprecated elsewhere in this package in favor of CredentialsInterface.
type CredentialsService interface {

	// Creates a Databricks credential configuration that represents cloud
	// cross-account credentials for a specified account. Databricks uses this
	// to set up network infrastructure properly to host Databricks clusters.
	// For your AWS IAM role, you need to trust the External ID (the Databricks
	// Account API account ID) in the returned credential object, and configure
	// the required access policy.
	//
	// Save the response's `credentials_id` field, which is the ID for your new
	// credential configuration object.
	//
	// For information about how to create a new workspace with this API, see
	// [Create a new workspace using the Account API]
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error)

	// Deletes a Databricks credential configuration object for an account, both
	// specified by ID. You cannot delete a credential that is associated with
	// any workspace.
	Delete(ctx context.Context, request DeleteCredentialRequest) (*Credential, error)

	// Gets a Databricks credential configuration object for an account, both
	// specified by ID.
	Get(ctx context.Context, request GetCredentialRequest) (*Credential, error)

	// List Databricks credential configuration objects for an account,
	// specified by ID.
	List(ctx context.Context) ([]Credential, error)
}

These APIs manage credential configurations for this workspace. Databricks needs access to a cross-account service IAM role in your AWS account so that Databricks can deploy clusters in the appropriate VPC for the new workspace. A credential configuration encapsulates this role information, and its ID is used when creating a new workspace.

Deprecated: Do not use this interface, it will be removed in a future version of the SDK.

type CustomerFacingComputeMode added in v0.86.0

type CustomerFacingComputeMode string

Corresponds to compute mode defined here: https://src.dev.databricks.com/databricks/universe@9076536b18479afd639d1c1f9dd5a59f72215e69/-/blob/central/api/common.proto?L872

const CustomerFacingComputeModeHybrid CustomerFacingComputeMode = `HYBRID`
const CustomerFacingComputeModeServerless CustomerFacingComputeMode = `SERVERLESS`

func (*CustomerFacingComputeMode) Set added in v0.86.0

Set raw string value and validate it against allowed values

func (*CustomerFacingComputeMode) String added in v0.86.0

func (f *CustomerFacingComputeMode) String() string

String representation for fmt.Print

func (*CustomerFacingComputeMode) Type added in v0.86.0

Type always returns CustomerFacingComputeMode to satisfy [pflag.Value] interface

func (*CustomerFacingComputeMode) Values added in v0.86.0

Values returns all possible values for CustomerFacingComputeMode.

There is no guarantee on the order of the values in the slice.

type CustomerFacingGcpCloudResourceContainer

// CustomerFacingGcpCloudResourceContainer identifies the customer's GCP
// project that hosts the workspace's cloud resources.
type CustomerFacingGcpCloudResourceContainer struct {
	// The GCP project ID.
	ProjectId string `json:"project_id,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (CustomerFacingGcpCloudResourceContainer) MarshalJSON added in v0.23.0

func (s CustomerFacingGcpCloudResourceContainer) MarshalJSON() ([]byte, error)

func (*CustomerFacingGcpCloudResourceContainer) UnmarshalJSON added in v0.23.0

func (s *CustomerFacingGcpCloudResourceContainer) UnmarshalJSON(b []byte) error

type CustomerFacingStorageMode added in v0.86.0

type CustomerFacingStorageMode string
const CustomerFacingStorageModeCustomerHosted CustomerFacingStorageMode = `CUSTOMER_HOSTED`
const CustomerFacingStorageModeDefaultStorage CustomerFacingStorageMode = `DEFAULT_STORAGE`

func (*CustomerFacingStorageMode) Set added in v0.86.0

Set raw string value and validate it against allowed values

func (*CustomerFacingStorageMode) String added in v0.86.0

func (f *CustomerFacingStorageMode) String() string

String representation for fmt.Print

func (*CustomerFacingStorageMode) Type added in v0.86.0

Type always returns CustomerFacingStorageMode to satisfy [pflag.Value] interface

func (*CustomerFacingStorageMode) Values added in v0.86.0

Values returns all possible values for CustomerFacingStorageMode.

There is no guarantee on the order of the values in the slice.

type CustomerManagedKey

// CustomerManagedKey is an encryption key configuration object as returned by
// the Account API; exactly one of the cloud-specific key-info fields is
// expected to be populated depending on the cloud — NOTE(review): confirm.
type CustomerManagedKey struct {
	// The Databricks account ID that holds the customer-managed key.
	AccountId string `json:"account_id,omitempty"`

	// AWS KMS key information, if this is an AWS key.
	AwsKeyInfo *AwsKeyInfo `json:"aws_key_info,omitempty"`

	// Azure Key Vault key information, if this is an Azure key.
	AzureKeyInfo *AzureKeyInfo `json:"azure_key_info,omitempty"`
	// Time in epoch milliseconds when the customer key was created.
	CreationTime int64 `json:"creation_time,omitempty"`
	// ID of the encryption key configuration object.
	CustomerManagedKeyId string `json:"customer_managed_key_id,omitempty"`

	// GCP KMS key information, if this is a GCP key.
	GcpKeyInfo *GcpKeyInfo `json:"gcp_key_info,omitempty"`
	// The cases that the key can be used for.
	UseCases []KeyUseCase `json:"use_cases,omitempty"`

	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (CustomerManagedKey) MarshalJSON added in v0.23.0

func (s CustomerManagedKey) MarshalJSON() ([]byte, error)

func (*CustomerManagedKey) UnmarshalJSON added in v0.23.0

func (s *CustomerManagedKey) UnmarshalJSON(b []byte) error

type DeleteCredentialRequest

// DeleteCredentialRequest identifies the credential configuration to delete.
type DeleteCredentialRequest struct {
	// Databricks Account API credential configuration ID
	CredentialsId string `json:"-" url:"-"`
}

type DeleteEncryptionKeyRequest

// DeleteEncryptionKeyRequest identifies the encryption key configuration to
// delete.
type DeleteEncryptionKeyRequest struct {
	// Databricks encryption key configuration ID.
	CustomerManagedKeyId string `json:"-" url:"-"`
}

type DeleteNetworkRequest

// DeleteNetworkRequest identifies the network configuration to delete.
type DeleteNetworkRequest struct {
	// Databricks Account API network configuration ID.
	NetworkId string `json:"-" url:"-"`
}

type DeletePrivateAccesRequest

// DeletePrivateAccesRequest identifies the private access settings to delete.
// NOTE(review): the "Acces" spelling matches the generated API type name; do not rename.
type DeletePrivateAccesRequest struct {
	// Databricks Account API private access settings ID.
	PrivateAccessSettingsId string `json:"-" url:"-"`
}

type DeleteStorageRequest

// DeleteStorageRequest identifies the storage configuration to delete.
type DeleteStorageRequest struct {
	// Databricks Account API storage configuration ID.
	StorageConfigurationId string `json:"-" url:"-"`
}

type DeleteVpcEndpointRequest

// DeleteVpcEndpointRequest identifies the VPC endpoint registration to delete.
type DeleteVpcEndpointRequest struct {
	// Databricks VPC endpoint ID.
	VpcEndpointId string `json:"-" url:"-"`
}

type DeleteWorkspaceRequest

// DeleteWorkspaceRequest identifies the workspace to delete.
type DeleteWorkspaceRequest struct {
	// Workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

type EncryptionKeysAPI

// EncryptionKeysAPI exposes the account-level encryption key configuration API.
type EncryptionKeysAPI struct {
	// contains filtered or unexported fields
}

These APIs manage encryption key configurations for this workspace (optional). A key configuration encapsulates the AWS KMS key information and some information about how the key configuration can be used. There are two possible uses for key configurations:

* Managed services: A key configuration can be used to encrypt a workspace's notebook and secret data in the control plane, as well as Databricks SQL queries and query history. * Storage: A key configuration can be used to encrypt a workspace's DBFS and EBS data in the data plane.

In both of these cases, the key configuration's ID is used when creating a new workspace. This Preview feature is available if your account is on the E2 version of the platform. Updating a running workspace with workspace storage encryption requires that the workspace is on the E2 version of the platform. If you have an older workspace, it might not be on the E2 version of the platform. If you are not sure, contact your Databricks representative.

func NewEncryptionKeys

func NewEncryptionKeys(client *client.DatabricksClient) *EncryptionKeysAPI

func (*EncryptionKeysAPI) Create

func (a *EncryptionKeysAPI) Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error)
Example (EncryptionKeys)
// Example: create an encryption key configuration for managed services from
// env-provided KMS key details, then clean it up by its configuration ID.
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.EncryptionKeys.Create(ctx, provisioning.CreateCustomerManagedKeyRequest{
	AwsKeyInfo: &provisioning.CreateAwsKeyInfo{
		KeyArn:   os.Getenv("TEST_MANAGED_KMS_KEY_ARN"),
		KeyAlias: os.Getenv("TEST_STORAGE_KMS_KEY_ALIAS"),
	},
	UseCases: []provisioning.KeyUseCase{provisioning.KeyUseCaseManagedServices},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

_, err = a.EncryptionKeys.DeleteByCustomerManagedKeyId(ctx, created.CustomerManagedKeyId)
if err != nil {
	panic(err)
}

func (*EncryptionKeysAPI) Delete

func (a *EncryptionKeysAPI) Delete(ctx context.Context, request DeleteEncryptionKeyRequest) (*CustomerManagedKey, error)

func (*EncryptionKeysAPI) DeleteByCustomerManagedKeyId

func (a *EncryptionKeysAPI) DeleteByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error)

Deletes a customer-managed key configuration object for an account. You cannot delete a configuration that is associated with a running workspace.

func (*EncryptionKeysAPI) Get

func (a *EncryptionKeysAPI) Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error)
Example (EncryptionKeys)
// Example: create an encryption key configuration, fetch it back by ID, then
// clean it up.
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.EncryptionKeys.Create(ctx, provisioning.CreateCustomerManagedKeyRequest{
	AwsKeyInfo: &provisioning.CreateAwsKeyInfo{
		KeyArn:   os.Getenv("TEST_MANAGED_KMS_KEY_ARN"),
		KeyAlias: os.Getenv("TEST_STORAGE_KMS_KEY_ALIAS"),
	},
	UseCases: []provisioning.KeyUseCase{provisioning.KeyUseCaseManagedServices},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := a.EncryptionKeys.GetByCustomerManagedKeyId(ctx, created.CustomerManagedKeyId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

_, err = a.EncryptionKeys.DeleteByCustomerManagedKeyId(ctx, created.CustomerManagedKeyId)
if err != nil {
	panic(err)
}

func (*EncryptionKeysAPI) GetByCustomerManagedKeyId

func (a *EncryptionKeysAPI) GetByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error)

Gets a customer-managed key configuration object for an account, specified by ID. If assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data.

**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.

This operation is available only if your account is on the E2 version of the platform.

func (*EncryptionKeysAPI) List

func (a *EncryptionKeysAPI) List(ctx context.Context) ([]CustomerManagedKey, error)
Example (EncryptionKeys)
// Example: list all encryption key configurations in the account.
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

all, err := a.EncryptionKeys.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)

type EncryptionKeysInterface added in v0.29.0

// EncryptionKeysInterface is the account-level encryption key configuration API.
type EncryptionKeysInterface interface {

	// Creates a customer-managed key configuration object for an account, specified
	// by ID. This operation uploads a reference to a customer-managed key to
	// Databricks. If the key is assigned as a workspace's customer-managed key for
	// managed services, Databricks uses the key to encrypt the workspace's notebooks
	// and secrets in the control plane, in addition to Databricks SQL queries and
	// query history. If it is specified as a workspace's customer-managed key for
	// workspace storage, the key encrypts the workspace's root S3 bucket (which
	// contains the workspace's root DBFS and system data) and, optionally, cluster
	// EBS volume data.
	//
	// **Important**: Customer-managed keys are supported only for some deployment
	// types, subscription types, and AWS regions that currently support creation of
	// Databricks workspaces.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform or on a select custom plan that allows multiple workspaces per
	// account.
	Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error)

	// Deletes a customer-managed key configuration object for an account. You
	// cannot delete a configuration that is associated with a running workspace.
	Delete(ctx context.Context, request DeleteEncryptionKeyRequest) (*CustomerManagedKey, error)

	// Deletes a customer-managed key configuration object for an account. You
	// cannot delete a configuration that is associated with a running workspace.
	DeleteByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error)

	// Gets a customer-managed key configuration object for an account, specified by
	// ID. If assigned as a workspace's customer-managed key for managed
	// services, Databricks uses the key to encrypt the workspace's notebooks and
	// secrets in the control plane, in addition to Databricks SQL queries and query
	// history. If it is specified as a workspace's customer-managed key for
	// storage, the key encrypts the workspace's root S3 bucket (which contains the
	// workspace's root DBFS and system data) and, optionally, cluster EBS volume
	// data.
	//
	// **Important**: Customer-managed keys are supported only for some deployment
	// types, subscription types, and AWS regions.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform.
	Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error)

	// Gets a customer-managed key configuration object for an account, specified by
	// ID. If assigned as a workspace's customer-managed key for managed
	// services, Databricks uses the key to encrypt the workspace's notebooks and
	// secrets in the control plane, in addition to Databricks SQL queries and query
	// history. If it is specified as a workspace's customer-managed key for
	// storage, the key encrypts the workspace's root S3 bucket (which contains the
	// workspace's root DBFS and system data) and, optionally, cluster EBS volume
	// data.
	//
	// **Important**: Customer-managed keys are supported only for some deployment
	// types, subscription types, and AWS regions.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform.
	GetByCustomerManagedKeyId(ctx context.Context, customerManagedKeyId string) (*CustomerManagedKey, error)

	// Lists Databricks customer-managed key configurations for an account.
	List(ctx context.Context) ([]CustomerManagedKey, error)
}

type EncryptionKeysService deprecated

// EncryptionKeysService is the low-level service interface for encryption key
// configurations.
//
// Deprecated: Do not use this interface, it will be removed in a future
// version of the SDK.
type EncryptionKeysService interface {

	// Creates a customer-managed key configuration object for an account,
	// specified by ID. This operation uploads a reference to a customer-managed
	// key to Databricks. If the key is assigned as a workspace's
	// customer-managed key for managed services, Databricks uses the key to
	// encrypt the workspace's notebooks and secrets in the control plane, in
	// addition to Databricks SQL queries and query history. If it is specified
	// as a workspace's customer-managed key for workspace storage, the key
	// encrypts the workspace's root S3 bucket (which contains the workspace's
	// root DBFS and system data) and, optionally, cluster EBS volume data.
	//
	// **Important**: Customer-managed keys are supported only for some
	// deployment types, subscription types, and AWS regions that currently
	// support creation of Databricks workspaces.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform or on a select custom plan that allows multiple workspaces
	// per account.
	Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error)

	// Deletes a customer-managed key configuration object for an account. You
	// cannot delete a configuration that is associated with a running
	// workspace.
	Delete(ctx context.Context, request DeleteEncryptionKeyRequest) (*CustomerManagedKey, error)

	// Gets a customer-managed key configuration object for an account,
	// specified by ID. If assigned as a workspace's customer-managed key for
	// managed services, Databricks uses the key to encrypt the workspace's
	// notebooks and secrets in the control plane, in addition to Databricks SQL
	// queries and query history. If it is specified as a workspace's
	// customer-managed key for storage, the key encrypts the workspace's root
	// S3 bucket (which contains the workspace's root DBFS and system data) and,
	// optionally, cluster EBS volume data.
	//
	// **Important**: Customer-managed keys are supported only for some
	// deployment types, subscription types, and AWS regions.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform.
	Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error)

	// Lists Databricks customer-managed key configurations for an account.
	List(ctx context.Context) ([]CustomerManagedKey, error)
}

These APIs manage encryption key configurations for this workspace (optional). A key configuration encapsulates the AWS KMS key information and some information about how the key configuration can be used. There are two possible uses for key configurations:

* Managed services: A key configuration can be used to encrypt a workspace's notebook and secret data in the control plane, as well as Databricks SQL queries and query history. * Storage: A key configuration can be used to encrypt a workspace's DBFS and EBS data in the data plane.

In both of these cases, the key configuration's ID is used when creating a new workspace. This Preview feature is available if your account is on the E2 version of the platform. Updating a running workspace with workspace storage encryption requires that the workspace is on the E2 version of the platform. If you have an older workspace, it might not be on the E2 version of the platform. If you are not sure, contact your Databricks representative.

Deprecated: Do not use this interface, it will be removed in a future version of the SDK.

type EndpointUseCase

// EndpointUseCase describes what a registered VPC endpoint is used for.
type EndpointUseCase string

// Endpoint reaches the Databricks secure cluster connectivity relay.
const EndpointUseCaseDataplaneRelayAccess EndpointUseCase = `DATAPLANE_RELAY_ACCESS`
// Endpoint reaches the workspace (front-end / REST API) plane.
const EndpointUseCaseWorkspaceAccess EndpointUseCase = `WORKSPACE_ACCESS`

func (*EndpointUseCase) Set

func (f *EndpointUseCase) Set(v string) error

Set raw string value and validate it against allowed values

func (*EndpointUseCase) String

func (f *EndpointUseCase) String() string

String representation for fmt.Print

func (*EndpointUseCase) Type

func (f *EndpointUseCase) Type() string

Type always returns EndpointUseCase to satisfy [pflag.Value] interface

func (*EndpointUseCase) Values added in v0.72.0

func (f *EndpointUseCase) Values() []EndpointUseCase

Values returns all possible values for EndpointUseCase.

There is no guarantee on the order of the values in the slice.

type ErrorType

// ErrorType categorizes a network configuration error (see NetworkHealth).
type ErrorType string

ErrorType and WarningType are used to represent the type of error or warning by NetworkHealth and NetworkWarning defined in central/api/accounts/accounts.proto

// Allowed ErrorType values, naming the misconfigured network component.
const ErrorTypeCredentials ErrorType = `credentials`
const ErrorTypeNetworkAcl ErrorType = `networkAcl`
const ErrorTypeSecurityGroup ErrorType = `securityGroup`
const ErrorTypeSubnet ErrorType = `subnet`
const ErrorTypeVpc ErrorType = `vpc`

func (*ErrorType) Set

func (f *ErrorType) Set(v string) error

Set raw string value and validate it against allowed values

func (*ErrorType) String

func (f *ErrorType) String() string

String representation for fmt.Print

func (*ErrorType) Type

func (f *ErrorType) Type() string

Type always returns ErrorType to satisfy [pflag.Value] interface

func (*ErrorType) Values added in v0.72.0

func (f *ErrorType) Values() []ErrorType

Values returns all possible values for ErrorType.

There is no guarantee on the order of the values in the slice.

type GcpCommonNetworkConfig added in v0.86.0

// GcpCommonNetworkConfig holds shared GCP workspace network settings.
// DEPRECATED: use GkeConfig instead.
type GcpCommonNetworkConfig struct {
	// The IP range that will be used to allocate GKE cluster master resources
	// from. This field must not be set if
	// gke_cluster_type=PUBLIC_NODE_PUBLIC_MASTER.
	GkeClusterMasterIpRange string `json:"gke_cluster_master_ip_range,omitempty"`
	// The type of network connectivity of the GKE cluster.
	GkeConnectivityType GkeConfigConnectivityType `json:"gke_connectivity_type,omitempty"`
	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

The shared network config for GCP workspace. This object has common network configurations that are network attributions of a workspace. DEPRECATED. Use GkeConfig instead.

func (GcpCommonNetworkConfig) MarshalJSON added in v0.86.0

func (s GcpCommonNetworkConfig) MarshalJSON() ([]byte, error)

func (*GcpCommonNetworkConfig) UnmarshalJSON added in v0.86.0

func (s *GcpCommonNetworkConfig) UnmarshalJSON(b []byte) error

type GcpKeyInfo added in v0.9.0

// GcpKeyInfo describes a GCP Cloud KMS key used as a customer-managed key.
type GcpKeyInfo struct {
	// Globally unique service account email that has access to the KMS key. The
	// service account exists within the Databricks CP project.
	GcpServiceAccount *GcpServiceAccount `json:"gcp_service_account,omitempty"`
	// Globally unique kms key resource id of the form
	// projects/testProjectId/locations/us-east4/keyRings/gcpCmkKeyRing/cryptoKeys/cmk-eastus4
	KmsKeyId string `json:"kms_key_id"`
}

type GcpManagedNetworkConfig

// GcpManagedNetworkConfig is the Databricks-managed network configuration for
// a GCP workspace.
type GcpManagedNetworkConfig struct {
	// The IP range that will be used to allocate GKE cluster Pods from.
	GkeClusterPodIpRange string `json:"gke_cluster_pod_ip_range,omitempty"`
	// The IP range that will be used to allocate GKE cluster Services from.
	GkeClusterServiceIpRange string `json:"gke_cluster_service_ip_range,omitempty"`
	// The IP range which will be used to allocate GKE cluster nodes from. Note:
	// Pods, services and master IP range must be mutually exclusive.
	SubnetCidr string `json:"subnet_cidr,omitempty"`
	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

The network configuration for the workspace.

func (GcpManagedNetworkConfig) MarshalJSON added in v0.23.0

func (s GcpManagedNetworkConfig) MarshalJSON() ([]byte, error)

func (*GcpManagedNetworkConfig) UnmarshalJSON added in v0.23.0

func (s *GcpManagedNetworkConfig) UnmarshalJSON(b []byte) error

type GcpNetworkInfo

// GcpNetworkInfo describes a customer-provided (BYO) VPC for a GCP workspace.
type GcpNetworkInfo struct {
	// The GCP project ID for network resources. This project is where the VPC
	// and subnet resides.
	NetworkProjectId string `json:"network_project_id"`
	// Name of the secondary range within the subnet that will be used by GKE as
	// Pod IP range. This is BYO VPC specific. DB VPC uses
	// network.getGcpManagedNetworkConfig.getGkeClusterPodIpRange
	PodIpRangeName string `json:"pod_ip_range_name"`
	// Name of the secondary range within the subnet that will be used by GKE as
	// Service IP range.
	ServiceIpRangeName string `json:"service_ip_range_name"`
	// The customer-provided Subnet ID that will be available to Clusters in
	// Workspaces using this Network.
	SubnetId string `json:"subnet_id"`
	// GCP region the subnet resides in.
	SubnetRegion string `json:"subnet_region"`
	// The customer-provided VPC ID.
	VpcId string `json:"vpc_id"`
}

type GcpServiceAccount added in v0.94.0

// GcpServiceAccount identifies the service account that can access a GCP KMS
// key (see GcpKeyInfo.GcpServiceAccount).
type GcpServiceAccount struct {
	// Email of the service account.
	ServiceAccountEmail string `json:"service_account_email,omitempty"`
	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (GcpServiceAccount) MarshalJSON added in v0.94.0

func (s GcpServiceAccount) MarshalJSON() ([]byte, error)

func (*GcpServiceAccount) UnmarshalJSON added in v0.94.0

func (s *GcpServiceAccount) UnmarshalJSON(b []byte) error

type GcpVpcEndpointInfo

// GcpVpcEndpointInfo describes a GCP Private Service Connect (PSC) endpoint
// registration. (Field semantics inferred from names; confirm against the
// Databricks Account API reference.)
type GcpVpcEndpointInfo struct {
	// GCP region of the endpoint.
	EndpointRegion string `json:"endpoint_region"`
	// GCP project ID the endpoint belongs to.
	ProjectId string `json:"project_id"`
	// PSC connection ID.
	PscConnectionId string `json:"psc_connection_id,omitempty"`
	// Name of the PSC endpoint.
	PscEndpointName string `json:"psc_endpoint_name"`
	// ID of the service attachment the endpoint connects to.
	ServiceAttachmentId string `json:"service_attachment_id,omitempty"`
	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (GcpVpcEndpointInfo) MarshalJSON added in v0.23.0

func (s GcpVpcEndpointInfo) MarshalJSON() ([]byte, error)

func (*GcpVpcEndpointInfo) UnmarshalJSON added in v0.23.0

func (s *GcpVpcEndpointInfo) UnmarshalJSON(b []byte) error

type GetCredentialRequest

// GetCredentialRequest identifies the credential configuration to fetch.
type GetCredentialRequest struct {
	// Credential configuration ID
	CredentialsId string `json:"-" url:"-"`
}

type GetEncryptionKeyRequest

// GetEncryptionKeyRequest identifies the encryption key configuration to fetch.
type GetEncryptionKeyRequest struct {
	// Databricks encryption key configuration ID.
	CustomerManagedKeyId string `json:"-" url:"-"`
}

type GetNetworkRequest

// GetNetworkRequest identifies the network configuration to fetch.
type GetNetworkRequest struct {
	// Databricks Account API network configuration ID.
	NetworkId string `json:"-" url:"-"`
}

type GetPrivateAccesRequest

// GetPrivateAccesRequest identifies the private access settings to fetch.
// NOTE(review): the "Acces" spelling matches the generated API type name; do not rename.
type GetPrivateAccesRequest struct {
	// Databricks Account API private access settings ID.
	PrivateAccessSettingsId string `json:"-" url:"-"`
}

type GetStorageRequest

// GetStorageRequest identifies the storage configuration to fetch.
type GetStorageRequest struct {
	// Databricks Account API storage configuration ID.
	StorageConfigurationId string `json:"-" url:"-"`
}

type GetVpcEndpointRequest

// GetVpcEndpointRequest identifies the VPC endpoint registration to fetch.
type GetVpcEndpointRequest struct {
	// Databricks VPC endpoint ID.
	VpcEndpointId string `json:"-" url:"-"`
}

type GetWorkspaceRequest

// GetWorkspaceRequest identifies the workspace to fetch.
type GetWorkspaceRequest struct {
	// Workspace ID.
	WorkspaceId int64 `json:"-" url:"-"`
}

type GkeConfig

// GkeConfig holds the configuration of the GKE cluster used by a GCP
// workspace.
type GkeConfig struct {
	// The type of network connectivity of the GKE cluster.
	ConnectivityType GkeConfigConnectivityType `json:"connectivity_type,omitempty"`
	// The IP range that will be used to allocate GKE cluster master resources
	// from. This field must not be set if
	// gke_cluster_type=PUBLIC_NODE_PUBLIC_MASTER.
	MasterIpRange string `json:"master_ip_range,omitempty"`
	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

The configurations of the GKE cluster used by the GCP workspace.

func (GkeConfig) MarshalJSON added in v0.23.0

func (s GkeConfig) MarshalJSON() ([]byte, error)

func (*GkeConfig) UnmarshalJSON added in v0.23.0

func (s *GkeConfig) UnmarshalJSON(b []byte) error

type GkeConfigConnectivityType

// GkeConfigConnectivityType specifies the network connectivity of the GKE
// nodes and master (see the PRIVATE_NODE/PUBLIC_NODE constants).
type GkeConfigConnectivityType string

Specifies the network connectivity types for the GKE nodes and the GKE master network.

Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the workspace. The GKE nodes will not have public IPs.

Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of a public GKE cluster have public IP addresses.

// Private GKE cluster: nodes have no public IPs.
const GkeConfigConnectivityTypePrivateNodePublicMaster GkeConfigConnectivityType = `PRIVATE_NODE_PUBLIC_MASTER`
// Public GKE cluster: nodes have public IP addresses.
const GkeConfigConnectivityTypePublicNodePublicMaster GkeConfigConnectivityType = `PUBLIC_NODE_PUBLIC_MASTER`

func (*GkeConfigConnectivityType) Set

Set raw string value and validate it against allowed values

func (*GkeConfigConnectivityType) String

func (f *GkeConfigConnectivityType) String() string

String representation for fmt.Print

func (*GkeConfigConnectivityType) Type

Type always returns GkeConfigConnectivityType to satisfy [pflag.Value] interface

func (*GkeConfigConnectivityType) Values added in v0.72.0

Values returns all possible values for GkeConfigConnectivityType.

There is no guarantee on the order of the values in the slice.

type KeyAccessConfiguration added in v0.86.0

// KeyAccessConfiguration holds the credential used to access the Azure Key
// Vault key (see AzureKeyInfo.KeyAccessConfiguration).
type KeyAccessConfiguration struct {
	// ID of the credential used to access the key vault.
	CredentialId string `json:"credential_id,omitempty"`
	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

The credential ID that is used to access the key vault.

func (KeyAccessConfiguration) MarshalJSON added in v0.86.0

func (s KeyAccessConfiguration) MarshalJSON() ([]byte, error)

func (*KeyAccessConfiguration) UnmarshalJSON added in v0.86.0

func (s *KeyAccessConfiguration) UnmarshalJSON(b []byte) error

type KeyUseCase

// KeyUseCase enumerates what a customer-managed key may encrypt.
type KeyUseCase string

// Encrypts managed services data (notebooks, secrets, SQL queries and query
// history) in the control plane.
const KeyUseCaseManagedServices KeyUseCase = `MANAGED_SERVICES`
// Encrypts workspace storage (root DBFS) and, optionally, cluster EBS volumes.
const KeyUseCaseStorage KeyUseCase = `STORAGE`

func (*KeyUseCase) Set

func (f *KeyUseCase) Set(v string) error

Set raw string value and validate it against allowed values

func (*KeyUseCase) String

func (f *KeyUseCase) String() string

String representation for fmt.Print

func (*KeyUseCase) Type

func (f *KeyUseCase) Type() string

Type always returns KeyUseCase to satisfy [pflag.Value] interface

func (*KeyUseCase) Values added in v0.72.0

func (f *KeyUseCase) Values() []KeyUseCase

Values returns all possible values for KeyUseCase.

There is no guarantee on the order of the values in the slice.

type Network

// Network is an account-level network configuration describing a cloud VPC
// and its resources.
type Network struct {
	// The Databricks account ID associated with this network configuration.
	AccountId string `json:"account_id,omitempty"`
	// Time in epoch milliseconds when the network was created.
	CreationTime int64 `json:"creation_time,omitempty"`
	// Array of error messages about the network configuration.
	ErrorMessages []NetworkHealth `json:"error_messages,omitempty"`
	// GCP-specific network details; set for GCP (BYO VPC) networks.
	GcpNetworkInfo *GcpNetworkInfo `json:"gcp_network_info,omitempty"`
	// The Databricks network configuration ID.
	NetworkId string `json:"network_id,omitempty"`
	// The human-readable name of the network configuration.
	NetworkName string `json:"network_name,omitempty"`
	// IDs of one to five security groups associated with this network. Security
	// group IDs **cannot** be used in multiple network configurations.
	SecurityGroupIds []string `json:"security_group_ids,omitempty"`
	// IDs of at least two subnets associated with this network. Subnet IDs
	// **cannot** be used in multiple network configurations.
	SubnetIds []string `json:"subnet_ids,omitempty"`
	// VPC endpoints used by this network; see NetworkVpcEndpoints.
	VpcEndpoints *NetworkVpcEndpoints `json:"vpc_endpoints,omitempty"`
	// The ID of the VPC associated with this network configuration. VPC IDs can
	// be used in multiple networks.
	VpcId string `json:"vpc_id,omitempty"`
	// Status of this network's VPC, as a VpcStatus value.
	VpcStatus VpcStatus `json:"vpc_status,omitempty"`
	// Array of warning messages about the network configuration.
	WarningMessages []NetworkWarning `json:"warning_messages,omitempty"`
	// Workspace ID associated with this network configuration.
	WorkspaceId int64 `json:"workspace_id,omitempty"`
	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (Network) MarshalJSON added in v0.23.0

func (s Network) MarshalJSON() ([]byte, error)

func (*Network) UnmarshalJSON added in v0.23.0

func (s *Network) UnmarshalJSON(b []byte) error

type NetworkHealth

// NetworkHealth is one error message attached to a network configuration
// (see Network.ErrorMessages).
type NetworkHealth struct {
	// Details of the error.
	ErrorMessage string `json:"error_message,omitempty"`
	// Category of the error; see the ErrorType constants.
	ErrorType ErrorType `json:"error_type,omitempty"`
	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (NetworkHealth) MarshalJSON added in v0.23.0

func (s NetworkHealth) MarshalJSON() ([]byte, error)

func (*NetworkHealth) UnmarshalJSON added in v0.23.0

func (s *NetworkHealth) UnmarshalJSON(b []byte) error

type NetworkVpcEndpoints

// NetworkVpcEndpoints lists the VPC endpoints a network uses to reach
// Databricks services.
type NetworkVpcEndpoints struct {
	// The VPC endpoint ID used by this network to access the Databricks secure
	// cluster connectivity relay.
	DataplaneRelay []string `json:"dataplane_relay,omitempty"`
	// The VPC endpoint ID used by this network to access the Databricks REST
	// API.
	RestApi []string `json:"rest_api,omitempty"`
}

type NetworkWarning

// NetworkWarning is one warning message attached to a network configuration
// (see Network.WarningMessages).
type NetworkWarning struct {
	// Details of the warning.
	WarningMessage string `json:"warning_message,omitempty"`
	// Category of the warning; see the WarningType constants.
	WarningType WarningType `json:"warning_type,omitempty"`
	// Names of fields to serialize even when they hold their zero value.
	ForceSendFields []string `json:"-" url:"-"`
}

func (NetworkWarning) MarshalJSON added in v0.23.0

func (s NetworkWarning) MarshalJSON() ([]byte, error)

func (*NetworkWarning) UnmarshalJSON added in v0.23.0

func (s *NetworkWarning) UnmarshalJSON(b []byte) error

type NetworksAPI

// NetworksAPI exposes the account-level network configuration API for
// customer-managed VPCs.
type NetworksAPI struct {
	// contains filtered or unexported fields
}

These APIs manage network configurations for customer-managed VPCs (optional). Its ID is used when creating a new workspace if you use customer-managed VPCs.

func NewNetworks

func NewNetworks(client *client.DatabricksClient) *NetworksAPI

func (*NetworksAPI) Create

func (a *NetworksAPI) Create(ctx context.Context, request CreateNetworkRequest) (*Network, error)
Example (Networks)
// Example: create a network configuration with synthetic (timestamp-derived)
// VPC, subnet, and security group IDs.
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

netw, err := a.Networks.Create(ctx, provisioning.CreateNetworkRequest{
	NetworkName:      fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	VpcId:            fmt.Sprintf("%x", time.Now().UnixNano()),
	SubnetIds:        []string{fmt.Sprintf("%x", time.Now().UnixNano()), fmt.Sprintf("%x", time.Now().UnixNano())},
	SecurityGroupIds: []string{fmt.Sprintf("%x", time.Now().UnixNano())},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", netw)

func (*NetworksAPI) Delete

func (a *NetworksAPI) Delete(ctx context.Context, request DeleteNetworkRequest) (*Network, error)

func (*NetworksAPI) DeleteByNetworkId

func (a *NetworksAPI) DeleteByNetworkId(ctx context.Context, networkId string) (*Network, error)

Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot delete a network that is associated with a workspace.

This operation is available only if your account is on the E2 version of the platform.

func (*NetworksAPI) Get

func (a *NetworksAPI) Get(ctx context.Context, request GetNetworkRequest) (*Network, error)
Example (Networks)
// Example: create a network configuration, then fetch it back by its ID.
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

netw, err := a.Networks.Create(ctx, provisioning.CreateNetworkRequest{
	NetworkName:      fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	VpcId:            fmt.Sprintf("%x", time.Now().UnixNano()),
	SubnetIds:        []string{fmt.Sprintf("%x", time.Now().UnixNano()), fmt.Sprintf("%x", time.Now().UnixNano())},
	SecurityGroupIds: []string{fmt.Sprintf("%x", time.Now().UnixNano())},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", netw)

byId, err := a.Networks.GetByNetworkId(ctx, netw.NetworkId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

func (*NetworksAPI) GetByNetworkId

func (a *NetworksAPI) GetByNetworkId(ctx context.Context, networkId string) (*Network, error)

Gets a Databricks network configuration, which represents a cloud VPC and its resources.

func (*NetworksAPI) GetByNetworkName

func (a *NetworksAPI) GetByNetworkName(ctx context.Context, name string) (*Network, error)

GetByNetworkName calls NetworksAPI.NetworkNetworkNameToNetworkIdMap and returns a single Network.

Returns an error if there's more than one Network with the same .NetworkName.

Note: All Network instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*NetworksAPI) List

func (a *NetworksAPI) List(ctx context.Context) ([]Network, error)
Example (Networks)
// Example: list all network configurations in the account.
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

configs, err := a.Networks.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", configs)

func (*NetworksAPI) NetworkNetworkNameToNetworkIdMap

func (a *NetworksAPI) NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error)

NetworkNetworkNameToNetworkIdMap calls NetworksAPI.List and creates a map of results with Network.NetworkName as key and Network.NetworkId as value.

Returns an error if there's more than one Network with the same .NetworkName.

Note: All Network instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

type NetworksInterface added in v0.29.0

// NetworksInterface is the account-level network configuration API for
// customer-managed VPCs.
type NetworksInterface interface {

	// Creates a Databricks network configuration that represents a VPC and its
	// resources. The VPC will be used for new Databricks clusters. This requires a
	// pre-existing VPC and subnets.
	Create(ctx context.Context, request CreateNetworkRequest) (*Network, error)

	// Deletes a Databricks network configuration, which represents a cloud VPC and
	// its resources. You cannot delete a network that is associated with a
	// workspace.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform.
	Delete(ctx context.Context, request DeleteNetworkRequest) (*Network, error)

	// Deletes a Databricks network configuration, which represents a cloud VPC and
	// its resources. You cannot delete a network that is associated with a
	// workspace.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform.
	DeleteByNetworkId(ctx context.Context, networkId string) (*Network, error)

	// Gets a Databricks network configuration, which represents a cloud VPC and its
	// resources.
	Get(ctx context.Context, request GetNetworkRequest) (*Network, error)

	// Gets a Databricks network configuration, which represents a cloud VPC and its
	// resources.
	GetByNetworkId(ctx context.Context, networkId string) (*Network, error)

	// Lists Databricks network configurations for an account.
	List(ctx context.Context) ([]Network, error)

	// NetworkNetworkNameToNetworkIdMap calls [NetworksAPI.List] and creates a map of results with [Network].NetworkName as key and [Network].NetworkId as value.
	//
	// Returns an error if there's more than one [Network] with the same .NetworkName.
	//
	// Note: All [Network] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	NetworkNetworkNameToNetworkIdMap(ctx context.Context) (map[string]string, error)

	// GetByNetworkName calls [NetworksAPI.NetworkNetworkNameToNetworkIdMap] and returns a single [Network].
	//
	// Returns an error if there's more than one [Network] with the same .NetworkName.
	//
	// Note: All [Network] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByNetworkName(ctx context.Context, name string) (*Network, error)
}

type NetworksService deprecated

type NetworksService interface {

	// Creates a Databricks network configuration that represents a VPC and its
	// resources. The VPC will be used for new Databricks clusters. This
	// requires a pre-existing VPC and subnets.
	Create(ctx context.Context, request CreateNetworkRequest) (*Network, error)

	// Deletes a Databricks network configuration, which represents a cloud VPC
	// and its resources. You cannot delete a network that is associated with a
	// workspace.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform.
	Delete(ctx context.Context, request DeleteNetworkRequest) (*Network, error)

	// Gets a Databricks network configuration, which represents a cloud VPC and
	// its resources.
	Get(ctx context.Context, request GetNetworkRequest) (*Network, error)

	// Lists Databricks network configurations for an account.
	List(ctx context.Context) ([]Network, error)
}

These APIs manage network configurations for customer-managed VPCs (optional). Its ID is used when creating a new workspace if you use customer-managed VPCs.

Deprecated: Do not use this interface, it will be removed in a future version of the SDK.

type PricingTier

type PricingTier string
const PricingTierCommunityEdition PricingTier = `COMMUNITY_EDITION`
const PricingTierDedicated PricingTier = `DEDICATED`
const PricingTierEnterprise PricingTier = `ENTERPRISE`
const PricingTierPremium PricingTier = `PREMIUM`
const PricingTierStandard PricingTier = `STANDARD`
const PricingTierUnknown PricingTier = `UNKNOWN`

func (*PricingTier) Set

func (f *PricingTier) Set(v string) error

Set raw string value and validate it against allowed values

func (*PricingTier) String

func (f *PricingTier) String() string

String representation for fmt.Print

func (*PricingTier) Type

func (f *PricingTier) Type() string

Type always returns PricingTier to satisfy [pflag.Value] interface

func (*PricingTier) Values added in v0.72.0

func (f *PricingTier) Values() []PricingTier

Values returns all possible values for PricingTier.

There is no guarantee on the order of the values in the slice.

type PrivateAccessAPI

type PrivateAccessAPI struct {
	// contains filtered or unexported fields
}

These APIs manage private access settings for this account.

func NewPrivateAccess

func NewPrivateAccess(client *client.DatabricksClient) *PrivateAccessAPI

func (*PrivateAccessAPI) Create

func (a *PrivateAccessAPI) Create(ctx context.Context, request CreatePrivateAccessSettingsRequest) (*PrivateAccessSettings, error)
Example (PrivateAccess)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.PrivateAccess.Create(ctx, provisioning.CreatePrivateAccessSettingsRequest{
	PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Region:                    os.Getenv("AWS_REGION"),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

_, err = a.PrivateAccess.DeleteByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId)
if err != nil {
	panic(err)
}

func (*PrivateAccessAPI) Delete

func (a *PrivateAccessAPI) Delete(ctx context.Context, request DeletePrivateAccesRequest) (*PrivateAccessSettings, error)

func (*PrivateAccessAPI) DeleteByPrivateAccessSettingsId

func (a *PrivateAccessAPI) DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error)

Deletes a Databricks private access settings configuration, both specified by ID.

func (*PrivateAccessAPI) Get

func (a *PrivateAccessAPI) Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error)
Example (PrivateAccess)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.PrivateAccess.Create(ctx, provisioning.CreatePrivateAccessSettingsRequest{
	PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Region:                    os.Getenv("AWS_REGION"),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := a.PrivateAccess.GetByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

_, err = a.PrivateAccess.DeleteByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId)
if err != nil {
	panic(err)
}

func (*PrivateAccessAPI) GetByPrivateAccessSettingsId

func (a *PrivateAccessAPI) GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error)

Gets a Databricks private access settings configuration, both specified by ID.

func (*PrivateAccessAPI) GetByPrivateAccessSettingsName

func (a *PrivateAccessAPI) GetByPrivateAccessSettingsName(ctx context.Context, name string) (*PrivateAccessSettings, error)

GetByPrivateAccessSettingsName calls PrivateAccessAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap and returns a single PrivateAccessSettings.

Returns an error if there's more than one PrivateAccessSettings with the same .PrivateAccessSettingsName.

Note: All PrivateAccessSettings instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*PrivateAccessAPI) List

func (a *PrivateAccessAPI) List(ctx context.Context) ([]PrivateAccessSettings, error)
Example (PrivateAccess)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

all, err := a.PrivateAccess.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)

func (*PrivateAccessAPI) PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap

func (a *PrivateAccessAPI) PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error)

PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls PrivateAccessAPI.List and creates a map of results with PrivateAccessSettings.PrivateAccessSettingsName as key and PrivateAccessSettings.PrivateAccessSettingsId as value.

Returns an error if there's more than one PrivateAccessSettings with the same .PrivateAccessSettingsName.

Note: All PrivateAccessSettings instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

func (*PrivateAccessAPI) Replace

func (a *PrivateAccessAPI) Replace(ctx context.Context, request ReplacePrivateAccessSettingsRequest) (*PrivateAccessSettings, error)
Example (PrivateAccess)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.PrivateAccess.Create(ctx, provisioning.CreatePrivateAccessSettingsRequest{
	PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	Region:                    os.Getenv("AWS_REGION"),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = a.PrivateAccess.Replace(ctx, provisioning.ReplacePrivateAccessSettingsRequest{
	PrivateAccessSettingsId: created.PrivateAccessSettingsId,
	CustomerFacingPrivateAccessSettings: provisioning.PrivateAccessSettings{
		PrivateAccessSettingsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
		Region:                    os.Getenv("AWS_REGION"),
	},
})
if err != nil {
	panic(err)
}

// cleanup

_, err = a.PrivateAccess.DeleteByPrivateAccessSettingsId(ctx, created.PrivateAccessSettingsId)
if err != nil {
	panic(err)
}

type PrivateAccessInterface added in v0.29.0

type PrivateAccessInterface interface {

	// Creates a private access settings configuration, which represents network
	// access restrictions for workspace resources. Private access settings
	// configure whether workspaces can be accessed from the public internet or only
	// from private endpoints.
	Create(ctx context.Context, request CreatePrivateAccessSettingsRequest) (*PrivateAccessSettings, error)

	// Deletes a Databricks private access settings configuration, both specified by
	// ID.
	Delete(ctx context.Context, request DeletePrivateAccesRequest) (*PrivateAccessSettings, error)

	// Deletes a Databricks private access settings configuration, both specified by
	// ID.
	DeleteByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error)

	// Gets a Databricks private access settings configuration, both specified by
	// ID.
	Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error)

	// Gets a Databricks private access settings configuration, both specified by
	// ID.
	GetByPrivateAccessSettingsId(ctx context.Context, privateAccessSettingsId string) (*PrivateAccessSettings, error)

	// Lists Databricks private access settings for an account.
	List(ctx context.Context) ([]PrivateAccessSettings, error)

	// PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap calls [PrivateAccessAPI.List] and creates a map of results with [PrivateAccessSettings].PrivateAccessSettingsName as key and [PrivateAccessSettings].PrivateAccessSettingsId as value.
	//
	// Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName.
	//
	// Note: All [PrivateAccessSettings] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap(ctx context.Context) (map[string]string, error)

	// GetByPrivateAccessSettingsName calls [PrivateAccessAPI.PrivateAccessSettingsPrivateAccessSettingsNameToPrivateAccessSettingsIdMap] and returns a single [PrivateAccessSettings].
	//
	// Returns an error if there's more than one [PrivateAccessSettings] with the same .PrivateAccessSettingsName.
	//
	// Note: All [PrivateAccessSettings] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByPrivateAccessSettingsName(ctx context.Context, name string) (*PrivateAccessSettings, error)

	// Updates an existing private access settings object, which specifies how your
	// workspace is accessed over AWS PrivateLink. To use AWS PrivateLink, a
	// workspace must have a private access settings object referenced by ID in the
	// workspace's private_access_settings_id property. This operation completely
	// overwrites your existing private access settings object attached to your
	// workspaces. All workspaces attached to the private access settings are
	// affected by any change. If public_access_enabled, private_access_level, or
	// allowed_vpc_endpoint_ids are updated, effects of these changes might take
	// several minutes to propagate to the workspace API. You can share one private
	// access settings object with multiple workspaces in a single account. However,
	// private access settings are specific to AWS regions, so only workspaces in
	// the same AWS region can use a given private access settings object. Before
	// configuring PrivateLink, read the Databricks article about PrivateLink.
	Replace(ctx context.Context, request ReplacePrivateAccessSettingsRequest) (*PrivateAccessSettings, error)
}

type PrivateAccessLevel

type PrivateAccessLevel string
const PrivateAccessLevelAccount PrivateAccessLevel = `ACCOUNT`
const PrivateAccessLevelEndpoint PrivateAccessLevel = `ENDPOINT`

func (*PrivateAccessLevel) Set

func (f *PrivateAccessLevel) Set(v string) error

Set raw string value and validate it against allowed values

func (*PrivateAccessLevel) String

func (f *PrivateAccessLevel) String() string

String representation for fmt.Print

func (*PrivateAccessLevel) Type

func (f *PrivateAccessLevel) Type() string

Type always returns PrivateAccessLevel to satisfy [pflag.Value] interface

func (*PrivateAccessLevel) Values added in v0.72.0

func (f *PrivateAccessLevel) Values() []PrivateAccessLevel

Values returns all possible values for PrivateAccessLevel.

There is no guarantee on the order of the values in the slice.

type PrivateAccessService deprecated

type PrivateAccessService interface {

	// Creates a private access settings configuration, which represents network
	// access restrictions for workspace resources. Private access settings
	// configure whether workspaces can be accessed from the public internet or
	// only from private endpoints.
	Create(ctx context.Context, request CreatePrivateAccessSettingsRequest) (*PrivateAccessSettings, error)

	// Deletes a Databricks private access settings configuration, both
	// specified by ID.
	Delete(ctx context.Context, request DeletePrivateAccesRequest) (*PrivateAccessSettings, error)

	// Gets a Databricks private access settings configuration, both specified
	// by ID.
	Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error)

	// Lists Databricks private access settings for an account.
	List(ctx context.Context) ([]PrivateAccessSettings, error)

	// Updates an existing private access settings object, which specifies how
	// your workspace is accessed over AWS PrivateLink. To use AWS PrivateLink,
	// a workspace must have a private access settings object referenced by ID
	// in the workspace's private_access_settings_id property. This operation
	// completely overwrites your existing private access settings object
	// attached to your workspaces. All workspaces attached to the private
	// access settings are affected by any change. If public_access_enabled,
	// private_access_level, or allowed_vpc_endpoint_ids are updated, effects of
	// these changes might take several minutes to propagate to the workspace
	// API. You can share one private access settings object with multiple
	// workspaces in a single account. However, private access settings are
	// specific to AWS regions, so only workspaces in the same AWS region can
	// use a given private access settings object. Before configuring
	// PrivateLink, read the Databricks article about PrivateLink.
	Replace(ctx context.Context, request ReplacePrivateAccessSettingsRequest) (*PrivateAccessSettings, error)
}

These APIs manage private access settings for this account.

Deprecated: Do not use this interface, it will be removed in a future version of the SDK.

type PrivateAccessSettings

type PrivateAccessSettings struct {
	// The Databricks account ID that hosts the private access settings.
	AccountId string `json:"account_id,omitempty"`
	// An array of Databricks VPC endpoint IDs. This is the Databricks ID that
	// is returned when registering the VPC endpoint configuration in your
	// Databricks account. This is not the ID of the VPC endpoint in AWS. Only
	// used when private_access_level is set to ENDPOINT. This is an allow list
	// of VPC endpoints in your account that can connect to your workspace
	// over AWS PrivateLink. If hybrid access to your workspace is enabled by
	// setting public_access_enabled to true, this control only works for
	// PrivateLink connections. To control how your workspace is accessed via
	// public internet, see IP access lists.
	AllowedVpcEndpointIds []string `json:"allowed_vpc_endpoint_ids,omitempty"`
	// The private access level controls which VPC endpoints can connect to the
	// UI or API of any workspace that attaches this private access settings
	// object. `ACCOUNT` level access (the default) allows only VPC endpoints
	// that are registered in your Databricks account to connect to your
	// workspace. `ENDPOINT` level access allows only specified VPC endpoints
	// to connect to your workspace. For details, see allowed_vpc_endpoint_ids.
	PrivateAccessLevel PrivateAccessLevel `json:"private_access_level,omitempty"`
	// Databricks private access settings ID.
	PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"`
	// The human-readable name of the private access settings object.
	PrivateAccessSettingsName string `json:"private_access_settings_name,omitempty"`
	// Determines if the workspace can be accessed over public internet. For
	// fully private workspaces, you can optionally specify false, but only if
	// you implement both the front-end and the back-end PrivateLink
	// connections. Otherwise, specify true, which means that public access is
	// enabled.
	PublicAccessEnabled bool `json:"public_access_enabled,omitempty"`
	// The AWS region for workspaces attached to this private access settings
	// object.
	Region string `json:"region,omitempty"`

	ForceSendFields []string `json:"-" url:"-"`
}

*

func (PrivateAccessSettings) MarshalJSON added in v0.23.0

func (s PrivateAccessSettings) MarshalJSON() ([]byte, error)

func (*PrivateAccessSettings) UnmarshalJSON added in v0.23.0

func (s *PrivateAccessSettings) UnmarshalJSON(b []byte) error

type ReplacePrivateAccessSettingsRequest added in v0.79.0

type ReplacePrivateAccessSettingsRequest struct {
	// Properties of the new private access settings object.
	CustomerFacingPrivateAccessSettings PrivateAccessSettings `json:"customer_facing_private_access_settings"`
	// Databricks private access settings ID.
	PrivateAccessSettingsId string `json:"-" url:"-"`
}

type RootBucketInfo

type RootBucketInfo struct {
	// Name of the S3 bucket
	BucketName string `json:"bucket_name,omitempty"`

	ForceSendFields []string `json:"-" url:"-"`
}

func (RootBucketInfo) MarshalJSON added in v0.23.0

func (s RootBucketInfo) MarshalJSON() ([]byte, error)

func (*RootBucketInfo) UnmarshalJSON added in v0.23.0

func (s *RootBucketInfo) UnmarshalJSON(b []byte) error

type StorageAPI

type StorageAPI struct {
	// contains filtered or unexported fields
}

These APIs manage storage configurations for this workspace. A root storage S3 bucket in your account is required to store objects like cluster logs, notebook revisions, and job results. You can also use the root storage S3 bucket for storage of non-production DBFS data. A storage configuration encapsulates this bucket information, and its ID is used when creating a new workspace.

func NewStorage

func NewStorage(client *client.DatabricksClient) *StorageAPI

func (*StorageAPI) Create

func (a *StorageAPI) Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error)
Example (LogDelivery)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

bucket, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", bucket)

// cleanup

_, err = a.Storage.DeleteByStorageConfigurationId(ctx, bucket.StorageConfigurationId)
if err != nil {
	panic(err)
}
Example (Storage)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)
Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: os.Getenv("TEST_ROOT_BUCKET"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)

// cleanup

_, err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId)
if err != nil {
	panic(err)
}

func (*StorageAPI) Delete

func (a *StorageAPI) Delete(ctx context.Context, request DeleteStorageRequest) (*StorageConfiguration, error)

func (*StorageAPI) DeleteByStorageConfigurationId

func (a *StorageAPI) DeleteByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error)

Deletes a Databricks storage configuration. You cannot delete a storage configuration that is associated with any workspace.

func (*StorageAPI) Get

func (a *StorageAPI) Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error)
Example (Storage)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)

byId, err := a.Storage.GetByStorageConfigurationId(ctx, storage.StorageConfigurationId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

func (*StorageAPI) GetByStorageConfigurationId

func (a *StorageAPI) GetByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error)

Gets a Databricks storage configuration for an account, both specified by ID.

func (*StorageAPI) GetByStorageConfigurationName

func (a *StorageAPI) GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error)

GetByStorageConfigurationName calls StorageAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap and returns a single StorageConfiguration.

Returns an error if there's more than one StorageConfiguration with the same .StorageConfigurationName.

Note: All StorageConfiguration instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*StorageAPI) List

func (a *StorageAPI) List(ctx context.Context) ([]StorageConfiguration, error)
Example (Storage)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

configs, err := a.Storage.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", configs)

func (*StorageAPI) StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap

func (a *StorageAPI) StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error)

StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls StorageAPI.List and creates a map of results with StorageConfiguration.StorageConfigurationName as key and StorageConfiguration.StorageConfigurationId as value.

Returns an error if there's more than one StorageConfiguration with the same .StorageConfigurationName.

Note: All StorageConfiguration instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

type StorageConfiguration

type StorageConfiguration struct {
	// The Databricks account ID associated with this storage configuration.
	AccountId string `json:"account_id,omitempty"`
	// Time in epoch milliseconds when the storage configuration was created.
	CreationTime int64 `json:"creation_time,omitempty"`
	// Optional IAM role that is used to access the workspace catalog which is
	// created during workspace creation for UC by Default. If a storage
	// configuration with this field populated is used to create a workspace,
	// then a workspace catalog is created together with the workspace. The
	// workspace catalog shares the root bucket with internal workspace storage
	// (including DBFS root) but uses a dedicated bucket path prefix.
	RoleArn string `json:"role_arn,omitempty"`
	// The root bucket information for the storage configuration.
	RootBucketInfo *RootBucketInfo `json:"root_bucket_info,omitempty"`
	// Databricks storage configuration ID.
	StorageConfigurationId string `json:"storage_configuration_id,omitempty"`
	// The human-readable name of the storage configuration.
	StorageConfigurationName string `json:"storage_configuration_name,omitempty"`

	ForceSendFields []string `json:"-" url:"-"`
}

func (StorageConfiguration) MarshalJSON added in v0.23.0

func (s StorageConfiguration) MarshalJSON() ([]byte, error)

func (*StorageConfiguration) UnmarshalJSON added in v0.23.0

func (s *StorageConfiguration) UnmarshalJSON(b []byte) error

type StorageInterface added in v0.29.0

type StorageInterface interface {

	// Creates a Databricks storage configuration for an account.
	Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error)

	// Deletes a Databricks storage configuration. You cannot delete a storage
	// configuration that is associated with any workspace.
	Delete(ctx context.Context, request DeleteStorageRequest) (*StorageConfiguration, error)

	// Deletes a Databricks storage configuration. You cannot delete a storage
	// configuration that is associated with any workspace.
	DeleteByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error)

	// Gets a Databricks storage configuration for an account, both specified by ID.
	Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error)

	// Gets a Databricks storage configuration for an account, both specified by ID.
	GetByStorageConfigurationId(ctx context.Context, storageConfigurationId string) (*StorageConfiguration, error)

	// Lists Databricks storage configurations for an account, specified by ID.
	List(ctx context.Context) ([]StorageConfiguration, error)

	// StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap calls [StorageAPI.List] and creates a map of results with [StorageConfiguration].StorageConfigurationName as key and [StorageConfiguration].StorageConfigurationId as value.
	//
	// Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName.
	//
	// Note: All [StorageConfiguration] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap(ctx context.Context) (map[string]string, error)

	// GetByStorageConfigurationName calls [StorageAPI.StorageConfigurationStorageConfigurationNameToStorageConfigurationIdMap] and returns a single [StorageConfiguration].
	//
	// Returns an error if there's more than one [StorageConfiguration] with the same .StorageConfigurationName.
	//
	// Note: All [StorageConfiguration] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByStorageConfigurationName(ctx context.Context, name string) (*StorageConfiguration, error)
}

type StorageService deprecated

type StorageService interface {

	// Creates a Databricks storage configuration for an account.
	Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error)

	// Deletes a Databricks storage configuration. You cannot delete a storage
	// configuration that is associated with any workspace.
	Delete(ctx context.Context, request DeleteStorageRequest) (*StorageConfiguration, error)

	// Gets a Databricks storage configuration for an account, both specified by
	// ID.
	Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error)

	// Lists Databricks storage configurations for an account, specified by ID.
	List(ctx context.Context) ([]StorageConfiguration, error)
}

These APIs manage storage configurations for this workspace. A root storage S3 bucket in your account is required to store objects like cluster logs, notebook revisions, and job results. You can also use the root storage S3 bucket for storage of non-production DBFS data. A storage configuration encapsulates this bucket information, and its ID is used when creating a new workspace.

Deprecated: Do not use this interface, it will be removed in a future version of the SDK.

type StsRole

type StsRole struct {
	// The Amazon Resource Name (ARN) of the cross account IAM role.
	RoleArn string `json:"role_arn,omitempty"`

	ForceSendFields []string `json:"-" url:"-"`
}

func (StsRole) MarshalJSON added in v0.23.0

func (s StsRole) MarshalJSON() ([]byte, error)

func (*StsRole) UnmarshalJSON added in v0.23.0

func (s *StsRole) UnmarshalJSON(b []byte) error

type UpdateWorkspaceRequest

type UpdateWorkspaceRequest struct {
	CustomerFacingWorkspace Workspace `json:"customer_facing_workspace"`
	// The field mask must be a single string, with multiple fields separated by
	// commas (no spaces). The field path is relative to the resource object,
	// using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`).
	// Specification of elements in sequence or map fields is not allowed, as
	// only the entire collection field can be specified. Field names must
	// exactly match the resource field names.
	//
	// A field mask of `*` indicates full replacement. It’s recommended to
	// always explicitly list the fields being updated and avoid using `*`
	// wildcards, as it can lead to unintended results if the API changes in the
	// future.
	UpdateMask string `json:"-" url:"update_mask,omitempty"`
	// A unique integer ID for the workspace
	WorkspaceId int64 `json:"-" url:"-"`

	ForceSendFields []string `json:"-" url:"-"`
}

func (UpdateWorkspaceRequest) MarshalJSON added in v0.23.0

func (s UpdateWorkspaceRequest) MarshalJSON() ([]byte, error)

func (*UpdateWorkspaceRequest) UnmarshalJSON added in v0.23.0

func (s *UpdateWorkspaceRequest) UnmarshalJSON(b []byte) error

type VpcEndpoint

type VpcEndpoint struct {
	// The Databricks account ID that hosts the VPC endpoint configuration. TODO
	// - This may signal an OpenAPI diff; it does not show up in the generated
	// spec
	AccountId string `json:"account_id,omitempty"`
	// The AWS Account in which the VPC endpoint object exists.
	AwsAccountId string `json:"aws_account_id,omitempty"`
	// The ID of the Databricks [endpoint service] that this VPC endpoint is
	// connected to. For a list of endpoint service IDs for each supported AWS
	// region, see the [Databricks PrivateLink documentation].
	//
	// [Databricks PrivateLink documentation]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	// [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html
	AwsEndpointServiceId string `json:"aws_endpoint_service_id,omitempty"`
	// The ID of the VPC endpoint object in AWS.
	AwsVpcEndpointId string `json:"aws_vpc_endpoint_id,omitempty"`
	// The cloud info of this vpc endpoint. Info for a GCP vpc endpoint.
	GcpVpcEndpointInfo *GcpVpcEndpointInfo `json:"gcp_vpc_endpoint_info,omitempty"`
	// The AWS region in which this VPC endpoint object exists.
	Region string `json:"region,omitempty"`
	// The current state (such as `available` or `rejected`) of the VPC
	// endpoint. Derived from AWS. For the full set of values, see [AWS
	// DescribeVpcEndpoint documentation].
	//
	// [AWS DescribeVpcEndpoint documentation]: https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpc-endpoints.html
	State string `json:"state,omitempty"`
	// This enumeration represents the type of Databricks VPC endpoint service
	// that was used when creating this VPC endpoint. If the VPC endpoint
	// connects to the Databricks control plane for either the front-end
	// connection or the back-end REST API connection, the value is
	// WORKSPACE_ACCESS. If the VPC endpoint connects to the Databricks
	// workspace for the back-end secure cluster connectivity relay, the value
	// is DATAPLANE_RELAY_ACCESS.
	UseCase EndpointUseCase `json:"use_case,omitempty"`
	// Databricks VPC endpoint ID. This is the Databricks-specific name of the
	// VPC endpoint. Do not confuse this with the `aws_vpc_endpoint_id`, which
	// is the ID within AWS of the VPC endpoint.
	VpcEndpointId string `json:"vpc_endpoint_id,omitempty"`
	// The human-readable name of the VPC endpoint.
	VpcEndpointName string `json:"vpc_endpoint_name,omitempty"`

	ForceSendFields []string `json:"-" url:"-"`
}

*

func (VpcEndpoint) MarshalJSON added in v0.23.0

func (s VpcEndpoint) MarshalJSON() ([]byte, error)

func (*VpcEndpoint) UnmarshalJSON added in v0.23.0

func (s *VpcEndpoint) UnmarshalJSON(b []byte) error

type VpcEndpointsAPI

// VpcEndpointsAPI manages VPC endpoint configurations for this account.
type VpcEndpointsAPI struct {
	// contains filtered or unexported fields
}

These APIs manage VPC endpoint configurations for this account.

func NewVpcEndpoints

func NewVpcEndpoints(client *client.DatabricksClient) *VpcEndpointsAPI

func (*VpcEndpointsAPI) Create

func (a *VpcEndpointsAPI) Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error)
Example (VpcEndpoints)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.VpcEndpoints.Create(ctx, provisioning.CreateVpcEndpointRequest{
	AwsVpcEndpointId: os.Getenv("TEST_RELAY_VPC_ENDPOINT"),
	Region:           os.Getenv("AWS_REGION"),
	VpcEndpointName:  fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

_, err = a.VpcEndpoints.DeleteByVpcEndpointId(ctx, created.VpcEndpointId)
if err != nil {
	panic(err)
}

func (*VpcEndpointsAPI) Delete

func (a *VpcEndpointsAPI) Delete(ctx context.Context, request DeleteVpcEndpointRequest) (*VpcEndpoint, error)

func (*VpcEndpointsAPI) DeleteByVpcEndpointId

func (a *VpcEndpointsAPI) DeleteByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error)

Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC endpoint configuration that is associated with any workspace.

func (*VpcEndpointsAPI) Get

func (a *VpcEndpointsAPI) Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error)
Example (VpcEndpoints)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

created, err := a.VpcEndpoints.Create(ctx, provisioning.CreateVpcEndpointRequest{
	AwsVpcEndpointId: os.Getenv("TEST_RELAY_VPC_ENDPOINT"),
	Region:           os.Getenv("AWS_REGION"),
	VpcEndpointName:  fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := a.VpcEndpoints.GetByVpcEndpointId(ctx, created.VpcEndpointId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

_, err = a.VpcEndpoints.DeleteByVpcEndpointId(ctx, created.VpcEndpointId)
if err != nil {
	panic(err)
}

func (*VpcEndpointsAPI) GetByVpcEndpointId

func (a *VpcEndpointsAPI) GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error)

Gets a VPC endpoint configuration, which represents a VPC endpoint object in AWS used to communicate privately with Databricks over AWS PrivateLink.

func (*VpcEndpointsAPI) GetByVpcEndpointName

func (a *VpcEndpointsAPI) GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error)

GetByVpcEndpointName calls VpcEndpointsAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap and returns a single VpcEndpoint.

Returns an error if there's more than one VpcEndpoint with the same .VpcEndpointName.

Note: All VpcEndpoint instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*VpcEndpointsAPI) List

func (a *VpcEndpointsAPI) List(ctx context.Context) ([]VpcEndpoint, error)
Example (VpcEndpoints)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

all, err := a.VpcEndpoints.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)

func (*VpcEndpointsAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap

func (a *VpcEndpointsAPI) VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error)

VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls VpcEndpointsAPI.List and creates a map of results with VpcEndpoint.VpcEndpointName as key and VpcEndpoint.VpcEndpointId as value.

Returns an error if there's more than one VpcEndpoint with the same .VpcEndpointName.

Note: All VpcEndpoint instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

type VpcEndpointsInterface added in v0.29.0

// VpcEndpointsInterface manages VPC endpoint configurations for this account.
type VpcEndpointsInterface interface {

	// Creates a VPC endpoint configuration, which represents a [VPC endpoint]
	// object in AWS used to communicate privately with Databricks over [AWS
	// PrivateLink].
	//
	// After you create the VPC endpoint configuration, the Databricks [endpoint
	// service] automatically accepts the VPC endpoint.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html
	// [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html
	Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error)

	// Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC
	// endpoint configuration that is associated with any workspace.
	Delete(ctx context.Context, request DeleteVpcEndpointRequest) (*VpcEndpoint, error)

	// Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC
	// endpoint configuration that is associated with any workspace.
	DeleteByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error)

	// Gets a VPC endpoint configuration, which represents a [VPC endpoint] object
	// in AWS used to communicate privately with Databricks over [AWS PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
	Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error)

	// Gets a VPC endpoint configuration, which represents a [VPC endpoint] object
	// in AWS used to communicate privately with Databricks over [AWS PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
	GetByVpcEndpointId(ctx context.Context, vpcEndpointId string) (*VpcEndpoint, error)

	// Lists Databricks VPC endpoint configurations for an account.
	List(ctx context.Context) ([]VpcEndpoint, error)

	// VpcEndpointVpcEndpointNameToVpcEndpointIdMap calls [VpcEndpointsAPI.List] and creates a map of results with [VpcEndpoint].VpcEndpointName as key and [VpcEndpoint].VpcEndpointId as value.
	//
	// Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName.
	//
	// Note: All [VpcEndpoint] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	VpcEndpointVpcEndpointNameToVpcEndpointIdMap(ctx context.Context) (map[string]string, error)

	// GetByVpcEndpointName calls [VpcEndpointsAPI.VpcEndpointVpcEndpointNameToVpcEndpointIdMap] and returns a single [VpcEndpoint].
	//
	// Returns an error if there's more than one [VpcEndpoint] with the same .VpcEndpointName.
	//
	// Note: All [VpcEndpoint] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByVpcEndpointName(ctx context.Context, name string) (*VpcEndpoint, error)
}

type VpcEndpointsService deprecated

// VpcEndpointsService manages VPC endpoint configurations for this account.
//
// Deprecated: Do not use this interface, it will be removed in a future
// version of the SDK.
type VpcEndpointsService interface {

	// Creates a VPC endpoint configuration, which represents a [VPC endpoint]
	// object in AWS used to communicate privately with Databricks over [AWS
	// PrivateLink].
	//
	// After you create the VPC endpoint configuration, the Databricks [endpoint
	// service] automatically accepts the VPC endpoint.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html
	// [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html
	Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error)

	// Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC
	// endpoint configuration that is associated with any workspace.
	Delete(ctx context.Context, request DeleteVpcEndpointRequest) (*VpcEndpoint, error)

	// Gets a VPC endpoint configuration, which represents a [VPC endpoint]
	// object in AWS used to communicate privately with Databricks over [AWS
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink
	// [VPC endpoint]: https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html
	Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error)

	// Lists Databricks VPC endpoint configurations for an account.
	List(ctx context.Context) ([]VpcEndpoint, error)
}

These APIs manage VPC endpoint configurations for this account.

Deprecated: Do not use this interface, it will be removed in a future version of the SDK.

type VpcStatus

// VpcStatus enumerates the possible states of a VPC (network) configuration;
// see the constants below for the allowed values.
type VpcStatus string
const VpcStatusBroken VpcStatus = `BROKEN`
const VpcStatusUnattached VpcStatus = `UNATTACHED`
const VpcStatusValid VpcStatus = `VALID`
const VpcStatusWarned VpcStatus = `WARNED`

func (*VpcStatus) Set

func (f *VpcStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*VpcStatus) String

func (f *VpcStatus) String() string

String representation for fmt.Print

func (*VpcStatus) Type

func (f *VpcStatus) Type() string

Type always returns VpcStatus to satisfy [pflag.Value] interface

func (*VpcStatus) Values added in v0.72.0

func (f *VpcStatus) Values() []VpcStatus

Values returns all possible values for VpcStatus.

There is no guarantee on the order of the values in the slice.

type WaitGetWorkspaceRunning added in v0.10.0

// WaitGetWorkspaceRunning is a wrapper that calls
// WorkspacesAPI.WaitGetWorkspaceRunning and waits for the workspace to reach
// the RUNNING state.
type WaitGetWorkspaceRunning[R any] struct {
	// Response is the response of the originating API call (e.g. Create or
	// Update) that produced this waiter.
	Response    *R
	// WorkspaceId identifies the workspace whose status is being polled.
	WorkspaceId int64 `json:"workspace_id"`
	// Poll polls the workspace once per interval until the timeout elapses,
	// invoking the callback with each intermediate Workspace state.
	Poll        func(time.Duration, func(*Workspace)) (*Workspace, error)
	// contains filtered or unexported fields
}

WaitGetWorkspaceRunning is a wrapper that calls WorkspacesAPI.WaitGetWorkspaceRunning and waits to reach RUNNING state.

func (*WaitGetWorkspaceRunning[R]) Get added in v0.10.0

func (w *WaitGetWorkspaceRunning[R]) Get() (*Workspace, error)

Get the Workspace with the default timeout of 20 minutes.

func (*WaitGetWorkspaceRunning[R]) GetWithTimeout added in v0.10.0

func (w *WaitGetWorkspaceRunning[R]) GetWithTimeout(timeout time.Duration) (*Workspace, error)

Get the Workspace with custom timeout.

func (*WaitGetWorkspaceRunning[R]) OnProgress added in v0.10.0

func (w *WaitGetWorkspaceRunning[R]) OnProgress(callback func(*Workspace)) *WaitGetWorkspaceRunning[R]

OnProgress invokes a callback every time it polls for the status update.

type WarningType

// WarningType enumerates the kinds of warnings; see the constants below for
// the allowed values (`securityGroup`, `subnet`).
type WarningType string
const WarningTypeSecurityGroup WarningType = `securityGroup`
const WarningTypeSubnet WarningType = `subnet`

func (*WarningType) Set

func (f *WarningType) Set(v string) error

Set raw string value and validate it against allowed values

func (*WarningType) String

func (f *WarningType) String() string

String representation for fmt.Print

func (*WarningType) Type

func (f *WarningType) Type() string

Type always returns WarningType to satisfy [pflag.Value] interface

func (*WarningType) Values added in v0.72.0

func (f *WarningType) Values() []WarningType

Values returns all possible values for WarningType.

There is no guarantee on the order of the values in the slice.

type Workspace

// Workspace describes a Databricks workspace deployment together with the IDs
// of its associated configuration objects (credentials, storage, network, and
// encryption keys).
type Workspace struct {
	// Databricks account ID.
	AccountId string `json:"account_id,omitempty"`
	// The AWS region of the workspace, if this is an AWS workspace.
	AwsRegion string `json:"aws_region,omitempty"`
	// Azure-specific workspace information, if this is an Azure workspace.
	AzureWorkspaceInfo *AzureWorkspaceInfo `json:"azure_workspace_info,omitempty"`
	// The cloud name. This field can have values like `azure`, `gcp`.
	Cloud string `json:"cloud,omitempty"`
	// The cloud resource container information for the workspace.
	CloudResourceContainer *CloudResourceContainer `json:"cloud_resource_container,omitempty"`
	// The compute mode of the workspace.
	ComputeMode CustomerFacingComputeMode `json:"compute_mode,omitempty"`
	// Time in epoch milliseconds when the workspace was created.
	CreationTime int64 `json:"creation_time,omitempty"`
	// ID of the workspace's credential configuration object.
	CredentialsId string `json:"credentials_id,omitempty"`
	// The custom tags key-value pairing that is attached to this workspace. The
	// key-value pair is a string of utf-8 characters. The value can be an empty
	// string, with maximum length of 255 characters. The key can be of maximum
	// length of 127 characters, and cannot be empty.
	CustomTags map[string]string `json:"custom_tags,omitempty"`
	// The deployment name of the workspace.
	DeploymentName string `json:"deployment_name,omitempty"`
	// A client owned field used to indicate the workspace status that the
	// client expects to be in. For now this is only used to unblock Temporal
	// workflow for GCP least privileged workspace.
	ExpectedWorkspaceStatus WorkspaceStatus `json:"expected_workspace_status,omitempty"`
	// The managed network configuration for GCP workspaces, if applicable.
	GcpManagedNetworkConfig *GcpManagedNetworkConfig `json:"gcp_managed_network_config,omitempty"`
	// The GKE configuration for GCP workspaces, if applicable.
	GkeConfig *GkeConfig `json:"gke_config,omitempty"`
	// The Google Cloud region of the workspace data plane in your Google
	// account (for example, `us-east4`).
	Location string `json:"location,omitempty"`
	// ID of the key configuration for encrypting managed services.
	ManagedServicesCustomerManagedKeyId string `json:"managed_services_customer_managed_key_id,omitempty"`
	// The network configuration for the workspace.
	//
	// DEPRECATED. Use `network_id` instead.
	Network *WorkspaceNetwork `json:"network,omitempty"`
	// The object ID of network connectivity config.
	NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"`
	// If this workspace is BYO VPC, then the network_id will be populated. If
	// this workspace is not BYO VPC, then the network_id will be empty.
	NetworkId string `json:"network_id,omitempty"`
	// The pricing tier of the workspace.
	PricingTier PricingTier `json:"pricing_tier,omitempty"`
	// ID of the workspace's private access settings object. Only used for
	// PrivateLink. You must specify this ID if you are using [AWS PrivateLink]
	// for either front-end (user-to-workspace connection), back-end (data plane
	// to control plane connection), or both connection types.
	//
	// Before configuring PrivateLink, read the [Databricks article about
	// PrivateLink].
	//
	// [AWS PrivateLink]: https://aws.amazon.com/privatelink/
	// [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
	PrivateAccessSettingsId string `json:"private_access_settings_id,omitempty"`
	// ID of the workspace's storage configuration object.
	StorageConfigurationId string `json:"storage_configuration_id,omitempty"`
	// ID of the key configuration for encrypting workspace storage.
	StorageCustomerManagedKeyId string `json:"storage_customer_managed_key_id,omitempty"`
	// The storage mode of the workspace.
	StorageMode CustomerFacingStorageMode `json:"storage_mode,omitempty"`
	// A unique integer ID for the workspace
	WorkspaceId int64 `json:"workspace_id,omitempty"`
	// The human-readable name of the workspace.
	WorkspaceName string `json:"workspace_name,omitempty"`
	// The status of a workspace
	WorkspaceStatus WorkspaceStatus `json:"workspace_status,omitempty"`
	// Message describing the current workspace status.
	WorkspaceStatusMessage string `json:"workspace_status_message,omitempty"`

	// ForceSendFields lists field names (e.g. "AccountId") to serialize even
	// when they hold their zero value; consumed by MarshalJSON.
	ForceSendFields []string `json:"-" url:"-"`
}

func (Workspace) AzureResourceId added in v0.31.0

func (w Workspace) AzureResourceId() string

Return the AzureResourceID for the workspace, if it is an Azure workspace.

func (Workspace) MarshalJSON added in v0.23.0

func (s Workspace) MarshalJSON() ([]byte, error)

func (*Workspace) UnmarshalJSON added in v0.23.0

func (s *Workspace) UnmarshalJSON(b []byte) error

type WorkspaceNetwork added in v0.86.0

// WorkspaceNetwork is the network configuration for workspaces.
type WorkspaceNetwork struct {
	// The shared network config for GCP workspace. This object has common
	// network configurations that are network attributions of a workspace. This
	// object is input-only.
	GcpCommonNetworkConfig *GcpCommonNetworkConfig `json:"gcp_common_network_config,omitempty"`
	// The mutually exclusive network deployment modes. The option decides which
	// network mode the workspace will use. The network config for GCP workspace
	// with Databricks managed network. This object is input-only and will not
	// be provided when listing workspaces. See go/gcp-byovpc-alpha-design for
	// interface decisions.
	GcpManagedNetworkConfig *GcpManagedNetworkConfig `json:"gcp_managed_network_config,omitempty"`
	// The ID of the network object, if the workspace is a BYOVPC workspace.
	// This should apply to workspaces on all clouds in internal services. In
	// accounts-rest-api, user will use workspace.network_id for input and
	// output instead. Currently (2021-06-19) the network ID is only used by
	// GCP.
	NetworkId string `json:"network_id,omitempty"`

	// ForceSendFields lists field names to serialize even when they hold their
	// zero value; consumed by MarshalJSON.
	ForceSendFields []string `json:"-" url:"-"`
}

The network configuration for workspaces.

func (WorkspaceNetwork) MarshalJSON added in v0.86.0

func (s WorkspaceNetwork) MarshalJSON() ([]byte, error)

func (*WorkspaceNetwork) UnmarshalJSON added in v0.86.0

func (s *WorkspaceNetwork) UnmarshalJSON(b []byte) error

type WorkspaceStatus

// WorkspaceStatus is the status of a workspace.
type WorkspaceStatus string

The different statuses of a workspace. The following represents the current set of valid transitions from status to status: NOT_PROVISIONED -> PROVISIONING -> CANCELLED; PROVISIONING -> RUNNING -> FAILED -> CANCELLED (note that this transition is disallowed in the MultiWorkspace Project); RUNNING -> PROVISIONING -> BANNED -> CANCELLED; FAILED -> PROVISIONING -> CANCELLED; BANNED -> RUNNING -> CANCELLED. Note that a transition from any state to itself is also valid. TODO(PLAT-5867): add a transition from CANCELLED to some other value (e.g. RECOVERING)

// The allowed WorkspaceStatus values.
const WorkspaceStatusBanned WorkspaceStatus = `BANNED`
const WorkspaceStatusCancelling WorkspaceStatus = `CANCELLING`
const WorkspaceStatusFailed WorkspaceStatus = `FAILED`
const WorkspaceStatusNotProvisioned WorkspaceStatus = `NOT_PROVISIONED`
const WorkspaceStatusProvisioning WorkspaceStatus = `PROVISIONING`
const WorkspaceStatusRunning WorkspaceStatus = `RUNNING`

func (*WorkspaceStatus) Set

func (f *WorkspaceStatus) Set(v string) error

Set raw string value and validate it against allowed values

func (*WorkspaceStatus) String

func (f *WorkspaceStatus) String() string

String representation for fmt.Print

func (*WorkspaceStatus) Type

func (f *WorkspaceStatus) Type() string

Type always returns WorkspaceStatus to satisfy [pflag.Value] interface

func (*WorkspaceStatus) Values added in v0.72.0

func (f *WorkspaceStatus) Values() []WorkspaceStatus

Values returns all possible values for WorkspaceStatus.

There is no guarantee on the order of the values in the slice.

type WorkspacesAPI

type WorkspacesAPI struct {
	// contains filtered or unexported fields
}

These APIs manage workspaces for this account. A Databricks workspace is an environment for accessing all of your Databricks assets. The workspace organizes objects (notebooks, libraries, and experiments) into folders, and provides access to data and computational resources such as clusters and jobs.

These endpoints are available if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.

func NewWorkspaces

func NewWorkspaces(client *client.DatabricksClient) *WorkspacesAPI

func (*WorkspacesAPI) Create

func (a *WorkspacesAPI) Create(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest) (*WaitGetWorkspaceRunning[Workspace], error)

Creates a new workspace using a credential configuration and a storage configuration, an optional network configuration (if using a customer-managed VPC), an optional managed services key configuration (if using customer-managed keys for managed services), and an optional storage key configuration (if using customer-managed keys for storage). The key configurations used for managed services and storage encryption can be the same or different.

Important: This operation is asynchronous. A response with HTTP status code 200 means the request has been accepted and is in progress, but does not mean that the workspace deployed successfully and is running. The initial workspace status is typically PROVISIONING. Use the workspace ID (workspace_id) field in the response to identify the new workspace and make repeated GET requests with the workspace ID and check its status. The workspace becomes available when the status changes to RUNNING.

You can share one customer-managed VPC with multiple workspaces in a single account. It is not required to create a new VPC for each workspace. However, you cannot reuse subnets or Security Groups between workspaces. If you plan to share one VPC with multiple workspaces, make sure you size your VPC and subnets accordingly. Because a Databricks Account API network configuration encapsulates this information, you cannot reuse a Databricks Account API network configuration across workspaces.

For information about how to create a new workspace with this API including error handling, see Create a new workspace using the Account API.

Important: Customer-managed VPCs, PrivateLink, and customer-managed keys are supported on a limited set of deployment and subscription types. If you have questions about availability, contact your Databricks representative.

This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.

Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: os.Getenv("TEST_ROOT_BUCKET"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

created, err := a.Workspaces.CreateAndWait(ctx, provisioning.CreateWorkspaceRequest{
	WorkspaceName:          fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsRegion:              os.Getenv("AWS_REGION"),
	CredentialsId:          role.CredentialsId,
	StorageConfigurationId: storage.StorageConfigurationId,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

// cleanup

_, err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId)
if err != nil {
	panic(err)
}
_, err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
_, err = a.Workspaces.DeleteByWorkspaceId(ctx, created.WorkspaceId)
if err != nil {
	panic(err)
}

func (*WorkspacesAPI) CreateAndWait deprecated

func (a *WorkspacesAPI) CreateAndWait(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest, options ...retries.Option[Workspace]) (*Workspace, error)

Calls WorkspacesAPI.Create and waits to reach RUNNING state

You can override the default timeout of 20 minutes by calling adding retries.Timeout[Workspace](60*time.Minute) functional option.

Deprecated: use WorkspacesAPI.Create.Get() or WorkspacesAPI.WaitGetWorkspaceRunning

func (*WorkspacesAPI) Delete

func (a *WorkspacesAPI) Delete(ctx context.Context, request DeleteWorkspaceRequest) (*Workspace, error)

func (*WorkspacesAPI) DeleteByWorkspaceId

func (a *WorkspacesAPI) DeleteByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error)

Deletes a Databricks workspace, specified by ID.

func (*WorkspacesAPI) Get

func (a *WorkspacesAPI) Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error)
Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: os.Getenv("TEST_ROOT_BUCKET"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

created, err := a.Workspaces.CreateAndWait(ctx, provisioning.CreateWorkspaceRequest{
	WorkspaceName:          fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsRegion:              os.Getenv("AWS_REGION"),
	CredentialsId:          role.CredentialsId,
	StorageConfigurationId: storage.StorageConfigurationId,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

byId, err := a.Workspaces.GetByWorkspaceId(ctx, created.WorkspaceId)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", byId)

// cleanup

_, err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId)
if err != nil {
	panic(err)
}
_, err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
_, err = a.Workspaces.DeleteByWorkspaceId(ctx, created.WorkspaceId)
if err != nil {
	panic(err)
}

func (*WorkspacesAPI) GetByWorkspaceId

func (a *WorkspacesAPI) GetByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error)

Gets information including status for a Databricks workspace, specified by ID. In the response, the `workspace_status` field indicates the current status. After initial workspace creation (which is asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`. For information about how to create a new workspace with this API **including error handling**, see Create a new workspace using the Account API.

func (*WorkspacesAPI) GetByWorkspaceName

func (a *WorkspacesAPI) GetByWorkspaceName(ctx context.Context, name string) (*Workspace, error)

GetByWorkspaceName calls WorkspacesAPI.WorkspaceWorkspaceNameToWorkspaceIdMap and returns a single Workspace.

Returns an error if there's more than one Workspace with the same .WorkspaceName.

Note: All Workspace instances are loaded into memory before returning matching by name.

This method is generated by Databricks SDK Code Generator.

func (*WorkspacesAPI) List

func (a *WorkspacesAPI) List(ctx context.Context) ([]Workspace, error)
Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

all, err := a.Workspaces.List(ctx)
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", all)

func (*WorkspacesAPI) Update

func (a *WorkspacesAPI) Update(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest) (*WaitGetWorkspaceRunning[Workspace], error)

Updates a workspace.

Example (Workspaces)
ctx := context.Background()
a, err := databricks.NewAccountClient()
if err != nil {
	panic(err)
}

storage, err := a.Storage.Create(ctx, provisioning.CreateStorageConfigurationRequest{
	StorageConfigurationName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	RootBucketInfo: provisioning.RootBucketInfo{
		BucketName: os.Getenv("TEST_ROOT_BUCKET"),
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", storage)

role, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", role)

updateRole, err := a.Credentials.Create(ctx, provisioning.CreateCredentialRequest{
	CredentialsName: fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsCredentials: provisioning.CreateCredentialAwsCredentials{
		StsRole: &provisioning.CreateCredentialStsRole{
			RoleArn: os.Getenv("TEST_CROSSACCOUNT_ARN"),
		},
	},
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", updateRole)

created, err := a.Workspaces.CreateAndWait(ctx, provisioning.CreateWorkspaceRequest{
	WorkspaceName:          fmt.Sprintf("sdk-%x", time.Now().UnixNano()),
	AwsRegion:              os.Getenv("AWS_REGION"),
	CredentialsId:          role.CredentialsId,
	StorageConfigurationId: storage.StorageConfigurationId,
})
if err != nil {
	panic(err)
}
logger.Infof(ctx, "found %v", created)

_, err = a.Workspaces.UpdateAndWait(ctx, provisioning.UpdateWorkspaceRequest{
	WorkspaceId: created.WorkspaceId,
	CustomerFacingWorkspace: provisioning.Workspace{
		CredentialsId: updateRole.CredentialsId,
	},
})
if err != nil {
	panic(err)
}

// cleanup

_, err = a.Storage.DeleteByStorageConfigurationId(ctx, storage.StorageConfigurationId)
if err != nil {
	panic(err)
}
_, err = a.Credentials.DeleteByCredentialsId(ctx, role.CredentialsId)
if err != nil {
	panic(err)
}
_, err = a.Credentials.DeleteByCredentialsId(ctx, updateRole.CredentialsId)
if err != nil {
	panic(err)
}
_, err = a.Workspaces.DeleteByWorkspaceId(ctx, created.WorkspaceId)
if err != nil {
	panic(err)
}

func (*WorkspacesAPI) UpdateAndWait deprecated

func (a *WorkspacesAPI) UpdateAndWait(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest, options ...retries.Option[Workspace]) (*Workspace, error)

Calls WorkspacesAPI.Update and waits to reach RUNNING state

You can override the default timeout of 20 minutes by calling adding retries.Timeout[Workspace](60*time.Minute) functional option.

Deprecated: use WorkspacesAPI.Update.Get() or WorkspacesAPI.WaitGetWorkspaceRunning

func (*WorkspacesAPI) WaitGetWorkspaceRunning added in v0.10.0

func (a *WorkspacesAPI) WaitGetWorkspaceRunning(ctx context.Context, workspaceId int64,
	timeout time.Duration, callback func(*Workspace)) (*Workspace, error)

WaitGetWorkspaceRunning repeatedly calls WorkspacesAPI.Get and waits to reach RUNNING state

func (*WorkspacesAPI) WorkspaceWorkspaceNameToWorkspaceIdMap

func (a *WorkspacesAPI) WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error)

WorkspaceWorkspaceNameToWorkspaceIdMap calls WorkspacesAPI.List and creates a map of results with Workspace.WorkspaceName as key and Workspace.WorkspaceId as value.

Returns an error if there's more than one Workspace with the same .WorkspaceName.

Note: All Workspace instances are loaded into memory before creating a map.

This method is generated by Databricks SDK Code Generator.

type WorkspacesInterface added in v0.29.0

// WorkspacesInterface manages workspaces for this account. A Databricks
// workspace is an environment for accessing all of your Databricks assets;
// it organizes objects (notebooks, libraries, and experiments) into folders
// and provides access to data and computational resources such as clusters
// and jobs.
type WorkspacesInterface interface {

	// WaitGetWorkspaceRunning repeatedly calls [WorkspacesAPI.Get] and waits to reach RUNNING state
	WaitGetWorkspaceRunning(ctx context.Context, workspaceId int64,
		timeout time.Duration, callback func(*Workspace)) (*Workspace, error)

	// Creates a new workspace using a credential configuration and a storage
	// configuration, an optional network configuration (if using a customer-managed
	// VPC), an optional managed services key configuration (if using
	// customer-managed keys for managed services), and an optional storage key
	// configuration (if using customer-managed keys for storage). The key
	// configurations used for managed services and storage encryption can be the
	// same or different.
	//
	// Important: This operation is asynchronous. A response with HTTP status code
	// 200 means the request has been accepted and is in progress, but does not mean
	// that the workspace deployed successfully and is running. The initial
	// workspace status is typically PROVISIONING. Use the workspace ID
	// (workspace_id) field in the response to identify the new workspace and make
	// repeated GET requests with the workspace ID and check its status. The
	// workspace becomes available when the status changes to RUNNING.
	//
	// You can share one customer-managed VPC with multiple workspaces in a single
	// account. It is not required to create a new VPC for each workspace. However,
	// you cannot reuse subnets or Security Groups between workspaces. If you plan
	// to share one VPC with multiple workspaces, make sure you size your VPC and
	// subnets accordingly. Because a Databricks Account API network configuration
	// encapsulates this information, you cannot reuse a Databricks Account API
	// network configuration across workspaces.
	//
	// For information about how to create a new workspace with this API including
	// error handling, see [Create a new workspace using the Account API].
	//
	// Important: Customer-managed VPCs, PrivateLink, and customer-managed keys are
	// supported on a limited set of deployment and subscription types. If you have
	// questions about availability, contact your Databricks representative.
	//
	// This operation is available only if your account is on the E2 version of the
	// platform or on a select custom plan that allows multiple workspaces per
	// account.
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Create(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest) (*WaitGetWorkspaceRunning[Workspace], error)

	// Calls [WorkspacesAPIInterface.Create] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[Workspace](60*time.Minute) functional option.
	//
	// Deprecated: use [WorkspacesAPIInterface.Create].Get() or [WorkspacesAPIInterface.WaitGetWorkspaceRunning]
	CreateAndWait(ctx context.Context, createWorkspaceRequest CreateWorkspaceRequest, options ...retries.Option[Workspace]) (*Workspace, error)

	// Deletes a Databricks workspace, specified by ID.
	Delete(ctx context.Context, request DeleteWorkspaceRequest) (*Workspace, error)

	// Deletes a Databricks workspace, specified by ID.
	DeleteByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error)

	// Gets information including status for a Databricks workspace, specified by
	// ID. In the response, the `workspace_status` field indicates the current
	// status. After initial workspace creation (which is asynchronous), make
	// repeated `GET` requests with the workspace ID and check its status. The
	// workspace becomes available when the status changes to `RUNNING`. For
	// information about how to create a new workspace with this API **including
	// error handling**, see [Create a new workspace using the Account API].
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error)

	// Gets information including status for a Databricks workspace, specified by
	// ID. In the response, the `workspace_status` field indicates the current
	// status. After initial workspace creation (which is asynchronous), make
	// repeated `GET` requests with the workspace ID and check its status. The
	// workspace becomes available when the status changes to `RUNNING`. For
	// information about how to create a new workspace with this API **including
	// error handling**, see [Create a new workspace using the Account API].
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	GetByWorkspaceId(ctx context.Context, workspaceId int64) (*Workspace, error)

	// Lists Databricks workspaces for an account.
	List(ctx context.Context) ([]Workspace, error)

	// WorkspaceWorkspaceNameToWorkspaceIdMap calls [WorkspacesAPI.List] and creates a map of results with [Workspace].WorkspaceName as key and [Workspace].WorkspaceId as value.
	//
	// Returns an error if there's more than one [Workspace] with the same .WorkspaceName.
	//
	// Note: All [Workspace] instances are loaded into memory before creating a map.
	//
	// This method is generated by Databricks SDK Code Generator.
	WorkspaceWorkspaceNameToWorkspaceIdMap(ctx context.Context) (map[string]int64, error)

	// GetByWorkspaceName calls [WorkspacesAPI.WorkspaceWorkspaceNameToWorkspaceIdMap] and returns a single [Workspace].
	//
	// Returns an error if there's more than one [Workspace] with the same .WorkspaceName.
	//
	// Note: All [Workspace] instances are loaded into memory before returning matching by name.
	//
	// This method is generated by Databricks SDK Code Generator.
	GetByWorkspaceName(ctx context.Context, name string) (*Workspace, error)

	// Updates a workspace.
	Update(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest) (*WaitGetWorkspaceRunning[Workspace], error)

	// Calls [WorkspacesAPIInterface.Update] and waits to reach RUNNING state
	//
	// You can override the default timeout of 20 minutes by adding the
	// retries.Timeout[Workspace](60*time.Minute) functional option.
	//
	// Deprecated: use [WorkspacesAPIInterface.Update].Get() or [WorkspacesAPIInterface.WaitGetWorkspaceRunning]
	UpdateAndWait(ctx context.Context, updateWorkspaceRequest UpdateWorkspaceRequest, options ...retries.Option[Workspace]) (*Workspace, error)
}

type WorkspacesService deprecated

// WorkspacesService manages workspaces for this account. A Databricks
// workspace is an environment for accessing all of your Databricks assets.
//
// These endpoints are available if your account is on the E2 version of the
// platform or on a select custom plan that allows multiple workspaces per
// account.
//
// Deprecated: Do not use this interface, it will be removed in a future
// version of the SDK.
type WorkspacesService interface {

	// Creates a new workspace using a credential configuration and a storage
	// configuration, an optional network configuration (if using a
	// customer-managed VPC), an optional managed services key configuration (if
	// using customer-managed keys for managed services), and an optional
	// storage key configuration (if using customer-managed keys for storage).
	// The key configurations used for managed services and storage encryption
	// can be the same or different.
	//
	// Important: This operation is asynchronous. A response with HTTP status
	// code 200 means the request has been accepted and is in progress, but does
	// not mean that the workspace deployed successfully and is running. The
	// initial workspace status is typically PROVISIONING. Use the workspace ID
	// (workspace_id) field in the response to identify the new workspace and
	// make repeated GET requests with the workspace ID and check its status.
	// The workspace becomes available when the status changes to RUNNING.
	//
	// You can share one customer-managed VPC with multiple workspaces in a
	// single account. It is not required to create a new VPC for each
	// workspace. However, you cannot reuse subnets or Security Groups between
	// workspaces. If you plan to share one VPC with multiple workspaces, make
	// sure you size your VPC and subnets accordingly. Because a Databricks
	// Account API network configuration encapsulates this information, you
	// cannot reuse a Databricks Account API network configuration across
	// workspaces.
	//
	// For information about how to create a new workspace with this API
	// including error handling, see [Create a new workspace using the Account
	// API].
	//
	// Important: Customer-managed VPCs, PrivateLink, and customer-managed keys
	// are supported on a limited set of deployment and subscription types. If
	// you have questions about availability, contact your Databricks
	// representative.
	//
	// This operation is available only if your account is on the E2 version of
	// the platform or on a select custom plan that allows multiple workspaces
	// per account.
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Create(ctx context.Context, request CreateWorkspaceRequest) (*Workspace, error)

	// Deletes a Databricks workspace, specified by ID.
	Delete(ctx context.Context, request DeleteWorkspaceRequest) (*Workspace, error)

	// Gets information including status for a Databricks workspace, specified
	// by ID. In the response, the `workspace_status` field indicates the
	// current status. After initial workspace creation (which is asynchronous),
	// make repeated `GET` requests with the workspace ID and check its status.
	// The workspace becomes available when the status changes to `RUNNING`. For
	// information about how to create a new workspace with this API **including
	// error handling**, see [Create a new workspace using the Account API].
	//
	// [Create a new workspace using the Account API]: http://docs.databricks.com/administration-guide/account-api/new-workspace.html
	Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error)

	// Lists Databricks workspaces for an account.
	List(ctx context.Context) ([]Workspace, error)

	// Updates a workspace.
	Update(ctx context.Context, request UpdateWorkspaceRequest) (*Workspace, error)
}

These APIs manage workspaces for this account. A Databricks workspace is an environment for accessing all of your Databricks assets. The workspace organizes objects (notebooks, libraries, and experiments) into folders, and provides access to data and computational resources such as clusters and jobs.

These endpoints are available if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account.

Deprecated: Do not use this interface, it will be removed in a future version of the SDK.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL