Documentation
¶
Overview ¶
Generate deepcopy object for dataproc/v1beta1 API group
Package v1beta1 contains API Schema definitions for the dataproc v1beta1 API group. +k8s:openapi-gen=true +k8s:deepcopy-gen=package,register +k8s:conversion-gen=github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/pkg/apis/dataproc +k8s:defaulter-gen=TypeMeta +groupName=dataproc.cnrm.cloud.google.com
Index ¶
- Variables
- type AutoscalingpolicyBasicAlgorithm
- type AutoscalingpolicySecondaryWorkerConfig
- type AutoscalingpolicyWorkerConfig
- type AutoscalingpolicyYarnConfig
- type ClusterAccelerators
- type ClusterAutoscalingConfig
- type ClusterConfig
- type ClusterConfigStatus
- type ClusterDiskConfig
- type ClusterEncryptionConfig
- type ClusterEndpointConfig
- type ClusterEndpointConfigStatus
- type ClusterGceClusterConfig
- type ClusterInitializationActions
- type ClusterKerberosConfig
- type ClusterLifecycleConfig
- type ClusterLifecycleConfigStatus
- type ClusterManagedGroupConfigStatus
- type ClusterMasterConfig
- type ClusterMasterConfigStatus
- type ClusterMetricsStatus
- type ClusterNodeGroupAffinity
- type ClusterReservationAffinity
- type ClusterSecondaryWorkerConfig
- type ClusterSecondaryWorkerConfigStatus
- type ClusterSecurityConfig
- type ClusterSoftwareConfig
- type ClusterStatusHistoryStatus
- type ClusterStatusStatus
- type ClusterWorkerConfig
- type ClusterWorkerConfigStatus
- type DataprocAutoscalingPolicy
- type DataprocAutoscalingPolicyList
- type DataprocAutoscalingPolicySpec
- type DataprocAutoscalingPolicyStatus
- type DataprocCluster
- type DataprocClusterList
- type DataprocClusterSpec
- type DataprocClusterStatus
- type DataprocWorkflowTemplate
- type DataprocWorkflowTemplateList
- type DataprocWorkflowTemplateSpec
- type DataprocWorkflowTemplateStatus
- type WorkflowtemplateAccelerators
- type WorkflowtemplateAutoscalingConfig
- type WorkflowtemplateClusterSelector
- type WorkflowtemplateConfig
- type WorkflowtemplateConfigStatus
- type WorkflowtemplateDiskConfig
- type WorkflowtemplateEncryptionConfig
- type WorkflowtemplateEndpointConfig
- type WorkflowtemplateEndpointConfigStatus
- type WorkflowtemplateGceClusterConfig
- type WorkflowtemplateHadoopJob
- type WorkflowtemplateHiveJob
- type WorkflowtemplateInitializationActions
- type WorkflowtemplateJobs
- type WorkflowtemplateKerberosConfig
- type WorkflowtemplateLifecycleConfig
- type WorkflowtemplateLifecycleConfigStatus
- type WorkflowtemplateLoggingConfig
- type WorkflowtemplateManagedCluster
- type WorkflowtemplateManagedClusterStatus
- type WorkflowtemplateManagedGroupConfigStatus
- type WorkflowtemplateMasterConfig
- type WorkflowtemplateMasterConfigStatus
- type WorkflowtemplateNodeGroupAffinity
- type WorkflowtemplateParameters
- type WorkflowtemplatePigJob
- type WorkflowtemplatePlacement
- type WorkflowtemplatePlacementStatus
- type WorkflowtemplatePrestoJob
- type WorkflowtemplatePysparkJob
- type WorkflowtemplateQueryList
- type WorkflowtemplateRegex
- type WorkflowtemplateReservationAffinity
- type WorkflowtemplateScheduling
- type WorkflowtemplateSecondaryWorkerConfig
- type WorkflowtemplateSecondaryWorkerConfigStatus
- type WorkflowtemplateSecurityConfig
- type WorkflowtemplateSoftwareConfig
- type WorkflowtemplateSparkJob
- type WorkflowtemplateSparkRJob
- type WorkflowtemplateSparkSqlJob
- type WorkflowtemplateValidation
- type WorkflowtemplateValues
- type WorkflowtemplateWorkerConfig
- type WorkflowtemplateWorkerConfigStatus
Constants ¶
This section is empty.
Variables ¶
var ( // SchemeGroupVersion is the group version used to register these objects. SchemeGroupVersion = schema.GroupVersion{Group: "dataproc.cnrm.cloud.google.com", Version: "v1beta1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme. SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} // AddToScheme is a global function that registers this API group & version to a scheme AddToScheme = SchemeBuilder.AddToScheme DataprocAutoscalingPolicyGVK = schema.GroupVersionKind{ Group: SchemeGroupVersion.Group, Version: SchemeGroupVersion.Version, Kind: reflect.TypeOf(DataprocAutoscalingPolicy{}).Name(), } DataprocClusterGVK = schema.GroupVersionKind{ Group: SchemeGroupVersion.Group, Version: SchemeGroupVersion.Version, Kind: reflect.TypeOf(DataprocCluster{}).Name(), } DataprocWorkflowTemplateGVK = schema.GroupVersionKind{ Group: SchemeGroupVersion.Group, Version: SchemeGroupVersion.Version, Kind: reflect.TypeOf(DataprocWorkflowTemplate{}).Name(), } )
Functions ¶
This section is empty.
Types ¶
type AutoscalingpolicyBasicAlgorithm ¶
type AutoscalingpolicyBasicAlgorithm struct {
/* Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed. Bounds: [2m, 1d). Default: 2m. */
// +optional
CooldownPeriod *string `json:"cooldownPeriod,omitempty"`
/* Required. YARN autoscaling configuration. */
YarnConfig AutoscalingpolicyYarnConfig `json:"yarnConfig"`
}
func (*AutoscalingpolicyBasicAlgorithm) DeepCopy ¶
func (in *AutoscalingpolicyBasicAlgorithm) DeepCopy() *AutoscalingpolicyBasicAlgorithm
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingpolicyBasicAlgorithm.
func (*AutoscalingpolicyBasicAlgorithm) DeepCopyInto ¶
func (in *AutoscalingpolicyBasicAlgorithm) DeepCopyInto(out *AutoscalingpolicyBasicAlgorithm)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type AutoscalingpolicySecondaryWorkerConfig ¶
type AutoscalingpolicySecondaryWorkerConfig struct {
/* Optional. Maximum number of instances for this group. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0. */
// +optional
MaxInstances *int `json:"maxInstances,omitempty"`
/* Optional. Minimum number of instances for this group. Primary workers - Bounds: . Default: 0. */
// +optional
MinInstances *int `json:"minInstances,omitempty"`
/* Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if `max_instances` for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers. */
// +optional
Weight *int `json:"weight,omitempty"`
}
func (*AutoscalingpolicySecondaryWorkerConfig) DeepCopy ¶
func (in *AutoscalingpolicySecondaryWorkerConfig) DeepCopy() *AutoscalingpolicySecondaryWorkerConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingpolicySecondaryWorkerConfig.
func (*AutoscalingpolicySecondaryWorkerConfig) DeepCopyInto ¶
func (in *AutoscalingpolicySecondaryWorkerConfig) DeepCopyInto(out *AutoscalingpolicySecondaryWorkerConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type AutoscalingpolicyWorkerConfig ¶
type AutoscalingpolicyWorkerConfig struct {
/* Required. Maximum number of instances for this group. Required for primary workers. Note that by default, clusters will not use secondary workers. Required for secondary workers if the minimum secondary instances is set. Primary workers - Bounds: [min_instances, ). Secondary workers - Bounds: [min_instances, ). Default: 0. */
MaxInstances int `json:"maxInstances"`
/* Optional. Minimum number of instances for this group. Primary workers - Bounds: . Default: 0. */
// +optional
MinInstances *int `json:"minInstances,omitempty"`
/* Optional. Weight for the instance group, which is used to determine the fraction of total workers in the cluster from this instance group. For example, if primary workers have weight 2, and secondary workers have weight 1, the cluster will have approximately 2 primary workers for each secondary worker. The cluster may not reach the specified balance if constrained by min/max bounds or other autoscaling settings. For example, if `max_instances` for secondary workers is 0, then only primary workers will be added. The cluster can also be out of balance when created. If weight is not set on any instance group, the cluster will default to equal weight for all groups: the cluster will attempt to maintain an equal number of workers in each group within the configured size bounds for each group. If weight is set for one group only, the cluster will default to zero weight on the unset group. For example if weight is set only on primary workers, the cluster will use primary workers only and no secondary workers. */
// +optional
Weight *int `json:"weight,omitempty"`
}
func (*AutoscalingpolicyWorkerConfig) DeepCopy ¶
func (in *AutoscalingpolicyWorkerConfig) DeepCopy() *AutoscalingpolicyWorkerConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingpolicyWorkerConfig.
func (*AutoscalingpolicyWorkerConfig) DeepCopyInto ¶
func (in *AutoscalingpolicyWorkerConfig) DeepCopyInto(out *AutoscalingpolicyWorkerConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type AutoscalingpolicyYarnConfig ¶
type AutoscalingpolicyYarnConfig struct {
/* Required. Timeout for YARN graceful decommissioning of Node Managers. Specifies the duration to wait for jobs to complete before forcefully removing workers (and potentially interrupting jobs). Only applicable to downscaling operations. */
GracefulDecommissionTimeout string `json:"gracefulDecommissionTimeout"`
/* Required. Fraction of average YARN pending memory in the last cooldown period for which to remove workers. A scale-down factor of 1 will result in scaling down so that there is no available memory remaining after the update (more aggressive scaling). A scale-down factor of 0 disables removing workers, which can be beneficial for autoscaling a single job. See https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling for more information. */
ScaleDownFactor float64 `json:"scaleDownFactor"`
/* Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0. */
// +optional
ScaleDownMinWorkerFraction *float64 `json:"scaleDownMinWorkerFraction,omitempty"`
/* Required. Fraction of average YARN pending memory in the last cooldown period for which to add workers. A scale-up factor of 1.0 will result in scaling up so that there is no pending memory remaining after the update (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling). See https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling for more information. */
ScaleUpFactor float64 `json:"scaleUpFactor"`
/* Optional. Minimum scale-up threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2-worker scale-up for the cluster to scale. A threshold of 0 means the autoscaler will scale up on any recommended change. Bounds: [0.0, 1.0]. Default: 0.0. */
// +optional
ScaleUpMinWorkerFraction *float64 `json:"scaleUpMinWorkerFraction,omitempty"`
}
func (*AutoscalingpolicyYarnConfig) DeepCopy ¶
func (in *AutoscalingpolicyYarnConfig) DeepCopy() *AutoscalingpolicyYarnConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalingpolicyYarnConfig.
func (*AutoscalingpolicyYarnConfig) DeepCopyInto ¶
func (in *AutoscalingpolicyYarnConfig) DeepCopyInto(out *AutoscalingpolicyYarnConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterAccelerators ¶
type ClusterAccelerators struct {
/* Immutable. The number of the accelerator cards of this type exposed to this instance. */
// +optional
AcceleratorCount *int `json:"acceleratorCount,omitempty"`
/* Immutable. Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`. */
// +optional
AcceleratorType *string `json:"acceleratorType,omitempty"`
}
func (*ClusterAccelerators) DeepCopy ¶
func (in *ClusterAccelerators) DeepCopy() *ClusterAccelerators
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAccelerators.
func (*ClusterAccelerators) DeepCopyInto ¶
func (in *ClusterAccelerators) DeepCopyInto(out *ClusterAccelerators)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterAutoscalingConfig ¶
type ClusterAutoscalingConfig struct {
/* Immutable. */
// +optional
PolicyRef *v1alpha1.ResourceRef `json:"policyRef,omitempty"`
}
func (*ClusterAutoscalingConfig) DeepCopy ¶
func (in *ClusterAutoscalingConfig) DeepCopy() *ClusterAutoscalingConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAutoscalingConfig.
func (*ClusterAutoscalingConfig) DeepCopyInto ¶
func (in *ClusterAutoscalingConfig) DeepCopyInto(out *ClusterAutoscalingConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterConfig ¶
type ClusterConfig struct {
/* Immutable. Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset. */
// +optional
AutoscalingConfig *ClusterAutoscalingConfig `json:"autoscalingConfig,omitempty"`
/* Immutable. Optional. Encryption settings for the cluster. */
// +optional
EncryptionConfig *ClusterEncryptionConfig `json:"encryptionConfig,omitempty"`
/* Immutable. Optional. Port/endpoint configuration for this cluster. */
// +optional
EndpointConfig *ClusterEndpointConfig `json:"endpointConfig,omitempty"`
/* Immutable. Optional. The shared Compute Engine config settings for all instances in a cluster. */
// +optional
GceClusterConfig *ClusterGceClusterConfig `json:"gceClusterConfig,omitempty"`
/* Immutable. Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi */
// +optional
InitializationActions []ClusterInitializationActions `json:"initializationActions,omitempty"`
/* Immutable. Optional. Lifecycle setting for the cluster. */
// +optional
LifecycleConfig *ClusterLifecycleConfig `json:"lifecycleConfig,omitempty"`
/* Immutable. Optional. The Compute Engine config settings for the master instance in a cluster. */
// +optional
MasterConfig *ClusterMasterConfig `json:"masterConfig,omitempty"`
/* Immutable. Optional. The Compute Engine config settings for additional worker instances in a cluster. */
// +optional
SecondaryWorkerConfig *ClusterSecondaryWorkerConfig `json:"secondaryWorkerConfig,omitempty"`
/* Immutable. Optional. Security settings for the cluster. */
// +optional
SecurityConfig *ClusterSecurityConfig `json:"securityConfig,omitempty"`
/* Immutable. Optional. The config settings for software inside the cluster. */
// +optional
SoftwareConfig *ClusterSoftwareConfig `json:"softwareConfig,omitempty"`
/* Immutable. */
// +optional
StagingBucketRef *v1alpha1.ResourceRef `json:"stagingBucketRef,omitempty"`
/* Immutable. */
// +optional
TempBucketRef *v1alpha1.ResourceRef `json:"tempBucketRef,omitempty"`
/* Immutable. Optional. The Compute Engine config settings for worker instances in a cluster. */
// +optional
WorkerConfig *ClusterWorkerConfig `json:"workerConfig,omitempty"`
}
func (*ClusterConfig) DeepCopy ¶
func (in *ClusterConfig) DeepCopy() *ClusterConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfig.
func (*ClusterConfig) DeepCopyInto ¶
func (in *ClusterConfig) DeepCopyInto(out *ClusterConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterConfigStatus ¶
type ClusterConfigStatus struct {
/* */
EndpointConfig ClusterEndpointConfigStatus `json:"endpointConfig,omitempty"`
/* */
LifecycleConfig ClusterLifecycleConfigStatus `json:"lifecycleConfig,omitempty"`
/* */
MasterConfig ClusterMasterConfigStatus `json:"masterConfig,omitempty"`
/* */
SecondaryWorkerConfig ClusterSecondaryWorkerConfigStatus `json:"secondaryWorkerConfig,omitempty"`
/* */
WorkerConfig ClusterWorkerConfigStatus `json:"workerConfig,omitempty"`
}
func (*ClusterConfigStatus) DeepCopy ¶
func (in *ClusterConfigStatus) DeepCopy() *ClusterConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterConfigStatus.
func (*ClusterConfigStatus) DeepCopyInto ¶
func (in *ClusterConfigStatus) DeepCopyInto(out *ClusterConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterDiskConfig ¶
type ClusterDiskConfig struct {
/* Immutable. Optional. Size in GB of the boot disk (default is 500GB). */
// +optional
BootDiskSizeGb *int `json:"bootDiskSizeGb,omitempty"`
/* Immutable. Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types). */
// +optional
BootDiskType *string `json:"bootDiskType,omitempty"`
/* Immutable. Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. */
// +optional
NumLocalSsds *int `json:"numLocalSsds,omitempty"`
}
func (*ClusterDiskConfig) DeepCopy ¶
func (in *ClusterDiskConfig) DeepCopy() *ClusterDiskConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDiskConfig.
func (*ClusterDiskConfig) DeepCopyInto ¶
func (in *ClusterDiskConfig) DeepCopyInto(out *ClusterDiskConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterEncryptionConfig ¶
type ClusterEncryptionConfig struct {
/* Immutable. */
// +optional
GcePdKmsKeyRef *v1alpha1.ResourceRef `json:"gcePdKmsKeyRef,omitempty"`
}
func (*ClusterEncryptionConfig) DeepCopy ¶
func (in *ClusterEncryptionConfig) DeepCopy() *ClusterEncryptionConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterEncryptionConfig.
func (*ClusterEncryptionConfig) DeepCopyInto ¶
func (in *ClusterEncryptionConfig) DeepCopyInto(out *ClusterEncryptionConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterEndpointConfig ¶
type ClusterEndpointConfig struct {
/* Immutable. Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false. */
// +optional
EnableHttpPortAccess *bool `json:"enableHttpPortAccess,omitempty"`
}
func (*ClusterEndpointConfig) DeepCopy ¶
func (in *ClusterEndpointConfig) DeepCopy() *ClusterEndpointConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterEndpointConfig.
func (*ClusterEndpointConfig) DeepCopyInto ¶
func (in *ClusterEndpointConfig) DeepCopyInto(out *ClusterEndpointConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterEndpointConfigStatus ¶
type ClusterEndpointConfigStatus struct {
/* Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. */
HttpPorts map[string]string `json:"httpPorts,omitempty"`
}
func (*ClusterEndpointConfigStatus) DeepCopy ¶
func (in *ClusterEndpointConfigStatus) DeepCopy() *ClusterEndpointConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterEndpointConfigStatus.
func (*ClusterEndpointConfigStatus) DeepCopyInto ¶
func (in *ClusterEndpointConfigStatus) DeepCopyInto(out *ClusterEndpointConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterGceClusterConfig ¶
type ClusterGceClusterConfig struct {
/* Immutable. Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. */
// +optional
InternalIPOnly *bool `json:"internalIPOnly,omitempty"`
/* Immutable. The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). */
// +optional
Metadata map[string]string `json:"metadata,omitempty"`
/* Immutable. */
// +optional
NetworkRef *v1alpha1.ResourceRef `json:"networkRef,omitempty"`
/* Immutable. Optional. Node Group Affinity for sole-tenant clusters. */
// +optional
NodeGroupAffinity *ClusterNodeGroupAffinity `json:"nodeGroupAffinity,omitempty"`
/* Immutable. Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL */
// +optional
PrivateIPv6GoogleAccess *string `json:"privateIPv6GoogleAccess,omitempty"`
/* Immutable. Optional. Reservation Affinity for consuming Zonal reservation. */
// +optional
ReservationAffinity *ClusterReservationAffinity `json:"reservationAffinity,omitempty"`
/* Immutable. */
// +optional
ServiceAccountRef *v1alpha1.ResourceRef `json:"serviceAccountRef,omitempty"`
/* Immutable. Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control */
// +optional
ServiceAccountScopes []string `json:"serviceAccountScopes,omitempty"`
/* Immutable. */
// +optional
SubnetworkRef *v1alpha1.ResourceRef `json:"subnetworkRef,omitempty"`
/* Immutable. The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). */
// +optional
Tags []string `json:"tags,omitempty"`
/* Immutable. Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f` */
// +optional
Zone *string `json:"zone,omitempty"`
}
func (*ClusterGceClusterConfig) DeepCopy ¶
func (in *ClusterGceClusterConfig) DeepCopy() *ClusterGceClusterConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterGceClusterConfig.
func (*ClusterGceClusterConfig) DeepCopyInto ¶
func (in *ClusterGceClusterConfig) DeepCopyInto(out *ClusterGceClusterConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterInitializationActions ¶
type ClusterInitializationActions struct {
/* Immutable. Required. Cloud Storage URI of executable file. */
ExecutableFile string `json:"executableFile"`
/* Immutable. Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. */
// +optional
ExecutionTimeout *string `json:"executionTimeout,omitempty"`
}
func (*ClusterInitializationActions) DeepCopy ¶
func (in *ClusterInitializationActions) DeepCopy() *ClusterInitializationActions
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInitializationActions.
func (*ClusterInitializationActions) DeepCopyInto ¶
func (in *ClusterInitializationActions) DeepCopyInto(out *ClusterInitializationActions)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterKerberosConfig ¶
type ClusterKerberosConfig struct {
/* Immutable. Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. */
// +optional
CrossRealmTrustAdminServer *string `json:"crossRealmTrustAdminServer,omitempty"`
/* Immutable. Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. */
// +optional
CrossRealmTrustKdc *string `json:"crossRealmTrustKdc,omitempty"`
/* Immutable. Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. */
// +optional
CrossRealmTrustRealm *string `json:"crossRealmTrustRealm,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship. */
// +optional
CrossRealmTrustSharedPassword *string `json:"crossRealmTrustSharedPassword,omitempty"`
/* Immutable. Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. */
// +optional
EnableKerberos *bool `json:"enableKerberos,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. */
// +optional
KdcDbKey *string `json:"kdcDbKey,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. */
// +optional
KeyPassword *string `json:"keyPassword,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. */
// +optional
Keystore *string `json:"keystore,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. */
// +optional
KeystorePassword *string `json:"keystorePassword,omitempty"`
/* Immutable. */
// +optional
KmsKeyRef *v1alpha1.ResourceRef `json:"kmsKeyRef,omitempty"`
/* Immutable. Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. */
// +optional
Realm *string `json:"realm,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password. */
// +optional
RootPrincipalPassword *string `json:"rootPrincipalPassword,omitempty"`
/* Immutable. Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used. */
// +optional
TgtLifetimeHours *int `json:"tgtLifetimeHours,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. */
// +optional
Truststore *string `json:"truststore,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. */
// +optional
TruststorePassword *string `json:"truststorePassword,omitempty"`
}
func (*ClusterKerberosConfig) DeepCopy ¶
func (in *ClusterKerberosConfig) DeepCopy() *ClusterKerberosConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterKerberosConfig.
func (*ClusterKerberosConfig) DeepCopyInto ¶
func (in *ClusterKerberosConfig) DeepCopyInto(out *ClusterKerberosConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterLifecycleConfig ¶
type ClusterLifecycleConfig struct {
/* Immutable. Optional. The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
// +optional
AutoDeleteTime *string `json:"autoDeleteTime,omitempty"`
/* Immutable. Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
// +optional
AutoDeleteTtl *string `json:"autoDeleteTtl,omitempty"`
/* Immutable. Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
// +optional
IdleDeleteTtl *string `json:"idleDeleteTtl,omitempty"`
}
func (*ClusterLifecycleConfig) DeepCopy ¶
func (in *ClusterLifecycleConfig) DeepCopy() *ClusterLifecycleConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLifecycleConfig.
func (*ClusterLifecycleConfig) DeepCopyInto ¶
func (in *ClusterLifecycleConfig) DeepCopyInto(out *ClusterLifecycleConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterLifecycleConfigStatus ¶
// ClusterLifecycleConfigStatus holds the output-only lifecycle state
// reported by Dataproc for a cluster.
type ClusterLifecycleConfigStatus struct {
/* Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
IdleStartTime string `json:"idleStartTime,omitempty"`
}
func (*ClusterLifecycleConfigStatus) DeepCopy ¶
func (in *ClusterLifecycleConfigStatus) DeepCopy() *ClusterLifecycleConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterLifecycleConfigStatus.
func (*ClusterLifecycleConfigStatus) DeepCopyInto ¶
func (in *ClusterLifecycleConfigStatus) DeepCopyInto(out *ClusterLifecycleConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterManagedGroupConfigStatus ¶
// ClusterManagedGroupConfigStatus identifies the Compute Engine managed
// instance group resources backing an instance group (output only).
type ClusterManagedGroupConfigStatus struct {
/* Output only. The name of the Instance Group Manager for this group. */
InstanceGroupManagerName string `json:"instanceGroupManagerName,omitempty"`
/* Output only. The name of the Instance Template used for the Managed Instance Group. */
InstanceTemplateName string `json:"instanceTemplateName,omitempty"`
}
func (*ClusterManagedGroupConfigStatus) DeepCopy ¶
func (in *ClusterManagedGroupConfigStatus) DeepCopy() *ClusterManagedGroupConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterManagedGroupConfigStatus.
func (*ClusterManagedGroupConfigStatus) DeepCopyInto ¶
func (in *ClusterManagedGroupConfigStatus) DeepCopyInto(out *ClusterManagedGroupConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterMasterConfig ¶
// ClusterMasterConfig describes the master instance group of the cluster:
// machine type, disks, accelerators, instance count, and preemptibility.
type ClusterMasterConfig struct {
/* Immutable. Optional. The Compute Engine accelerator configuration for these instances. */
// +optional
Accelerators []ClusterAccelerators `json:"accelerators,omitempty"`
/* Immutable. Optional. Disk option config settings. */
// +optional
DiskConfig *ClusterDiskConfig `json:"diskConfig,omitempty"`
/* Immutable. Reference to an image resource. NOTE(review): the referenced resource kind is not visible here — presumably a Compute Engine image used for the instances; confirm against the resource reference config. */
// +optional
ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`
/* Immutable. Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
// +optional
MachineType *string `json:"machineType,omitempty"`
/* Immutable. Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
// +optional
MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`
/* Immutable. Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**. */
// +optional
NumInstances *int `json:"numInstances,omitempty"`
/* Immutable. Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
// +optional
Preemptibility *string `json:"preemptibility,omitempty"`
}
func (*ClusterMasterConfig) DeepCopy ¶
func (in *ClusterMasterConfig) DeepCopy() *ClusterMasterConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterConfig.
func (*ClusterMasterConfig) DeepCopyInto ¶
func (in *ClusterMasterConfig) DeepCopyInto(out *ClusterMasterConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterMasterConfigStatus ¶
// ClusterMasterConfigStatus holds the output-only observed state of the
// master instance group.
type ClusterMasterConfigStatus struct {
/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
InstanceNames []string `json:"instanceNames,omitempty"`
/* Output only. Specifies that this instance group contains preemptible instances. */
IsPreemptible bool `json:"isPreemptible,omitempty"`
/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
ManagedGroupConfig ClusterManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}
func (*ClusterMasterConfigStatus) DeepCopy ¶
func (in *ClusterMasterConfigStatus) DeepCopy() *ClusterMasterConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterConfigStatus.
func (*ClusterMasterConfigStatus) DeepCopyInto ¶
func (in *ClusterMasterConfigStatus) DeepCopyInto(out *ClusterMasterConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterMetricsStatus ¶
// ClusterMetricsStatus carries cluster daemon metrics (HDFS and YARN)
// reported by Dataproc, keyed by metric name.
type ClusterMetricsStatus struct {
/* The HDFS metrics. */
HdfsMetrics map[string]string `json:"hdfsMetrics,omitempty"`
/* The YARN metrics. */
YarnMetrics map[string]string `json:"yarnMetrics,omitempty"`
}
func (*ClusterMetricsStatus) DeepCopy ¶
func (in *ClusterMetricsStatus) DeepCopy() *ClusterMetricsStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMetricsStatus.
func (*ClusterMetricsStatus) DeepCopyInto ¶
func (in *ClusterMetricsStatus) DeepCopyInto(out *ClusterMetricsStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterNodeGroupAffinity ¶
// ClusterNodeGroupAffinity pins cluster instances to a node group via a
// resource reference.
type ClusterNodeGroupAffinity struct {
/* Immutable. Reference to the node group the cluster's instances are placed on. NOTE(review): the referenced resource kind is not visible here — presumably a Compute Engine sole-tenant node group; confirm. */
NodeGroupRef v1alpha1.ResourceRef `json:"nodeGroupRef"`
}
func (*ClusterNodeGroupAffinity) DeepCopy ¶
func (in *ClusterNodeGroupAffinity) DeepCopy() *ClusterNodeGroupAffinity
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNodeGroupAffinity.
func (*ClusterNodeGroupAffinity) DeepCopyInto ¶
func (in *ClusterNodeGroupAffinity) DeepCopyInto(out *ClusterNodeGroupAffinity)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterReservationAffinity ¶
// ClusterReservationAffinity configures which Compute Engine reservation
// the cluster's instances consume, matched by label key/values.
type ClusterReservationAffinity struct {
/* Immutable. Optional. Type of reservation to consume. Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION */
// +optional
ConsumeReservationType *string `json:"consumeReservationType,omitempty"`
/* Immutable. Optional. Corresponds to the label key of reservation resource. */
// +optional
Key *string `json:"key,omitempty"`
/* Immutable. Optional. Corresponds to the label values of reservation resource. */
// +optional
Values []string `json:"values,omitempty"`
}
func (*ClusterReservationAffinity) DeepCopy ¶
func (in *ClusterReservationAffinity) DeepCopy() *ClusterReservationAffinity
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterReservationAffinity.
func (*ClusterReservationAffinity) DeepCopyInto ¶
func (in *ClusterReservationAffinity) DeepCopyInto(out *ClusterReservationAffinity)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterSecondaryWorkerConfig ¶
// ClusterSecondaryWorkerConfig describes the secondary (typically
// preemptible) worker instance group of the cluster.
type ClusterSecondaryWorkerConfig struct {
/* Immutable. Optional. The Compute Engine accelerator configuration for these instances. */
// +optional
Accelerators []ClusterAccelerators `json:"accelerators,omitempty"`
/* Immutable. Optional. Disk option config settings. */
// +optional
DiskConfig *ClusterDiskConfig `json:"diskConfig,omitempty"`
/* Immutable. Reference to an image resource. NOTE(review): the referenced resource kind is not visible here — presumably a Compute Engine image used for the instances; confirm against the resource reference config. */
// +optional
ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`
/* Immutable. Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
// +optional
MachineType *string `json:"machineType,omitempty"`
/* Immutable. Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
// +optional
MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`
/* Immutable. Optional. The number of VM instances in the instance group. NOTE(review): the original description here repeated the master_config rule ("must be set to 3" for HA, "1" for standard), which appears copy-pasted from the master group and should not constrain secondary workers — confirm against the Dataproc InstanceGroupConfig API. */
// +optional
NumInstances *int `json:"numInstances,omitempty"`
/* Immutable. Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
// +optional
Preemptibility *string `json:"preemptibility,omitempty"`
}
func (*ClusterSecondaryWorkerConfig) DeepCopy ¶
func (in *ClusterSecondaryWorkerConfig) DeepCopy() *ClusterSecondaryWorkerConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSecondaryWorkerConfig.
func (*ClusterSecondaryWorkerConfig) DeepCopyInto ¶
func (in *ClusterSecondaryWorkerConfig) DeepCopyInto(out *ClusterSecondaryWorkerConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterSecondaryWorkerConfigStatus ¶
// ClusterSecondaryWorkerConfigStatus holds the output-only observed state
// of the secondary worker instance group.
type ClusterSecondaryWorkerConfigStatus struct {
/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
InstanceNames []string `json:"instanceNames,omitempty"`
/* Output only. Specifies that this instance group contains preemptible instances. */
IsPreemptible bool `json:"isPreemptible,omitempty"`
/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
ManagedGroupConfig ClusterManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}
func (*ClusterSecondaryWorkerConfigStatus) DeepCopy ¶
func (in *ClusterSecondaryWorkerConfigStatus) DeepCopy() *ClusterSecondaryWorkerConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSecondaryWorkerConfigStatus.
func (*ClusterSecondaryWorkerConfigStatus) DeepCopyInto ¶
func (in *ClusterSecondaryWorkerConfigStatus) DeepCopyInto(out *ClusterSecondaryWorkerConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterSecurityConfig ¶
// ClusterSecurityConfig groups security-related settings for the cluster;
// currently only Kerberos configuration.
type ClusterSecurityConfig struct {
/* Immutable. Optional. Kerberos related configuration. */
// +optional
KerberosConfig *ClusterKerberosConfig `json:"kerberosConfig,omitempty"`
}
func (*ClusterSecurityConfig) DeepCopy ¶
func (in *ClusterSecurityConfig) DeepCopy() *ClusterSecurityConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSecurityConfig.
func (*ClusterSecurityConfig) DeepCopyInto ¶
func (in *ClusterSecurityConfig) DeepCopyInto(out *ClusterSecurityConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterSoftwareConfig ¶
// ClusterSoftwareConfig selects the Dataproc image version, optional
// components, and daemon config-file properties for the cluster.
type ClusterSoftwareConfig struct {
/* Immutable. Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the ["preview" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. */
// +optional
ImageVersion *string `json:"imageVersion,omitempty"`
/* Immutable. Optional. The set of components to activate on the cluster. */
// +optional
OptionalComponents []string `json:"optionalComponents,omitempty"`
/* Immutable. Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties). */
// +optional
Properties map[string]string `json:"properties,omitempty"`
}
func (*ClusterSoftwareConfig) DeepCopy ¶
func (in *ClusterSoftwareConfig) DeepCopy() *ClusterSoftwareConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSoftwareConfig.
func (*ClusterSoftwareConfig) DeepCopyInto ¶
func (in *ClusterSoftwareConfig) DeepCopyInto(out *ClusterSoftwareConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterStatusHistoryStatus ¶
// ClusterStatusHistoryStatus is one entry in the cluster's previous-status
// history (output only); same shape as ClusterStatusStatus.
type ClusterStatusHistoryStatus struct {
/* Optional. Output only. Details of cluster's state. */
Detail string `json:"detail,omitempty"`
/* Output only. The cluster's state. Possible values: UNKNOWN, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, STOPPED, STARTING */
State string `json:"state,omitempty"`
/* Output only. Time when this state was entered (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
StateStartTime string `json:"stateStartTime,omitempty"`
/* Output only. Additional state information that includes status reported by the agent. Possible values: UNSPECIFIED, UNHEALTHY, STALE_STATUS */
Substate string `json:"substate,omitempty"`
}
func (*ClusterStatusHistoryStatus) DeepCopy ¶
func (in *ClusterStatusHistoryStatus) DeepCopy() *ClusterStatusHistoryStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatusHistoryStatus.
func (*ClusterStatusHistoryStatus) DeepCopyInto ¶
func (in *ClusterStatusHistoryStatus) DeepCopyInto(out *ClusterStatusHistoryStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterStatusStatus ¶
// ClusterStatusStatus is the cluster's current status as reported by
// Dataproc (output only).
type ClusterStatusStatus struct {
/* Optional. Output only. Details of cluster's state. */
Detail string `json:"detail,omitempty"`
/* Output only. The cluster's state. Possible values: UNKNOWN, CREATING, RUNNING, ERROR, DELETING, UPDATING, STOPPING, STOPPED, STARTING */
State string `json:"state,omitempty"`
/* Output only. Time when this state was entered (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
StateStartTime string `json:"stateStartTime,omitempty"`
/* Output only. Additional state information that includes status reported by the agent. Possible values: UNSPECIFIED, UNHEALTHY, STALE_STATUS */
Substate string `json:"substate,omitempty"`
}
func (*ClusterStatusStatus) DeepCopy ¶
func (in *ClusterStatusStatus) DeepCopy() *ClusterStatusStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatusStatus.
func (*ClusterStatusStatus) DeepCopyInto ¶
func (in *ClusterStatusStatus) DeepCopyInto(out *ClusterStatusStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterWorkerConfig ¶
// ClusterWorkerConfig describes the primary worker instance group of the
// cluster: machine type, disks, accelerators, instance count, and
// preemptibility.
type ClusterWorkerConfig struct {
/* Immutable. Optional. The Compute Engine accelerator configuration for these instances. */
// +optional
Accelerators []ClusterAccelerators `json:"accelerators,omitempty"`
/* Immutable. Optional. Disk option config settings. */
// +optional
DiskConfig *ClusterDiskConfig `json:"diskConfig,omitempty"`
/* Immutable. Reference to an image resource. NOTE(review): the referenced resource kind is not visible here — presumably a Compute Engine image used for the instances; confirm against the resource reference config. */
// +optional
ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`
/* Immutable. Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
// +optional
MachineType *string `json:"machineType,omitempty"`
/* Immutable. Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
// +optional
MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`
/* Immutable. Optional. The number of VM instances in the instance group. NOTE(review): the original description here repeated the master_config rule ("must be set to 3" for HA, "1" for standard), which appears copy-pasted from the master group and should not constrain primary workers — confirm against the Dataproc InstanceGroupConfig API. */
// +optional
NumInstances *int `json:"numInstances,omitempty"`
/* Immutable. Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
// +optional
Preemptibility *string `json:"preemptibility,omitempty"`
}
func (*ClusterWorkerConfig) DeepCopy ¶
func (in *ClusterWorkerConfig) DeepCopy() *ClusterWorkerConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkerConfig.
func (*ClusterWorkerConfig) DeepCopyInto ¶
func (in *ClusterWorkerConfig) DeepCopyInto(out *ClusterWorkerConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterWorkerConfigStatus ¶
// ClusterWorkerConfigStatus holds the output-only observed state of the
// primary worker instance group.
type ClusterWorkerConfigStatus struct {
/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
InstanceNames []string `json:"instanceNames,omitempty"`
/* Output only. Specifies that this instance group contains preemptible instances. */
IsPreemptible bool `json:"isPreemptible,omitempty"`
/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
ManagedGroupConfig ClusterManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}
func (*ClusterWorkerConfigStatus) DeepCopy ¶
func (in *ClusterWorkerConfigStatus) DeepCopy() *ClusterWorkerConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterWorkerConfigStatus.
func (*ClusterWorkerConfigStatus) DeepCopyInto ¶
func (in *ClusterWorkerConfigStatus) DeepCopyInto(out *ClusterWorkerConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DataprocAutoscalingPolicy ¶
// DataprocAutoscalingPolicy is the Schema for the dataproc API.
// +k8s:openapi-gen=true
type DataprocAutoscalingPolicy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec holds the user-declared desired state of the autoscaling policy.
Spec DataprocAutoscalingPolicySpec `json:"spec,omitempty"`
// Status holds the controller-observed state.
Status DataprocAutoscalingPolicyStatus `json:"status,omitempty"`
}
DataprocAutoscalingPolicy is the Schema for the dataproc API +k8s:openapi-gen=true
func (*DataprocAutoscalingPolicy) DeepCopy ¶
func (in *DataprocAutoscalingPolicy) DeepCopy() *DataprocAutoscalingPolicy
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocAutoscalingPolicy.
func (*DataprocAutoscalingPolicy) DeepCopyInto ¶
func (in *DataprocAutoscalingPolicy) DeepCopyInto(out *DataprocAutoscalingPolicy)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*DataprocAutoscalingPolicy) DeepCopyObject ¶
func (in *DataprocAutoscalingPolicy) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type DataprocAutoscalingPolicyList ¶
// DataprocAutoscalingPolicyList contains a list of DataprocAutoscalingPolicy.
type DataprocAutoscalingPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []DataprocAutoscalingPolicy `json:"items"`
}
DataprocAutoscalingPolicyList contains a list of DataprocAutoscalingPolicy
func (*DataprocAutoscalingPolicyList) DeepCopy ¶
func (in *DataprocAutoscalingPolicyList) DeepCopy() *DataprocAutoscalingPolicyList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocAutoscalingPolicyList.
func (*DataprocAutoscalingPolicyList) DeepCopyInto ¶
func (in *DataprocAutoscalingPolicyList) DeepCopyInto(out *DataprocAutoscalingPolicyList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*DataprocAutoscalingPolicyList) DeepCopyObject ¶
func (in *DataprocAutoscalingPolicyList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type DataprocAutoscalingPolicySpec ¶
// DataprocAutoscalingPolicySpec defines the desired state of a
// DataprocAutoscalingPolicy.
type DataprocAutoscalingPolicySpec struct {
/* Required. The autoscaling algorithm configuration (basic algorithm with YARN-based scaling). */
BasicAlgorithm AutoscalingpolicyBasicAlgorithm `json:"basicAlgorithm"`
/* Immutable. The location for the resource */
Location string `json:"location"`
/* Immutable. The Project that this resource belongs to. */
// +optional
ProjectRef *v1alpha1.ResourceRef `json:"projectRef,omitempty"`
/* Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default. */
// +optional
ResourceID *string `json:"resourceID,omitempty"`
/* Optional. Describes how the autoscaler will operate for secondary workers. */
// +optional
SecondaryWorkerConfig *AutoscalingpolicySecondaryWorkerConfig `json:"secondaryWorkerConfig,omitempty"`
/* Required. Describes how the autoscaler will operate for primary workers. */
WorkerConfig AutoscalingpolicyWorkerConfig `json:"workerConfig"`
}
func (*DataprocAutoscalingPolicySpec) DeepCopy ¶
func (in *DataprocAutoscalingPolicySpec) DeepCopy() *DataprocAutoscalingPolicySpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocAutoscalingPolicySpec.
func (*DataprocAutoscalingPolicySpec) DeepCopyInto ¶
func (in *DataprocAutoscalingPolicySpec) DeepCopyInto(out *DataprocAutoscalingPolicySpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DataprocAutoscalingPolicyStatus ¶
// DataprocAutoscalingPolicyStatus defines the observed state of a
// DataprocAutoscalingPolicy.
type DataprocAutoscalingPolicyStatus struct {
/* Conditions represent the latest available observations of the
DataprocAutoscalingPolicy's current state. */
Conditions []v1alpha1.Condition `json:"conditions,omitempty"`
/* ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. */
ObservedGeneration int `json:"observedGeneration,omitempty"`
}
func (*DataprocAutoscalingPolicyStatus) DeepCopy ¶
func (in *DataprocAutoscalingPolicyStatus) DeepCopy() *DataprocAutoscalingPolicyStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocAutoscalingPolicyStatus.
func (*DataprocAutoscalingPolicyStatus) DeepCopyInto ¶
func (in *DataprocAutoscalingPolicyStatus) DeepCopyInto(out *DataprocAutoscalingPolicyStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DataprocCluster ¶
// DataprocCluster is the Schema for the dataproc API.
// +k8s:openapi-gen=true
type DataprocCluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec holds the user-declared desired state of the cluster.
Spec DataprocClusterSpec `json:"spec,omitempty"`
// Status holds the controller-observed state.
Status DataprocClusterStatus `json:"status,omitempty"`
}
DataprocCluster is the Schema for the dataproc API +k8s:openapi-gen=true
func (*DataprocCluster) DeepCopy ¶
func (in *DataprocCluster) DeepCopy() *DataprocCluster
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocCluster.
func (*DataprocCluster) DeepCopyInto ¶
func (in *DataprocCluster) DeepCopyInto(out *DataprocCluster)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*DataprocCluster) DeepCopyObject ¶
func (in *DataprocCluster) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type DataprocClusterList ¶
// DataprocClusterList contains a list of DataprocCluster.
type DataprocClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []DataprocCluster `json:"items"`
}
DataprocClusterList contains a list of DataprocCluster
func (*DataprocClusterList) DeepCopy ¶
func (in *DataprocClusterList) DeepCopy() *DataprocClusterList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocClusterList.
func (*DataprocClusterList) DeepCopyInto ¶
func (in *DataprocClusterList) DeepCopyInto(out *DataprocClusterList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*DataprocClusterList) DeepCopyObject ¶
func (in *DataprocClusterList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type DataprocClusterSpec ¶
// DataprocClusterSpec defines the desired state of a DataprocCluster.
type DataprocClusterSpec struct {
/* Immutable. Required. The cluster config. Note that Dataproc may set default values, and values may change when clusters are updated. NOTE(review): the description says "Required" but the field carries the +optional marker and omitempty — confirm which is authoritative for CRD validation. */
// +optional
Config *ClusterConfig `json:"config,omitempty"`
/* Immutable. The location for the resource, usually a GCP region. */
Location string `json:"location"`
/* Immutable. The Project that this resource belongs to. */
// +optional
ProjectRef *v1alpha1.ResourceRef `json:"projectRef,omitempty"`
/* Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default. */
// +optional
ResourceID *string `json:"resourceID,omitempty"`
}
func (*DataprocClusterSpec) DeepCopy ¶
func (in *DataprocClusterSpec) DeepCopy() *DataprocClusterSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocClusterSpec.
func (*DataprocClusterSpec) DeepCopyInto ¶
func (in *DataprocClusterSpec) DeepCopyInto(out *DataprocClusterSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DataprocClusterStatus ¶
// DataprocClusterStatus defines the observed state of a DataprocCluster.
type DataprocClusterStatus struct {
/* Conditions represent the latest available observations of the
DataprocCluster's current state. */
Conditions []v1alpha1.Condition `json:"conditions,omitempty"`
/* Output only. A cluster UUID (Unique Universal Identifier). Dataproc generates this value when it creates the cluster. */
ClusterUuid string `json:"clusterUuid,omitempty"`
/* The observed cluster configuration (status counterpart of spec.config). */
Config ClusterConfigStatus `json:"config,omitempty"`
/* Output only. Contains cluster daemon metrics such as HDFS and YARN stats. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release. */
Metrics ClusterMetricsStatus `json:"metrics,omitempty"`
/* ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. */
ObservedGeneration int `json:"observedGeneration,omitempty"`
/* Output only. Cluster status. */
Status ClusterStatusStatus `json:"status,omitempty"`
/* Output only. The previous cluster status. */
StatusHistory []ClusterStatusHistoryStatus `json:"statusHistory,omitempty"`
}
func (*DataprocClusterStatus) DeepCopy ¶
func (in *DataprocClusterStatus) DeepCopy() *DataprocClusterStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocClusterStatus.
func (*DataprocClusterStatus) DeepCopyInto ¶
func (in *DataprocClusterStatus) DeepCopyInto(out *DataprocClusterStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DataprocWorkflowTemplate ¶
// DataprocWorkflowTemplate is the Schema for the dataproc API.
// +k8s:openapi-gen=true
type DataprocWorkflowTemplate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec holds the user-declared desired state of the workflow template.
Spec DataprocWorkflowTemplateSpec `json:"spec,omitempty"`
// Status holds the controller-observed state.
Status DataprocWorkflowTemplateStatus `json:"status,omitempty"`
}
DataprocWorkflowTemplate is the Schema for the dataproc API +k8s:openapi-gen=true
func (*DataprocWorkflowTemplate) DeepCopy ¶
func (in *DataprocWorkflowTemplate) DeepCopy() *DataprocWorkflowTemplate
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocWorkflowTemplate.
func (*DataprocWorkflowTemplate) DeepCopyInto ¶
func (in *DataprocWorkflowTemplate) DeepCopyInto(out *DataprocWorkflowTemplate)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*DataprocWorkflowTemplate) DeepCopyObject ¶
func (in *DataprocWorkflowTemplate) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type DataprocWorkflowTemplateList ¶
// DataprocWorkflowTemplateList contains a list of DataprocWorkflowTemplate.
type DataprocWorkflowTemplateList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []DataprocWorkflowTemplate `json:"items"`
}
DataprocWorkflowTemplateList contains a list of DataprocWorkflowTemplate
func (*DataprocWorkflowTemplateList) DeepCopy ¶
func (in *DataprocWorkflowTemplateList) DeepCopy() *DataprocWorkflowTemplateList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocWorkflowTemplateList.
func (*DataprocWorkflowTemplateList) DeepCopyInto ¶
func (in *DataprocWorkflowTemplateList) DeepCopyInto(out *DataprocWorkflowTemplateList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*DataprocWorkflowTemplateList) DeepCopyObject ¶
func (in *DataprocWorkflowTemplateList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type DataprocWorkflowTemplateSpec ¶
// DataprocWorkflowTemplateSpec defines the desired state of DataprocWorkflowTemplate.
// Fields marked "Immutable." cannot be changed after the resource is created.
type DataprocWorkflowTemplateSpec struct {
/* Immutable. Optional. Timeout duration for the DAG of jobs, expressed in seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes ("600s") to 24 hours ("86400s"). The timer begins when the first job is submitted. If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted. */
// +optional
DagTimeout *string `json:"dagTimeout,omitempty"`
/* Immutable. Required. The Directed Acyclic Graph of Jobs to submit. */
Jobs []WorkflowtemplateJobs `json:"jobs"`
/* Immutable. The location for the resource */
Location string `json:"location"`
/* Immutable. Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated. */
// +optional
Parameters []WorkflowtemplateParameters `json:"parameters,omitempty"`
/* Immutable. Required. WorkflowTemplate scheduling information. */
Placement WorkflowtemplatePlacement `json:"placement"`
/* Immutable. The Project that this resource belongs to. */
// +optional
ProjectRef *v1alpha1.ResourceRef `json:"projectRef,omitempty"`
/* Immutable. Optional. The name of the resource. Used for creation and acquisition. When unset, the value of `metadata.name` is used as the default. */
// +optional
ResourceID *string `json:"resourceID,omitempty"`
}
func (*DataprocWorkflowTemplateSpec) DeepCopy ¶
func (in *DataprocWorkflowTemplateSpec) DeepCopy() *DataprocWorkflowTemplateSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocWorkflowTemplateSpec.
func (*DataprocWorkflowTemplateSpec) DeepCopyInto ¶
func (in *DataprocWorkflowTemplateSpec) DeepCopyInto(out *DataprocWorkflowTemplateSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type DataprocWorkflowTemplateStatus ¶
// DataprocWorkflowTemplateStatus defines the observed state of DataprocWorkflowTemplate,
// as reported by the Config Connector controller.
type DataprocWorkflowTemplateStatus struct {
/* Conditions represent the latest available observations of the
DataprocWorkflowTemplate's current state. */
Conditions []v1alpha1.Condition `json:"conditions,omitempty"`
/* Output only. The time template was created. */
CreateTime string `json:"createTime,omitempty"`
/* ObservedGeneration is the generation of the resource that was most recently observed by the Config Connector controller. If this is equal to metadata.generation, then that means that the current reported status reflects the most recent desired state of the resource. */
ObservedGeneration int `json:"observedGeneration,omitempty"`
/* Observed placement details for the template; see WorkflowtemplatePlacementStatus.
NOTE(review): description missing from the generator — presumably output only; confirm against the Dataproc API. */
Placement WorkflowtemplatePlacementStatus `json:"placement,omitempty"`
/* Output only. The time template was last updated. */
UpdateTime string `json:"updateTime,omitempty"`
/* Output only. The current version of this workflow template. */
Version int `json:"version,omitempty"`
}
func (*DataprocWorkflowTemplateStatus) DeepCopy ¶
func (in *DataprocWorkflowTemplateStatus) DeepCopy() *DataprocWorkflowTemplateStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataprocWorkflowTemplateStatus.
func (*DataprocWorkflowTemplateStatus) DeepCopyInto ¶
func (in *DataprocWorkflowTemplateStatus) DeepCopyInto(out *DataprocWorkflowTemplateStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateAccelerators ¶
// WorkflowtemplateAccelerators describes accelerator cards (such as GPUs) exposed
// to a cluster node instance: how many, and of which Compute Engine accelerator type.
type WorkflowtemplateAccelerators struct {
/* Immutable. The number of the accelerator cards of this type exposed to this instance. */
// +optional
AcceleratorCount *int `json:"acceleratorCount,omitempty"`
/* Immutable. Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`. */
// +optional
AcceleratorType *string `json:"acceleratorType,omitempty"`
}
func (*WorkflowtemplateAccelerators) DeepCopy ¶
func (in *WorkflowtemplateAccelerators) DeepCopy() *WorkflowtemplateAccelerators
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateAccelerators.
func (*WorkflowtemplateAccelerators) DeepCopyInto ¶
func (in *WorkflowtemplateAccelerators) DeepCopyInto(out *WorkflowtemplateAccelerators)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateAutoscalingConfig ¶
// WorkflowtemplateAutoscalingConfig associates an autoscaling policy with the
// managed cluster. Per WorkflowtemplateConfig, the cluster does not autoscale
// when this config is unset.
type WorkflowtemplateAutoscalingConfig struct {
/* Immutable. Reference to the autoscaling policy applied to the cluster
(field name suggests it resolves to the policy's resource name; confirm against the Dataproc API). */
// +optional
PolicyRef *v1alpha1.ResourceRef `json:"policyRef,omitempty"`
}
func (*WorkflowtemplateAutoscalingConfig) DeepCopy ¶
func (in *WorkflowtemplateAutoscalingConfig) DeepCopy() *WorkflowtemplateAutoscalingConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateAutoscalingConfig.
func (*WorkflowtemplateAutoscalingConfig) DeepCopyInto ¶
func (in *WorkflowtemplateAutoscalingConfig) DeepCopyInto(out *WorkflowtemplateAutoscalingConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateClusterSelector ¶
// WorkflowtemplateClusterSelector selects an existing cluster on which to run the
// workflow, by matching cluster labels (optionally restricted to a zone).
type WorkflowtemplateClusterSelector struct {
/* Immutable. Required. The cluster labels. Cluster must have all labels to match. */
ClusterLabels map[string]string `json:"clusterLabels"`
/* Immutable. Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used. */
// +optional
Zone *string `json:"zone,omitempty"`
}
func (*WorkflowtemplateClusterSelector) DeepCopy ¶
func (in *WorkflowtemplateClusterSelector) DeepCopy() *WorkflowtemplateClusterSelector
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateClusterSelector.
func (*WorkflowtemplateClusterSelector) DeepCopyInto ¶
func (in *WorkflowtemplateClusterSelector) DeepCopyInto(out *WorkflowtemplateClusterSelector)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateConfig ¶
// WorkflowtemplateConfig is the cluster configuration for a managed cluster created
// by the workflow: instance groups, networking, software, security, and lifecycle.
type WorkflowtemplateConfig struct {
/* Immutable. Optional. Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset. */
// +optional
AutoscalingConfig *WorkflowtemplateAutoscalingConfig `json:"autoscalingConfig,omitempty"`
/* Immutable. Optional. Encryption settings for the cluster. */
// +optional
EncryptionConfig *WorkflowtemplateEncryptionConfig `json:"encryptionConfig,omitempty"`
/* Immutable. Optional. Port/endpoint configuration for this cluster */
// +optional
EndpointConfig *WorkflowtemplateEndpointConfig `json:"endpointConfig,omitempty"`
/* Immutable. Optional. The shared Compute Engine config settings for all instances in a cluster. */
// +optional
GceClusterConfig *WorkflowtemplateGceClusterConfig `json:"gceClusterConfig,omitempty"`
/* Immutable. Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi */
// +optional
InitializationActions []WorkflowtemplateInitializationActions `json:"initializationActions,omitempty"`
/* Immutable. Optional. Lifecycle setting for the cluster. */
// +optional
LifecycleConfig *WorkflowtemplateLifecycleConfig `json:"lifecycleConfig,omitempty"`
/* Immutable. Optional. The Compute Engine config settings for the master instance in a cluster. */
// +optional
MasterConfig *WorkflowtemplateMasterConfig `json:"masterConfig,omitempty"`
/* Immutable. Optional. The Compute Engine config settings for additional worker instances in a cluster. */
// +optional
SecondaryWorkerConfig *WorkflowtemplateSecondaryWorkerConfig `json:"secondaryWorkerConfig,omitempty"`
/* Immutable. Optional. Security settings for the cluster. */
// +optional
SecurityConfig *WorkflowtemplateSecurityConfig `json:"securityConfig,omitempty"`
/* Immutable. Optional. The config settings for software inside the cluster. */
// +optional
SoftwareConfig *WorkflowtemplateSoftwareConfig `json:"softwareConfig,omitempty"`
/* Immutable. Reference to a Cloud Storage bucket — the field name suggests it maps
to the API's staging bucket; description missing from the generator, confirm. */
// +optional
StagingBucketRef *v1alpha1.ResourceRef `json:"stagingBucketRef,omitempty"`
/* Immutable. Reference to a Cloud Storage bucket — the field name suggests it maps
to the API's temp bucket; description missing from the generator, confirm. */
// +optional
TempBucketRef *v1alpha1.ResourceRef `json:"tempBucketRef,omitempty"`
/* Immutable. Optional. The Compute Engine config settings for worker instances in a cluster. */
// +optional
WorkerConfig *WorkflowtemplateWorkerConfig `json:"workerConfig,omitempty"`
}
func (*WorkflowtemplateConfig) DeepCopy ¶
func (in *WorkflowtemplateConfig) DeepCopy() *WorkflowtemplateConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateConfig.
func (*WorkflowtemplateConfig) DeepCopyInto ¶
func (in *WorkflowtemplateConfig) DeepCopyInto(out *WorkflowtemplateConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateConfigStatus ¶
// WorkflowtemplateConfigStatus mirrors the observed, status-side counterparts of the
// cluster configuration sections (each field's *Status type carries output-only data).
type WorkflowtemplateConfigStatus struct {
/* Observed endpoint configuration; see WorkflowtemplateEndpointConfigStatus. */
EndpointConfig WorkflowtemplateEndpointConfigStatus `json:"endpointConfig,omitempty"`
/* Observed lifecycle configuration; see WorkflowtemplateLifecycleConfigStatus. */
LifecycleConfig WorkflowtemplateLifecycleConfigStatus `json:"lifecycleConfig,omitempty"`
/* Observed master instance-group configuration; see WorkflowtemplateMasterConfigStatus. */
MasterConfig WorkflowtemplateMasterConfigStatus `json:"masterConfig,omitempty"`
/* Observed secondary-worker instance-group configuration; see WorkflowtemplateSecondaryWorkerConfigStatus. */
SecondaryWorkerConfig WorkflowtemplateSecondaryWorkerConfigStatus `json:"secondaryWorkerConfig,omitempty"`
/* Observed worker instance-group configuration; see WorkflowtemplateWorkerConfigStatus. */
WorkerConfig WorkflowtemplateWorkerConfigStatus `json:"workerConfig,omitempty"`
}
func (*WorkflowtemplateConfigStatus) DeepCopy ¶
func (in *WorkflowtemplateConfigStatus) DeepCopy() *WorkflowtemplateConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateConfigStatus.
func (*WorkflowtemplateConfigStatus) DeepCopyInto ¶
func (in *WorkflowtemplateConfigStatus) DeepCopyInto(out *WorkflowtemplateConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateDiskConfig ¶
// WorkflowtemplateDiskConfig specifies boot-disk size/type and the number of
// attached local SSDs for a cluster node instance group.
type WorkflowtemplateDiskConfig struct {
/* Immutable. Optional. Size in GB of the boot disk (default is 500GB). */
// +optional
BootDiskSizeGb *int `json:"bootDiskSizeGb,omitempty"`
/* Immutable. Optional. Type of the boot disk (default is "pd-standard"). Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive), "pd-ssd" (Persistent Disk Solid State Drive), or "pd-standard" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types). */
// +optional
BootDiskType *string `json:"bootDiskType,omitempty"`
/* Immutable. Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries. */
// +optional
NumLocalSsds *int `json:"numLocalSsds,omitempty"`
}
func (*WorkflowtemplateDiskConfig) DeepCopy ¶
func (in *WorkflowtemplateDiskConfig) DeepCopy() *WorkflowtemplateDiskConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateDiskConfig.
func (*WorkflowtemplateDiskConfig) DeepCopyInto ¶
func (in *WorkflowtemplateDiskConfig) DeepCopyInto(out *WorkflowtemplateDiskConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateEncryptionConfig ¶
// WorkflowtemplateEncryptionConfig holds encryption settings for the cluster.
type WorkflowtemplateEncryptionConfig struct {
/* Immutable. Reference to a KMS key — the field name suggests it maps to the API's
Compute Engine persistent-disk CMEK key (gcePdKmsKeyName); description missing from
the generator, confirm. */
// +optional
GcePdKmsKeyRef *v1alpha1.ResourceRef `json:"gcePdKmsKeyRef,omitempty"`
}
func (*WorkflowtemplateEncryptionConfig) DeepCopy ¶
func (in *WorkflowtemplateEncryptionConfig) DeepCopy() *WorkflowtemplateEncryptionConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateEncryptionConfig.
func (*WorkflowtemplateEncryptionConfig) DeepCopyInto ¶
func (in *WorkflowtemplateEncryptionConfig) DeepCopyInto(out *WorkflowtemplateEncryptionConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateEndpointConfig ¶
// WorkflowtemplateEndpointConfig controls HTTP access to cluster endpoints from
// external sources.
type WorkflowtemplateEndpointConfig struct {
/* Immutable. Optional. If true, enable http access to specific ports on the cluster from external sources. Defaults to false. */
// +optional
EnableHttpPortAccess *bool `json:"enableHttpPortAccess,omitempty"`
}
func (*WorkflowtemplateEndpointConfig) DeepCopy ¶
func (in *WorkflowtemplateEndpointConfig) DeepCopy() *WorkflowtemplateEndpointConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateEndpointConfig.
func (*WorkflowtemplateEndpointConfig) DeepCopyInto ¶
func (in *WorkflowtemplateEndpointConfig) DeepCopyInto(out *WorkflowtemplateEndpointConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateEndpointConfigStatus ¶
// WorkflowtemplateEndpointConfigStatus reports the observed endpoint URLs for the
// cluster (populated only when HTTP port access is enabled).
type WorkflowtemplateEndpointConfigStatus struct {
/* Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. */
HttpPorts map[string]string `json:"httpPorts,omitempty"`
}
func (*WorkflowtemplateEndpointConfigStatus) DeepCopy ¶
func (in *WorkflowtemplateEndpointConfigStatus) DeepCopy() *WorkflowtemplateEndpointConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateEndpointConfigStatus.
func (*WorkflowtemplateEndpointConfigStatus) DeepCopyInto ¶
func (in *WorkflowtemplateEndpointConfigStatus) DeepCopyInto(out *WorkflowtemplateEndpointConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateGceClusterConfig ¶
// WorkflowtemplateGceClusterConfig is the shared Compute Engine configuration applied
// to all instances in the cluster: networking, identity, metadata, tags, and zone.
type WorkflowtemplateGceClusterConfig struct {
/* Immutable. Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses. */
// +optional
InternalIPOnly *bool `json:"internalIPOnly,omitempty"`
/* Immutable. The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). */
// +optional
Metadata map[string]string `json:"metadata,omitempty"`
/* Immutable. Reference to the Compute Engine network for the cluster — the field
name suggests it maps to the API's networkUri; description missing from the generator, confirm. */
// +optional
NetworkRef *v1alpha1.ResourceRef `json:"networkRef,omitempty"`
/* Immutable. Optional. Node Group Affinity for sole-tenant clusters. */
// +optional
NodeGroupAffinity *WorkflowtemplateNodeGroupAffinity `json:"nodeGroupAffinity,omitempty"`
/* Immutable. Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL */
// +optional
PrivateIPv6GoogleAccess *string `json:"privateIPv6GoogleAccess,omitempty"`
/* Immutable. Optional. Reservation Affinity for consuming Zonal reservation. */
// +optional
ReservationAffinity *WorkflowtemplateReservationAffinity `json:"reservationAffinity,omitempty"`
/* Immutable. Reference to the service account used by cluster instances — the
field name suggests it maps to the API's serviceAccount; description missing from the generator, confirm. */
// +optional
ServiceAccountRef *v1alpha1.ResourceRef `json:"serviceAccountRef,omitempty"`
/* Immutable. Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control */
// +optional
ServiceAccountScopes []string `json:"serviceAccountScopes,omitempty"`
/* Immutable. Reference to the Compute Engine subnetwork — the field name suggests
it maps to the API's subnetworkUri; description missing from the generator, confirm. */
// +optional
SubnetworkRef *v1alpha1.ResourceRef `json:"subnetworkRef,omitempty"`
/* Immutable. The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)). */
// +optional
Tags []string `json:"tags,omitempty"`
/* Immutable. Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the "global" region. If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f` */
// +optional
Zone *string `json:"zone,omitempty"`
}
func (*WorkflowtemplateGceClusterConfig) DeepCopy ¶
func (in *WorkflowtemplateGceClusterConfig) DeepCopy() *WorkflowtemplateGceClusterConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateGceClusterConfig.
func (*WorkflowtemplateGceClusterConfig) DeepCopyInto ¶
func (in *WorkflowtemplateGceClusterConfig) DeepCopyInto(out *WorkflowtemplateGceClusterConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateHadoopJob ¶
// WorkflowtemplateHadoopJob describes a Hadoop MapReduce job: the main class or jar,
// its arguments, supporting files/jars/archives, properties, and log configuration.
type WorkflowtemplateHadoopJob struct {
/* Immutable. Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. */
// +optional
ArchiveUris []string `json:"archiveUris,omitempty"`
/* Immutable. Optional. The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */
// +optional
Args []string `json:"args,omitempty"`
/* Immutable. Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. */
// +optional
FileUris []string `json:"fileUris,omitempty"`
/* Immutable. Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks. */
// +optional
JarFileUris []string `json:"jarFileUris,omitempty"`
/* Immutable. Optional. The runtime log config for job execution. */
// +optional
LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`
/* Immutable. The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`. */
// +optional
MainClass *string `json:"mainClass,omitempty"`
/* Immutable. The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' */
// +optional
MainJarFileUri *string `json:"mainJarFileUri,omitempty"`
/* Immutable. Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code. */
// +optional
Properties map[string]string `json:"properties,omitempty"`
}
func (*WorkflowtemplateHadoopJob) DeepCopy ¶
func (in *WorkflowtemplateHadoopJob) DeepCopy() *WorkflowtemplateHadoopJob
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateHadoopJob.
func (*WorkflowtemplateHadoopJob) DeepCopyInto ¶
func (in *WorkflowtemplateHadoopJob) DeepCopyInto(out *WorkflowtemplateHadoopJob)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateHiveJob ¶
// WorkflowtemplateHiveJob describes a Hive job: queries (inline list or script file),
// variables, jar files, properties, and failure behavior.
type WorkflowtemplateHiveJob struct {
/* Immutable. Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries. */
// +optional
ContinueOnFailure *bool `json:"continueOnFailure,omitempty"`
/* Immutable. Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. */
// +optional
JarFileUris []string `json:"jarFileUris,omitempty"`
/* Immutable. Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code. */
// +optional
Properties map[string]string `json:"properties,omitempty"`
/* Immutable. The HCFS URI of the script that contains Hive queries. */
// +optional
QueryFileUri *string `json:"queryFileUri,omitempty"`
/* Immutable. A list of queries. */
// +optional
QueryList *WorkflowtemplateQueryList `json:"queryList,omitempty"`
/* Immutable. Optional. Mapping of query variable names to values (equivalent to the Hive command: `SET name="value";`). */
// +optional
ScriptVariables map[string]string `json:"scriptVariables,omitempty"`
}
func (*WorkflowtemplateHiveJob) DeepCopy ¶
func (in *WorkflowtemplateHiveJob) DeepCopy() *WorkflowtemplateHiveJob
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateHiveJob.
func (*WorkflowtemplateHiveJob) DeepCopyInto ¶
func (in *WorkflowtemplateHiveJob) DeepCopyInto(out *WorkflowtemplateHiveJob)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateInitializationActions ¶
// WorkflowtemplateInitializationActions is an executable to run on cluster nodes
// after configuration completes, with an optional execution timeout.
type WorkflowtemplateInitializationActions struct {
/* Immutable. Required. Cloud Storage URI of executable file. */
// NOTE(review): the description says "Required" but the field is marked +optional
// with an omitempty pointer — one of the two is likely stale; confirm against the
// Dataproc WorkflowTemplates API.
// +optional
ExecutableFile *string `json:"executableFile,omitempty"`
/* Immutable. Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period. */
// +optional
ExecutionTimeout *string `json:"executionTimeout,omitempty"`
}
func (*WorkflowtemplateInitializationActions) DeepCopy ¶
func (in *WorkflowtemplateInitializationActions) DeepCopy() *WorkflowtemplateInitializationActions
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateInitializationActions.
func (*WorkflowtemplateInitializationActions) DeepCopyInto ¶
func (in *WorkflowtemplateInitializationActions) DeepCopyInto(out *WorkflowtemplateInitializationActions)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateJobs ¶
// WorkflowtemplateJobs describes one step in the workflow's job DAG: a required
// unique StepId, optional prerequisite steps, labels, scheduling, and a job payload
// (Hadoop, Hive, Pig, Presto, PySpark, Spark, SparkR, or SparkSql — presumably
// exactly one is set per step; confirm against the Dataproc API).
type WorkflowtemplateJobs struct {
/* Immutable. Optional. Job is a Hadoop job. */
// +optional
HadoopJob *WorkflowtemplateHadoopJob `json:"hadoopJob,omitempty"`
/* Immutable. Optional. Job is a Hive job. */
// +optional
HiveJob *WorkflowtemplateHiveJob `json:"hiveJob,omitempty"`
/* Immutable. Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given job. */
// +optional
Labels map[string]string `json:"labels,omitempty"`
/* Immutable. Optional. Job is a Pig job. */
// +optional
PigJob *WorkflowtemplatePigJob `json:"pigJob,omitempty"`
/* Immutable. Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow. */
// +optional
PrerequisiteStepIds []string `json:"prerequisiteStepIds,omitempty"`
/* Immutable. Optional. Job is a Presto job. */
// +optional
PrestoJob *WorkflowtemplatePrestoJob `json:"prestoJob,omitempty"`
/* Immutable. Optional. Job is a PySpark job. */
// +optional
PysparkJob *WorkflowtemplatePysparkJob `json:"pysparkJob,omitempty"`
/* Immutable. Optional. Job scheduling configuration. */
// +optional
Scheduling *WorkflowtemplateScheduling `json:"scheduling,omitempty"`
/* Immutable. Optional. Job is a Spark job. */
// +optional
SparkJob *WorkflowtemplateSparkJob `json:"sparkJob,omitempty"`
/* Immutable. Optional. Job is a SparkR job. */
// +optional
SparkRJob *WorkflowtemplateSparkRJob `json:"sparkRJob,omitempty"`
/* Immutable. Optional. Job is a SparkSql job. */
// +optional
SparkSqlJob *WorkflowtemplateSparkSqlJob `json:"sparkSqlJob,omitempty"`
/* Immutable. Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters. */
StepId string `json:"stepId"`
}
func (*WorkflowtemplateJobs) DeepCopy ¶
func (in *WorkflowtemplateJobs) DeepCopy() *WorkflowtemplateJobs
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateJobs.
func (*WorkflowtemplateJobs) DeepCopyInto ¶
func (in *WorkflowtemplateJobs) DeepCopyInto(out *WorkflowtemplateJobs)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateKerberosConfig ¶
// WorkflowtemplateKerberosConfig holds Kerberos/SSL settings for cluster security:
// on-cluster KDC setup, cross-realm trust, and keystore/truststore material
// (secrets are supplied as Cloud Storage URIs of KMS-encrypted files).
type WorkflowtemplateKerberosConfig struct {
/* Immutable. Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship. */
// +optional
CrossRealmTrustAdminServer *string `json:"crossRealmTrustAdminServer,omitempty"`
/* Immutable. Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship. */
// +optional
CrossRealmTrustKdc *string `json:"crossRealmTrustKdc,omitempty"`
/* Immutable. Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust. */
// +optional
CrossRealmTrustRealm *string `json:"crossRealmTrustRealm,omitempty"`
/* Presumably the shared password for the cross realm trust relationship, following
the pattern of the sibling KMS-encrypted secret fields — description missing from
the generator; confirm against the Dataproc API. */
// +optional
CrossRealmTrustSharedPassword *string `json:"crossRealmTrustSharedPassword,omitempty"`
/* Immutable. Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster. */
// +optional
EnableKerberos *bool `json:"enableKerberos,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database. */
// +optional
KdcDbKey *string `json:"kdcDbKey,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc. */
// +optional
KeyPassword *string `json:"keyPassword,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. */
// +optional
Keystore *string `json:"keystore,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc. */
// +optional
KeystorePassword *string `json:"keystorePassword,omitempty"`
/* Immutable. Reference to a KMS key — the field name suggests the key used to
decrypt the encrypted secret files above; description missing from the generator, confirm. */
// +optional
KmsKeyRef *v1alpha1.ResourceRef `json:"kmsKeyRef,omitempty"`
/* Immutable. Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm. */
// +optional
Realm *string `json:"realm,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password. */
// +optional
RootPrincipalPassword *string `json:"rootPrincipalPassword,omitempty"`
/* Immutable. Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used. */
// +optional
TgtLifetimeHours *int `json:"tgtLifetimeHours,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of the truststore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate. */
// +optional
Truststore *string `json:"truststore,omitempty"`
/* Immutable. Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc. */
// +optional
TruststorePassword *string `json:"truststorePassword,omitempty"`
}
func (*WorkflowtemplateKerberosConfig) DeepCopy ¶
func (in *WorkflowtemplateKerberosConfig) DeepCopy() *WorkflowtemplateKerberosConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateKerberosConfig.
func (*WorkflowtemplateKerberosConfig) DeepCopyInto ¶
func (in *WorkflowtemplateKerberosConfig) DeepCopyInto(out *WorkflowtemplateKerberosConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateLifecycleConfig ¶
// WorkflowtemplateLifecycleConfig controls automatic deletion of the cluster:
// at a fixed time, after a fixed lifetime, or after an idle period.
type WorkflowtemplateLifecycleConfig struct {
/* Immutable. Optional. The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
// +optional
AutoDeleteTime *string `json:"autoDeleteTime,omitempty"`
/* Immutable. Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
// +optional
AutoDeleteTtl *string `json:"autoDeleteTtl,omitempty"`
/* Immutable. Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
// +optional
IdleDeleteTtl *string `json:"idleDeleteTtl,omitempty"`
}
func (*WorkflowtemplateLifecycleConfig) DeepCopy ¶
func (in *WorkflowtemplateLifecycleConfig) DeepCopy() *WorkflowtemplateLifecycleConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateLifecycleConfig.
func (*WorkflowtemplateLifecycleConfig) DeepCopyInto ¶
func (in *WorkflowtemplateLifecycleConfig) DeepCopyInto(out *WorkflowtemplateLifecycleConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateLifecycleConfigStatus ¶
// WorkflowtemplateLifecycleConfigStatus holds server-reported lifecycle state
// for the managed cluster.
type WorkflowtemplateLifecycleConfigStatus struct {
/* Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)). */
IdleStartTime string `json:"idleStartTime,omitempty"`
}
func (*WorkflowtemplateLifecycleConfigStatus) DeepCopy ¶
func (in *WorkflowtemplateLifecycleConfigStatus) DeepCopy() *WorkflowtemplateLifecycleConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateLifecycleConfigStatus.
func (*WorkflowtemplateLifecycleConfigStatus) DeepCopyInto ¶
func (in *WorkflowtemplateLifecycleConfigStatus) DeepCopyInto(out *WorkflowtemplateLifecycleConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateLoggingConfig ¶
// WorkflowtemplateLoggingConfig maps Java package names to log levels for a
// job's driver process.
type WorkflowtemplateLoggingConfig struct {
/* Immutable. The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' */
// +optional
DriverLogLevels map[string]string `json:"driverLogLevels,omitempty"`
}
func (*WorkflowtemplateLoggingConfig) DeepCopy ¶
func (in *WorkflowtemplateLoggingConfig) DeepCopy() *WorkflowtemplateLoggingConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateLoggingConfig.
func (*WorkflowtemplateLoggingConfig) DeepCopyInto ¶
func (in *WorkflowtemplateLoggingConfig) DeepCopyInto(out *WorkflowtemplateLoggingConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateManagedCluster ¶
// WorkflowtemplateManagedCluster describes an ephemeral cluster that the
// workflow creates, runs jobs on, and deletes when the workflow finishes.
type WorkflowtemplateManagedCluster struct {
/* Immutable. Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters. */
ClusterName string `json:"clusterName"`
/* Immutable. Required. The cluster configuration. */
Config WorkflowtemplateConfig `json:"config"`
/* Immutable. Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given cluster. */
// +optional
Labels map[string]string `json:"labels,omitempty"`
}
func (*WorkflowtemplateManagedCluster) DeepCopy ¶
func (in *WorkflowtemplateManagedCluster) DeepCopy() *WorkflowtemplateManagedCluster
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateManagedCluster.
func (*WorkflowtemplateManagedCluster) DeepCopyInto ¶
func (in *WorkflowtemplateManagedCluster) DeepCopyInto(out *WorkflowtemplateManagedCluster)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateManagedClusterStatus ¶
// WorkflowtemplateManagedClusterStatus is the status counterpart of
// WorkflowtemplateManagedCluster.
type WorkflowtemplateManagedClusterStatus struct {
/* Output only. The observed cluster configuration. */
Config WorkflowtemplateConfigStatus `json:"config,omitempty"`
}
func (*WorkflowtemplateManagedClusterStatus) DeepCopy ¶
func (in *WorkflowtemplateManagedClusterStatus) DeepCopy() *WorkflowtemplateManagedClusterStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateManagedClusterStatus.
func (*WorkflowtemplateManagedClusterStatus) DeepCopyInto ¶
func (in *WorkflowtemplateManagedClusterStatus) DeepCopyInto(out *WorkflowtemplateManagedClusterStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateManagedGroupConfigStatus ¶
// WorkflowtemplateManagedGroupConfigStatus exposes the Compute Engine Managed
// Instance Group resources backing an instance group.
type WorkflowtemplateManagedGroupConfigStatus struct {
/* Output only. The name of the Instance Group Manager for this group. */
InstanceGroupManagerName string `json:"instanceGroupManagerName,omitempty"`
/* Output only. The name of the Instance Template used for the Managed Instance Group. */
InstanceTemplateName string `json:"instanceTemplateName,omitempty"`
}
func (*WorkflowtemplateManagedGroupConfigStatus) DeepCopy ¶
func (in *WorkflowtemplateManagedGroupConfigStatus) DeepCopy() *WorkflowtemplateManagedGroupConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateManagedGroupConfigStatus.
func (*WorkflowtemplateManagedGroupConfigStatus) DeepCopyInto ¶
func (in *WorkflowtemplateManagedGroupConfigStatus) DeepCopyInto(out *WorkflowtemplateManagedGroupConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateMasterConfig ¶
// WorkflowtemplateMasterConfig configures the master instance group of the
// managed cluster (machine type, disks, accelerators, instance count).
type WorkflowtemplateMasterConfig struct {
/* Immutable. Optional. The Compute Engine accelerator configuration for these instances. */
// +optional
Accelerators []WorkflowtemplateAccelerators `json:"accelerators,omitempty"`
/* Immutable. Optional. Disk option config settings. */
// +optional
DiskConfig *WorkflowtemplateDiskConfig `json:"diskConfig,omitempty"`
/* Immutable. */
// NOTE(review): presumably a reference to the Compute Engine image used for
// the instances — confirm against how ResourceRef is resolved for this field.
// +optional
ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`
/* Immutable. Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
// +optional
MachineType *string `json:"machineType,omitempty"`
/* Immutable. Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
// +optional
MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`
/* Immutable. Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**. */
// +optional
NumInstances *int `json:"numInstances,omitempty"`
/* Immutable. Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
// +optional
Preemptibility *string `json:"preemptibility,omitempty"`
}
func (*WorkflowtemplateMasterConfig) DeepCopy ¶
func (in *WorkflowtemplateMasterConfig) DeepCopy() *WorkflowtemplateMasterConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateMasterConfig.
func (*WorkflowtemplateMasterConfig) DeepCopyInto ¶
func (in *WorkflowtemplateMasterConfig) DeepCopyInto(out *WorkflowtemplateMasterConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateMasterConfigStatus ¶
// WorkflowtemplateMasterConfigStatus holds server-reported state for the
// master instance group.
type WorkflowtemplateMasterConfigStatus struct {
/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
InstanceNames []string `json:"instanceNames,omitempty"`
/* Output only. Specifies that this instance group contains preemptible instances. */
IsPreemptible bool `json:"isPreemptible,omitempty"`
/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
ManagedGroupConfig WorkflowtemplateManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}
func (*WorkflowtemplateMasterConfigStatus) DeepCopy ¶
func (in *WorkflowtemplateMasterConfigStatus) DeepCopy() *WorkflowtemplateMasterConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateMasterConfigStatus.
func (*WorkflowtemplateMasterConfigStatus) DeepCopyInto ¶
func (in *WorkflowtemplateMasterConfigStatus) DeepCopyInto(out *WorkflowtemplateMasterConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateNodeGroupAffinity ¶
// WorkflowtemplateNodeGroupAffinity pins cluster instances to a sole-tenant
// node group, referenced via a resource reference.
type WorkflowtemplateNodeGroupAffinity struct {
/* Immutable. */
NodeGroupRef v1alpha1.ResourceRef `json:"nodeGroupRef"`
}
func (*WorkflowtemplateNodeGroupAffinity) DeepCopy ¶
func (in *WorkflowtemplateNodeGroupAffinity) DeepCopy() *WorkflowtemplateNodeGroupAffinity
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateNodeGroupAffinity.
func (*WorkflowtemplateNodeGroupAffinity) DeepCopyInto ¶
func (in *WorkflowtemplateNodeGroupAffinity) DeepCopyInto(out *WorkflowtemplateNodeGroupAffinity)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateParameters ¶
// WorkflowtemplateParameters declares a template parameter: a named value
// substituted into one or more template field paths at instantiation time,
// optionally subject to validation rules.
type WorkflowtemplateParameters struct {
/* Immutable. Optional. Brief description of the parameter. Must not exceed 1024 characters. */
// +optional
Description *string `json:"description,omitempty"`
/* Immutable. Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`. Also, field paths can reference fields using the following syntax: * Values in maps can be referenced by key: * labels['key'] * placement.clusterSelector.clusterLabels['key'] * placement.managedCluster.labels['key'] * placement.clusterSelector.clusterLabels['key'] * jobs['step-id'].labels['key'] * Jobs in the jobs list can be referenced by step-id: * jobs['step-id'].hadoopJob.mainJarFileUri * jobs['step-id'].hiveJob.queryFileUri * jobs['step-id'].pySparkJob.mainPythonFileUri * jobs['step-id'].hadoopJob.jarFileUris[0] * jobs['step-id'].hadoopJob.archiveUris[0] * jobs['step-id'].hadoopJob.fileUris[0] * jobs['step-id'].pySparkJob.pythonFileUris[0] * Items in repeated fields can be referenced by a zero-based index: * jobs['step-id'].sparkJob.args[0] * Other examples: * jobs['step-id'].hadoopJob.properties['key'] * jobs['step-id'].hadoopJob.args[0] * jobs['step-id'].hiveJob.scriptVariables['key'] * jobs['step-id'].hadoopJob.mainJarFileUri * placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: - placement.clusterSelector.clusterLabels - jobs['step-id'].sparkJob.args */
Fields []string `json:"fields"`
/* Immutable. Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters. */
Name string `json:"name"`
/* Immutable. Optional. Validation rules to be applied to this parameter's value. */
// +optional
Validation *WorkflowtemplateValidation `json:"validation,omitempty"`
}
func (*WorkflowtemplateParameters) DeepCopy ¶
func (in *WorkflowtemplateParameters) DeepCopy() *WorkflowtemplateParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateParameters.
func (*WorkflowtemplateParameters) DeepCopyInto ¶
func (in *WorkflowtemplateParameters) DeepCopyInto(out *WorkflowtemplateParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplatePigJob ¶
// WorkflowtemplatePigJob describes an Apache Pig job. Exactly one of
// QueryFileUri or QueryList supplies the queries (both are optional here;
// the API enforces the one-of — TODO confirm).
type WorkflowtemplatePigJob struct {
/* Immutable. Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries. */
// +optional
ContinueOnFailure *bool `json:"continueOnFailure,omitempty"`
/* Immutable. Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. */
// +optional
JarFileUris []string `json:"jarFileUris,omitempty"`
/* Immutable. Optional. The runtime log config for job execution. */
// +optional
LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`
/* Immutable. Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. */
// +optional
Properties map[string]string `json:"properties,omitempty"`
/* Immutable. The HCFS URI of the script that contains the Pig queries. */
// +optional
QueryFileUri *string `json:"queryFileUri,omitempty"`
/* Immutable. A list of queries. */
// +optional
QueryList *WorkflowtemplateQueryList `json:"queryList,omitempty"`
/* Immutable. Optional. Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`). */
// +optional
ScriptVariables map[string]string `json:"scriptVariables,omitempty"`
}
func (*WorkflowtemplatePigJob) DeepCopy ¶
func (in *WorkflowtemplatePigJob) DeepCopy() *WorkflowtemplatePigJob
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplatePigJob.
func (*WorkflowtemplatePigJob) DeepCopyInto ¶
func (in *WorkflowtemplatePigJob) DeepCopyInto(out *WorkflowtemplatePigJob)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplatePlacement ¶
// WorkflowtemplatePlacement selects where workflow jobs run: either an
// existing cluster chosen by label selector, or a managed cluster created
// for the workflow.
type WorkflowtemplatePlacement struct {
/* Immutable. Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted. */
// +optional
ClusterSelector *WorkflowtemplateClusterSelector `json:"clusterSelector,omitempty"`
/* Immutable. A cluster that is managed by the workflow. */
// +optional
ManagedCluster *WorkflowtemplateManagedCluster `json:"managedCluster,omitempty"`
}
func (*WorkflowtemplatePlacement) DeepCopy ¶
func (in *WorkflowtemplatePlacement) DeepCopy() *WorkflowtemplatePlacement
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplatePlacement.
func (*WorkflowtemplatePlacement) DeepCopyInto ¶
func (in *WorkflowtemplatePlacement) DeepCopyInto(out *WorkflowtemplatePlacement)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplatePlacementStatus ¶
// WorkflowtemplatePlacementStatus is the status counterpart of
// WorkflowtemplatePlacement.
type WorkflowtemplatePlacementStatus struct {
/* Output only. The observed state of the managed cluster. */
ManagedCluster WorkflowtemplateManagedClusterStatus `json:"managedCluster,omitempty"`
}
func (*WorkflowtemplatePlacementStatus) DeepCopy ¶
func (in *WorkflowtemplatePlacementStatus) DeepCopy() *WorkflowtemplatePlacementStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplatePlacementStatus.
func (*WorkflowtemplatePlacementStatus) DeepCopyInto ¶
func (in *WorkflowtemplatePlacementStatus) DeepCopyInto(out *WorkflowtemplatePlacementStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplatePrestoJob ¶
// WorkflowtemplatePrestoJob describes a Presto job. Queries come from either
// QueryFileUri or QueryList (both optional here; the API enforces the one-of
// — TODO confirm).
type WorkflowtemplatePrestoJob struct {
/* Immutable. Optional. Presto client tags to attach to this query */
// +optional
ClientTags []string `json:"clientTags,omitempty"`
/* Immutable. Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries. */
// +optional
ContinueOnFailure *bool `json:"continueOnFailure,omitempty"`
/* Immutable. Optional. The runtime log config for job execution. */
// +optional
LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`
/* Immutable. Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats */
// +optional
OutputFormat *string `json:"outputFormat,omitempty"`
/* Immutable. Optional. A mapping of property names to values. Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI */
// +optional
Properties map[string]string `json:"properties,omitempty"`
/* Immutable. The HCFS URI of the script that contains SQL queries. */
// +optional
QueryFileUri *string `json:"queryFileUri,omitempty"`
/* Immutable. A list of queries. */
// +optional
QueryList *WorkflowtemplateQueryList `json:"queryList,omitempty"`
}
func (*WorkflowtemplatePrestoJob) DeepCopy ¶
func (in *WorkflowtemplatePrestoJob) DeepCopy() *WorkflowtemplatePrestoJob
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplatePrestoJob.
func (*WorkflowtemplatePrestoJob) DeepCopyInto ¶
func (in *WorkflowtemplatePrestoJob) DeepCopyInto(out *WorkflowtemplatePrestoJob)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplatePysparkJob ¶
// WorkflowtemplatePysparkJob describes a PySpark job; MainPythonFileUri is the
// only required field.
type WorkflowtemplatePysparkJob struct {
/* Immutable. Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */
// +optional
ArchiveUris []string `json:"archiveUris,omitempty"`
/* Immutable. Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */
// +optional
Args []string `json:"args,omitempty"`
/* Immutable. Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */
// +optional
FileUris []string `json:"fileUris,omitempty"`
/* Immutable. Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. */
// +optional
JarFileUris []string `json:"jarFileUris,omitempty"`
/* Immutable. Optional. The runtime log config for job execution. */
// +optional
LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`
/* Immutable. Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. */
MainPythonFileUri string `json:"mainPythonFileUri"`
/* Immutable. Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */
// +optional
Properties map[string]string `json:"properties,omitempty"`
/* Immutable. Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. */
// +optional
PythonFileUris []string `json:"pythonFileUris,omitempty"`
}
func (*WorkflowtemplatePysparkJob) DeepCopy ¶
func (in *WorkflowtemplatePysparkJob) DeepCopy() *WorkflowtemplatePysparkJob
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplatePysparkJob.
func (*WorkflowtemplatePysparkJob) DeepCopyInto ¶
func (in *WorkflowtemplatePysparkJob) DeepCopyInto(out *WorkflowtemplatePysparkJob)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateQueryList ¶
// WorkflowtemplateQueryList is a list of query strings for query-based jobs
// (Hive, Pig, Presto, Spark SQL).
type WorkflowtemplateQueryList struct {
/* Immutable. Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: "hiveJob" { "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } */
Queries []string `json:"queries"`
}
func (*WorkflowtemplateQueryList) DeepCopy ¶
func (in *WorkflowtemplateQueryList) DeepCopy() *WorkflowtemplateQueryList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateQueryList.
func (*WorkflowtemplateQueryList) DeepCopyInto ¶
func (in *WorkflowtemplateQueryList) DeepCopyInto(out *WorkflowtemplateQueryList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateRegex ¶
// WorkflowtemplateRegex is a regex-based validation rule for a template
// parameter value.
type WorkflowtemplateRegex struct {
/* Immutable. Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient). */
Regexes []string `json:"regexes"`
}
func (*WorkflowtemplateRegex) DeepCopy ¶
func (in *WorkflowtemplateRegex) DeepCopy() *WorkflowtemplateRegex
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateRegex.
func (*WorkflowtemplateRegex) DeepCopyInto ¶
func (in *WorkflowtemplateRegex) DeepCopyInto(out *WorkflowtemplateRegex)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateReservationAffinity ¶
// WorkflowtemplateReservationAffinity configures which Compute Engine
// reservation (if any) the cluster's instances consume.
type WorkflowtemplateReservationAffinity struct {
/* Immutable. Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION */
// +optional
ConsumeReservationType *string `json:"consumeReservationType,omitempty"`
/* Immutable. Optional. Corresponds to the label key of reservation resource. */
// +optional
Key *string `json:"key,omitempty"`
/* Immutable. Optional. Corresponds to the label values of reservation resource. */
// +optional
Values []WorkflowtemplateValues `json:"values,omitempty"`
}
func (*WorkflowtemplateReservationAffinity) DeepCopy ¶
func (in *WorkflowtemplateReservationAffinity) DeepCopy() *WorkflowtemplateReservationAffinity
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateReservationAffinity.
func (*WorkflowtemplateReservationAffinity) DeepCopyInto ¶
func (in *WorkflowtemplateReservationAffinity) DeepCopyInto(out *WorkflowtemplateReservationAffinity)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateScheduling ¶
// WorkflowtemplateScheduling limits how many times a job's driver may be
// restarted before the job is reported failed.
type WorkflowtemplateScheduling struct {
/* Immutable. Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10. */
// +optional
MaxFailuresPerHour *int `json:"maxFailuresPerHour,omitempty"`
/* Immutable. Optional. Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240. */
// +optional
MaxFailuresTotal *int `json:"maxFailuresTotal,omitempty"`
}
func (*WorkflowtemplateScheduling) DeepCopy ¶
func (in *WorkflowtemplateScheduling) DeepCopy() *WorkflowtemplateScheduling
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateScheduling.
func (*WorkflowtemplateScheduling) DeepCopyInto ¶
func (in *WorkflowtemplateScheduling) DeepCopyInto(out *WorkflowtemplateScheduling)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateSecondaryWorkerConfig ¶
type WorkflowtemplateSecondaryWorkerConfig struct {
/* Immutable. Optional. The Compute Engine accelerator configuration for these instances. */
// +optional
Accelerators []WorkflowtemplateAccelerators `json:"accelerators,omitempty"`
/* Immutable. Optional. Disk option config settings. */
// +optional
DiskConfig *WorkflowtemplateDiskConfig `json:"diskConfig,omitempty"`
/* Immutable. */
// +optional
ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`
/* Immutable. Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
// +optional
MachineType *string `json:"machineType,omitempty"`
/* Immutable. Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
// +optional
MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`
/* Immutable. Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**. */
// +optional
NumInstances *int `json:"numInstances,omitempty"`
/* Immutable. Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
// +optional
Preemptibility *string `json:"preemptibility,omitempty"`
}
func (*WorkflowtemplateSecondaryWorkerConfig) DeepCopy ¶
func (in *WorkflowtemplateSecondaryWorkerConfig) DeepCopy() *WorkflowtemplateSecondaryWorkerConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSecondaryWorkerConfig.
func (*WorkflowtemplateSecondaryWorkerConfig) DeepCopyInto ¶
func (in *WorkflowtemplateSecondaryWorkerConfig) DeepCopyInto(out *WorkflowtemplateSecondaryWorkerConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateSecondaryWorkerConfigStatus ¶
// WorkflowtemplateSecondaryWorkerConfigStatus holds server-reported state for
// the secondary worker instance group.
type WorkflowtemplateSecondaryWorkerConfigStatus struct {
/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
InstanceNames []string `json:"instanceNames,omitempty"`
/* Output only. Specifies that this instance group contains preemptible instances. */
IsPreemptible bool `json:"isPreemptible,omitempty"`
/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
ManagedGroupConfig WorkflowtemplateManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}
func (*WorkflowtemplateSecondaryWorkerConfigStatus) DeepCopy ¶
func (in *WorkflowtemplateSecondaryWorkerConfigStatus) DeepCopy() *WorkflowtemplateSecondaryWorkerConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSecondaryWorkerConfigStatus.
func (*WorkflowtemplateSecondaryWorkerConfigStatus) DeepCopyInto ¶
func (in *WorkflowtemplateSecondaryWorkerConfigStatus) DeepCopyInto(out *WorkflowtemplateSecondaryWorkerConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateSecurityConfig ¶
// WorkflowtemplateSecurityConfig groups cluster security settings; currently
// only Kerberos configuration.
type WorkflowtemplateSecurityConfig struct {
/* Immutable. Optional. Kerberos related configuration. */
// +optional
KerberosConfig *WorkflowtemplateKerberosConfig `json:"kerberosConfig,omitempty"`
}
func (*WorkflowtemplateSecurityConfig) DeepCopy ¶
func (in *WorkflowtemplateSecurityConfig) DeepCopy() *WorkflowtemplateSecurityConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSecurityConfig.
func (*WorkflowtemplateSecurityConfig) DeepCopyInto ¶
func (in *WorkflowtemplateSecurityConfig) DeepCopyInto(out *WorkflowtemplateSecurityConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateSoftwareConfig ¶
// WorkflowtemplateSoftwareConfig selects the Dataproc image version, optional
// components, and daemon configuration properties for the managed cluster.
type WorkflowtemplateSoftwareConfig struct {
/* Immutable. Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as "1.2" (including a subminor version, such as "1.2.29"), or the ["preview" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version. */
// +optional
ImageVersion *string `json:"imageVersion,omitempty"`
/* Immutable. Optional. The set of components to activate on the cluster. */
// +optional
OptionalComponents []string `json:"optionalComponents,omitempty"`
/* Immutable. Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties). */
// +optional
Properties map[string]string `json:"properties,omitempty"`
}
func (*WorkflowtemplateSoftwareConfig) DeepCopy ¶
func (in *WorkflowtemplateSoftwareConfig) DeepCopy() *WorkflowtemplateSoftwareConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSoftwareConfig.
func (*WorkflowtemplateSoftwareConfig) DeepCopyInto ¶
func (in *WorkflowtemplateSoftwareConfig) DeepCopyInto(out *WorkflowtemplateSoftwareConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateSparkJob ¶
// WorkflowtemplateSparkJob describes a Spark job. The entry point is given by
// either MainClass or MainJarFileUri (both optional here; the API enforces the
// one-of — TODO confirm).
type WorkflowtemplateSparkJob struct {
/* Immutable. Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */
// +optional
ArchiveUris []string `json:"archiveUris,omitempty"`
/* Immutable. Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */
// +optional
Args []string `json:"args,omitempty"`
/* Immutable. Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */
// +optional
FileUris []string `json:"fileUris,omitempty"`
/* Immutable. Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. */
// +optional
JarFileUris []string `json:"jarFileUris,omitempty"`
/* Immutable. Optional. The runtime log config for job execution. */
// +optional
LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`
/* Immutable. The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`. */
// +optional
MainClass *string `json:"mainClass,omitempty"`
/* Immutable. The HCFS URI of the jar file that contains the main class. */
// +optional
MainJarFileUri *string `json:"mainJarFileUri,omitempty"`
/* Immutable. Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */
// +optional
Properties map[string]string `json:"properties,omitempty"`
}
func (*WorkflowtemplateSparkJob) DeepCopy ¶
func (in *WorkflowtemplateSparkJob) DeepCopy() *WorkflowtemplateSparkJob
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSparkJob.
func (*WorkflowtemplateSparkJob) DeepCopyInto ¶
func (in *WorkflowtemplateSparkJob) DeepCopyInto(out *WorkflowtemplateSparkJob)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateSparkRJob ¶
// WorkflowtemplateSparkRJob describes a Dataproc job for running SparkR
// applications, as embedded in a DataprocWorkflowTemplate spec. MainRFileUri
// is the only required field; everything else is optional and immutable.
type WorkflowtemplateSparkRJob struct {
/* Immutable. Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. */
// +optional
ArchiveUris []string `json:"archiveUris,omitempty"`
/* Immutable. Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission. */
// +optional
Args []string `json:"args,omitempty"`
/* Immutable. Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. */
// +optional
FileUris []string `json:"fileUris,omitempty"`
/* Immutable. Optional. The runtime log config for job execution. */
// +optional
LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`
/* Immutable. Required. The HCFS URI of the main R file to use as the driver. Must be a .R file. */
MainRFileUri string `json:"mainRFileUri"`
/* Immutable. Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code. */
// +optional
Properties map[string]string `json:"properties,omitempty"`
}
func (*WorkflowtemplateSparkRJob) DeepCopy ¶
func (in *WorkflowtemplateSparkRJob) DeepCopy() *WorkflowtemplateSparkRJob
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSparkRJob.
func (*WorkflowtemplateSparkRJob) DeepCopyInto ¶
func (in *WorkflowtemplateSparkRJob) DeepCopyInto(out *WorkflowtemplateSparkRJob)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateSparkSqlJob ¶
// WorkflowtemplateSparkSqlJob describes a Dataproc job for running Spark SQL
// queries, as embedded in a DataprocWorkflowTemplate spec.
// NOTE(review): QueryFileUri and QueryList appear to be alternative query
// sources (a oneof in the Dataproc API) — confirm before setting both.
type WorkflowtemplateSparkSqlJob struct {
/* Immutable. Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. */
// +optional
JarFileUris []string `json:"jarFileUris,omitempty"`
/* Immutable. Optional. The runtime log config for job execution. */
// +optional
LoggingConfig *WorkflowtemplateLoggingConfig `json:"loggingConfig,omitempty"`
/* Immutable. Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. */
// +optional
Properties map[string]string `json:"properties,omitempty"`
/* Immutable. The HCFS URI of the script that contains SQL queries. */
// +optional
QueryFileUri *string `json:"queryFileUri,omitempty"`
/* Immutable. A list of queries. */
// +optional
QueryList *WorkflowtemplateQueryList `json:"queryList,omitempty"`
/* Immutable. Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name="value";`). */
// +optional
ScriptVariables map[string]string `json:"scriptVariables,omitempty"`
}
func (*WorkflowtemplateSparkSqlJob) DeepCopy ¶
func (in *WorkflowtemplateSparkSqlJob) DeepCopy() *WorkflowtemplateSparkSqlJob
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateSparkSqlJob.
func (*WorkflowtemplateSparkSqlJob) DeepCopyInto ¶
func (in *WorkflowtemplateSparkSqlJob) DeepCopyInto(out *WorkflowtemplateSparkSqlJob)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateValidation ¶
// WorkflowtemplateValidation configures validation rules for a workflow
// template parameter: either a regular-expression match or an allowed-value
// list.
// NOTE(review): Regex and Values look mutually exclusive (a oneof in the
// Dataproc API) — confirm against the API before setting both.
type WorkflowtemplateValidation struct {
/* Immutable. Validation based on regular expressions. */
// +optional
Regex *WorkflowtemplateRegex `json:"regex,omitempty"`
/* Immutable. Validation based on a list of allowed values. */
// +optional
Values *WorkflowtemplateValues `json:"values,omitempty"`
}
func (*WorkflowtemplateValidation) DeepCopy ¶
func (in *WorkflowtemplateValidation) DeepCopy() *WorkflowtemplateValidation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateValidation.
func (*WorkflowtemplateValidation) DeepCopyInto ¶
func (in *WorkflowtemplateValidation) DeepCopyInto(out *WorkflowtemplateValidation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateValues ¶
// WorkflowtemplateValues holds the allowed values for an enumerated workflow
// template parameter (Dataproc ParameterValidation value validation).
type WorkflowtemplateValues struct {
/* Immutable. Required. List of allowed values for the parameter. */
// FIX: the element type was `[]WorkflowtemplateValues` — a self-recursive
// list that cannot meaningfully represent "allowed values". The Dataproc
// WorkflowTemplates API defines ValueValidation.values as a list of strings.
Values []string `json:"values"`
}
func (*WorkflowtemplateValues) DeepCopy ¶
func (in *WorkflowtemplateValues) DeepCopy() *WorkflowtemplateValues
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateValues.
func (*WorkflowtemplateValues) DeepCopyInto ¶
func (in *WorkflowtemplateValues) DeepCopyInto(out *WorkflowtemplateValues)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateWorkerConfig ¶
// WorkflowtemplateWorkerConfig describes the Compute Engine configuration of
// an instance group (machine type, disks, accelerators, size, preemptibility)
// for a cluster managed by a DataprocWorkflowTemplate. All fields are
// immutable once the template is created.
type WorkflowtemplateWorkerConfig struct {
/* Immutable. Optional. The Compute Engine accelerator configuration for these instances. */
// +optional
Accelerators []WorkflowtemplateAccelerators `json:"accelerators,omitempty"`
/* Immutable. Optional. Disk option config settings. */
// +optional
DiskConfig *WorkflowtemplateDiskConfig `json:"diskConfig,omitempty"`
/* Immutable. */
// Reference to the Compute Engine image used for cluster instances.
// NOTE(review): semantics inferred from the field name; the generated doc
// string is empty — confirm against the Dataproc API.
// +optional
ImageRef *v1alpha1.ResourceRef `json:"imageRef,omitempty"`
/* Immutable. Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`. */
// +optional
MachineType *string `json:"machineType,omitempty"`
/* Immutable. Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu). */
// +optional
MinCpuPlatform *string `json:"minCpuPlatform,omitempty"`
/* Immutable. Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**. */
// +optional
NumInstances *int `json:"numInstances,omitempty"`
/* Immutable. Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE */
// +optional
Preemptibility *string `json:"preemptibility,omitempty"`
}
func (*WorkflowtemplateWorkerConfig) DeepCopy ¶
func (in *WorkflowtemplateWorkerConfig) DeepCopy() *WorkflowtemplateWorkerConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateWorkerConfig.
func (*WorkflowtemplateWorkerConfig) DeepCopyInto ¶
func (in *WorkflowtemplateWorkerConfig) DeepCopyInto(out *WorkflowtemplateWorkerConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type WorkflowtemplateWorkerConfigStatus ¶
// WorkflowtemplateWorkerConfigStatus mirrors the server-reported state of an
// instance group. Every field is output-only: it is populated by the
// controller from the Dataproc API, never set by the user.
type WorkflowtemplateWorkerConfigStatus struct {
/* Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group. */
InstanceNames []string `json:"instanceNames,omitempty"`
/* Output only. Specifies that this instance group contains preemptible instances. */
IsPreemptible bool `json:"isPreemptible,omitempty"`
/* Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups. */
ManagedGroupConfig WorkflowtemplateManagedGroupConfigStatus `json:"managedGroupConfig,omitempty"`
}
func (*WorkflowtemplateWorkerConfigStatus) DeepCopy ¶
func (in *WorkflowtemplateWorkerConfigStatus) DeepCopy() *WorkflowtemplateWorkerConfigStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowtemplateWorkerConfigStatus.
func (*WorkflowtemplateWorkerConfigStatus) DeepCopyInto ¶
func (in *WorkflowtemplateWorkerConfigStatus) DeepCopyInto(out *WorkflowtemplateWorkerConfigStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.