Documentation ¶

Overview ¶
Package manualv1alpha1 is the v1alpha1 version of the batch.aws.crossplane.io API.

+kubebuilder:object:generate=true
+groupName=batch.aws.crossplane.io
+versionName=v1alpha1
Index ¶
- Constants
- Variables
- type ArrayProperties
- type ArrayPropertiesDetail
- type AttemptContainerDetail
- type AttemptDetail
- type ContainerOverrides
- type ContainerProperties
- type Device
- type EFSAuthorizationConfig
- type EFSVolumeConfiguration
- type EvaluateOnExit
- type FargatePlatformConfiguration
- type Host
- type Job
- func (in *Job) DeepCopy() *Job
- func (in *Job) DeepCopyInto(out *Job)
- func (in *Job) DeepCopyObject() runtime.Object
- func (mg *Job) GetCondition(ct xpv1.ConditionType) xpv1.Condition
- func (mg *Job) GetDeletionPolicy() xpv1.DeletionPolicy
- func (mg *Job) GetProviderConfigReference() *xpv1.Reference
- func (mg *Job) GetProviderReference() *xpv1.Reference
- func (mg *Job) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo
- func (mg *Job) GetWriteConnectionSecretToReference() *xpv1.SecretReference
- func (mg *Job) ResolveReferences(ctx context.Context, c client.Reader) error
- func (mg *Job) SetConditions(c ...xpv1.Condition)
- func (mg *Job) SetDeletionPolicy(r xpv1.DeletionPolicy)
- func (mg *Job) SetProviderConfigReference(r *xpv1.Reference)
- func (mg *Job) SetProviderReference(r *xpv1.Reference)
- func (mg *Job) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo)
- func (mg *Job) SetWriteConnectionSecretToReference(r *xpv1.SecretReference)
 
- type JobDefinition
- func (in *JobDefinition) DeepCopy() *JobDefinition
- func (in *JobDefinition) DeepCopyInto(out *JobDefinition)
- func (in *JobDefinition) DeepCopyObject() runtime.Object
- func (mg *JobDefinition) GetCondition(ct xpv1.ConditionType) xpv1.Condition
- func (mg *JobDefinition) GetDeletionPolicy() xpv1.DeletionPolicy
- func (mg *JobDefinition) GetProviderConfigReference() *xpv1.Reference
- func (mg *JobDefinition) GetProviderReference() *xpv1.Reference
- func (mg *JobDefinition) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo
- func (mg *JobDefinition) GetWriteConnectionSecretToReference() *xpv1.SecretReference
- func (mg *JobDefinition) ResolveReferences(ctx context.Context, c client.Reader) error
- func (mg *JobDefinition) SetConditions(c ...xpv1.Condition)
- func (mg *JobDefinition) SetDeletionPolicy(r xpv1.DeletionPolicy)
- func (mg *JobDefinition) SetProviderConfigReference(r *xpv1.Reference)
- func (mg *JobDefinition) SetProviderReference(r *xpv1.Reference)
- func (mg *JobDefinition) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo)
- func (mg *JobDefinition) SetWriteConnectionSecretToReference(r *xpv1.SecretReference)
 
- type JobDefinitionList
- type JobDefinitionObservation
- type JobDefinitionParameters
- type JobDefinitionSpec
- type JobDefinitionStatus
- type JobDependency
- type JobList
- type JobObservation
- type JobParameters
- type JobSpec
- type JobStatus
- type JobTimeout
- type KeyValuePair
- type LinuxParameters
- type LogConfiguration
- type MountPoint
- type NetworkConfiguration
- type NetworkInterface
- type NodeOverrides
- type NodeProperties
- type NodePropertyOverride
- type NodeRangeProperty
- type ResourceRequirement
- type RetryStrategy
- type Secret
- type Tmpfs
- type Ulimit
- type Volume
Constants ¶
const (
	Group   = "batch.aws.crossplane.io"
	Version = "v1alpha1"
)
Package type metadata.
Variables ¶
var (
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
)
var (
	JobDefinitionKind             = reflect.TypeOf(JobDefinition{}).Name()
	JobDefinitionGroupKind        = schema.GroupKind{Group: Group, Kind: JobDefinitionKind}.String()
	JobDefinitionKindAPIVersion   = JobDefinitionKind + "." + SchemeGroupVersion.String()
	JobDefinitionGroupVersionKind = SchemeGroupVersion.WithKind(JobDefinitionKind)
)
JobDefinition type metadata.
var (
	JobKind             = reflect.TypeOf(Job{}).Name()
	JobGroupKind        = schema.GroupKind{Group: Group, Kind: JobKind}.String()
	JobKindAPIVersion   = JobKind + "." + SchemeGroupVersion.String()
	JobGroupVersionKind = SchemeGroupVersion.WithKind(JobKind)
)
Job type metadata.
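Illustrative sketch (not generated from this package): registering these types with a runtime.Scheme. It assumes the package registers Job and JobDefinition with SchemeBuilder at init time, as Crossplane providers conventionally do, and that controller-runtime's scheme.Builder API is in use.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	manualv1alpha1 "github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1"
)

func main() {
	// Add Job and JobDefinition to a fresh runtime.Scheme.
	s := runtime.NewScheme()
	if err := manualv1alpha1.SchemeBuilder.AddToScheme(s); err != nil {
		panic(err)
	}

	// The GroupVersionKind values can then be used to address the registered types.
	fmt.Println(manualv1alpha1.JobGroupVersionKind)           // batch.aws.crossplane.io/v1alpha1, Kind=Job
	fmt.Println(manualv1alpha1.JobDefinitionGroupVersionKind) // batch.aws.crossplane.io/v1alpha1, Kind=JobDefinition
}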
Functions ¶
This section is empty.
Types ¶
type ArrayProperties ¶
type ArrayProperties struct {
	// The size of the array job.
	Size *int64 `json:"size,omitempty"`
}
    ArrayProperties defines a Batch array job.
func (*ArrayProperties) DeepCopy ¶
func (in *ArrayProperties) DeepCopy() *ArrayProperties
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArrayProperties.
func (*ArrayProperties) DeepCopyInto ¶
func (in *ArrayProperties) DeepCopyInto(out *ArrayProperties)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ArrayPropertiesDetail ¶
type ArrayPropertiesDetail struct {
	// The job index within the array that's associated with this job. This parameter
	// is returned for array job children.
	Index *int64 `json:"index,omitempty"`
	// The size of the array job. This parameter is returned for parent array jobs.
	Size *int64 `json:"size,omitempty"`
	// A summary of the number of array job children in each available job status.
	// This parameter is returned for parent array jobs.
	StatusSummary map[string]*int64 `json:"statusSummary,omitempty"`
}
    ArrayPropertiesDetail defines the array properties of a job for observation.
func (*ArrayPropertiesDetail) DeepCopy ¶
func (in *ArrayPropertiesDetail) DeepCopy() *ArrayPropertiesDetail
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArrayPropertiesDetail.
func (*ArrayPropertiesDetail) DeepCopyInto ¶
func (in *ArrayPropertiesDetail) DeepCopyInto(out *ArrayPropertiesDetail)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type AttemptContainerDetail ¶
type AttemptContainerDetail struct {
	// The Amazon Resource Name (ARN) of the Amazon ECS container instance that
	// hosts the job attempt.
	ContainerInstanceArn *string `json:"containerInstanceArn,omitempty"`
	// The exit code for the job attempt. A non-zero exit code is considered a failure.
	ExitCode *int64 `json:"exitCode,omitempty"`
	// The name of the CloudWatch Logs log stream associated with the container.
	// The log group for Batch jobs is /aws/batch/job. Each container attempt receives
	// a log stream name when they reach the RUNNING status.
	LogStreamName *string `json:"logStreamName,omitempty"`
	// The network interfaces associated with the job attempt.
	NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"`
	// A short (255 max characters) human-readable string to provide additional
	// details about a running or stopped container.
	Reason *string `json:"reason,omitempty"`
	// The Amazon Resource Name (ARN) of the Amazon ECS task that's associated with
	// the job attempt. Each container attempt receives a task ARN when they reach
	// the STARTING status.
	TaskArn *string `json:"taskArn,omitempty"`
}
    AttemptContainerDetail defines the details of a container that's part of a job attempt for observation.
func (*AttemptContainerDetail) DeepCopy ¶
func (in *AttemptContainerDetail) DeepCopy() *AttemptContainerDetail
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttemptContainerDetail.
func (*AttemptContainerDetail) DeepCopyInto ¶
func (in *AttemptContainerDetail) DeepCopyInto(out *AttemptContainerDetail)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type AttemptDetail ¶
type AttemptDetail struct {
	// Details about the container in this job attempt.
	Container *AttemptContainerDetail `json:"container,omitempty"`
	// The Unix timestamp (in milliseconds) for when the attempt was started (when
	// the attempt transitioned from the STARTING state to the RUNNING state).
	StartedAt *int64 `json:"startedAt,omitempty"`
	// A short, human-readable string to provide additional details about the current
	// status of the job attempt.
	StatusReason *string `json:"statusReason,omitempty"`
	// The Unix timestamp (in milliseconds) for when the attempt was stopped (when
	// the attempt transitioned from the RUNNING state to a terminal state, such
	// as SUCCEEDED or FAILED).
	StoppedAt *int64 `json:"stoppedAt,omitempty"`
}
    AttemptDetail defines a job attempt for observation.
func (*AttemptDetail) DeepCopy ¶
func (in *AttemptDetail) DeepCopy() *AttemptDetail
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttemptDetail.
func (*AttemptDetail) DeepCopyInto ¶
func (in *AttemptDetail) DeepCopyInto(out *AttemptDetail)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ContainerOverrides ¶
type ContainerOverrides struct {
	// The command to send to the container that overrides the default command from
	// the Docker image or the job definition.
	Command []*string `json:"command,omitempty"`
	// The environment variables to send to the container. You can add new environment
	// variables, which are added to the container at launch, or you can override
	// the existing environment variables from the Docker image or the job definition.
	//
	// Environment variables must not start with AWS_BATCH; this naming convention
	// is reserved for variables that are set by the Batch service.
	Environment []*KeyValuePair `json:"environment,omitempty"`
	// The instance type to use for a multi-node parallel job.
	//
	// This parameter isn't applicable to single-node container jobs or jobs that
	// run on Fargate resources, and shouldn't be provided.
	InstanceType *string `json:"instanceType,omitempty"`
	// The type and amount of resources to assign to a container. This overrides
	// the settings in the job definition. The supported resources include GPU,
	// MEMORY, and VCPU.
	ResourceRequirements []*ResourceRequirement `json:"resourceRequirements,omitempty"`
}
    ContainerOverrides defines the overrides that should be sent to a container.
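A minimal sketch of populating ContainerOverrides in Go. The strPtr helper and all values are illustrative and not part of this package.

package example

import (
	batch "github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1"
)

// strPtr is a local illustrative helper; it is not part of this package.
func strPtr(s string) *string { return &s }

// exampleContainerOverrides overrides the container command and adds an
// environment variable for a submitted Job.
func exampleContainerOverrides() *batch.ContainerOverrides {
	return &batch.ContainerOverrides{
		Command: []*string{strPtr("sh"), strPtr("-c"), strPtr("echo hello")},
		Environment: []*batch.KeyValuePair{
			{Name: strPtr("STAGE"), Value: strPtr("dev")},
		},
	}
}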
func (*ContainerOverrides) DeepCopy ¶
func (in *ContainerOverrides) DeepCopy() *ContainerOverrides
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerOverrides.
func (*ContainerOverrides) DeepCopyInto ¶
func (in *ContainerOverrides) DeepCopyInto(out *ContainerOverrides)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ContainerProperties ¶
type ContainerProperties struct {
	// The command that's passed to the container. This parameter maps to Cmd in
	// the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the COMMAND parameter to docker run (https://docs.docker.com/engine/reference/run/).
	// For more information, see https://docs.docker.com/engine/reference/builder/#cmd
	// (https://docs.docker.com/engine/reference/builder/#cmd).
	Command []*string `json:"command,omitempty"`
	// The environment variables to pass to a container. This parameter maps to
	// Env in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --env option to docker run (https://docs.docker.com/engine/reference/run/).
	//
	// We don't recommend using plaintext environment variables for sensitive information,
	// such as credential data.
	//
	// Environment variables must not start with AWS_BATCH; this naming convention
	// is reserved for variables that are set by the Batch service.
	Environment []*KeyValuePair `json:"environment,omitempty"`
	// The Amazon Resource Name (ARN) of the execution role that Batch can assume.
	// For jobs that run on Fargate resources, you must provide an execution role.
	// For more information, see Batch execution IAM role (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html)
	// in the Batch User Guide.
	// +crossplane:generate:reference:type=github.com/crossplane-contrib/provider-aws/apis/iam/v1beta1.Role
	// +crossplane:generate:reference:extractor=github.com/crossplane-contrib/provider-aws/apis/iam/v1beta1.RoleARN()
	// +crossplane:generate:reference:refFieldName=ExecutionRoleARNRef
	// +crossplane:generate:reference:selectorFieldName=ExecutionRoleARNSelector
	ExecutionRoleArn *string `json:"executionRoleArn,omitempty"`
	// ExecutionRoleARNRef is a reference to an ARN of the IAM role used to set
	// the ExecutionRoleARN.
	// +optional
	ExecutionRoleARNRef *xpv1.Reference `json:"executionRoleARNRef,omitempty"`
	// ExecutionRoleARNSelector selects references to an ARN of the IAM role used
	// to set the ExecutionRoleARN.
	// +optional
	ExecutionRoleARNSelector *xpv1.Selector `json:"executionRoleARNSelector,omitempty"`
	// The platform configuration for jobs that are running on Fargate resources.
	// Jobs that are running on EC2 resources must not specify this parameter.
	FargatePlatformConfiguration *FargatePlatformConfiguration `json:"fargatePlatformConfiguration,omitempty"`
	// The image used to start a container. This string is passed directly to the
	// Docker daemon. Images in the Docker Hub registry are available by default.
	// Other repositories are specified with repository-url/image:tag . Up to 255
	// letters (uppercase and lowercase), numbers, hyphens, underscores, colons,
	// periods, forward slashes, and number signs are allowed. This parameter maps
	// to Image in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the IMAGE parameter of docker run (https://docs.docker.com/engine/reference/run/).
	//
	// Docker image architecture must match the processor architecture of the compute
	// resources that they're scheduled on. For example, ARM-based Docker images
	// can only run on ARM-based compute resources.
	//
	//    * Images in Amazon ECR repositories use the full registry and repository
	//    URI (for example, 012345678910.dkr.ecr.<region-name>.amazonaws.com/<repository-name>).
	//
	//    * Images in official repositories on Docker Hub use a single name (for
	//    example, ubuntu or mongo).
	//
	//    * Images in other repositories on Docker Hub are qualified with an organization
	//    name (for example, amazon/amazon-ecs-agent).
	//
	//    * Images in other online repositories are qualified further by a domain
	//    name (for example, quay.io/assemblyline/ubuntu).
	Image *string `json:"image,omitempty"`
	// The instance type to use for a multi-node parallel job. All node groups in
	// a multi-node parallel job must use the same instance type.
	//
	// This parameter isn't applicable to single-node container jobs or jobs that
	// run on Fargate resources, and shouldn't be provided.
	InstanceType *string `json:"instanceType,omitempty"`
	// The Amazon Resource Name (ARN) of the IAM role that the container can assume
	// for Amazon Web Services permissions. For more information, see IAM Roles
	// for Tasks (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html)
	// in the Amazon Elastic Container Service Developer Guide.
	// +crossplane:generate:reference:type=github.com/crossplane-contrib/provider-aws/apis/iam/v1beta1.Role
	// +crossplane:generate:reference:extractor=github.com/crossplane-contrib/provider-aws/apis/iam/v1beta1.RoleARN()
	// +crossplane:generate:reference:refFieldName=JobRoleARNRef
	// +crossplane:generate:reference:selectorFieldName=JobRoleARNSelector
	JobRoleArn *string `json:"jobRoleArn,omitempty"`
	// JobRoleARNRef is a reference to an ARN of the IAM role used to set
	// the JobRoleARN.
	// +optional
	JobRoleARNRef *xpv1.Reference `json:"jobRoleARNRef,omitempty"`
	// JobRoleARNSelector selects references to an ARN of the IAM role used
	// to set the JobRoleARN.
	// +optional
	JobRoleARNSelector *xpv1.Selector `json:"jobRoleARNSelector,omitempty"`
	// Linux-specific modifications that are applied to the container, such as details
	// for device mappings.
	LinuxParameters *LinuxParameters `json:"linuxParameters,omitempty"`
	// The log configuration specification for the container.
	//
	// This parameter maps to LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/).
	// By default, containers use the same logging driver that the Docker daemon
	// uses. However the container might use a different logging driver than the
	// Docker daemon by specifying a log driver with this parameter in the container
	// definition. To use a different logging driver for a container, the log system
	// must be configured properly on the container instance (or on a different
	// log server for remote logging options). For more information on the options
	// for different supported log drivers, see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/)
	// in the Docker documentation.
	//
	// Batch currently supports a subset of the logging drivers available to the
	// Docker daemon (shown in the LogConfiguration data type).
	//
	// This parameter requires version 1.18 of the Docker Remote API or greater
	// on your container instance. To check the Docker Remote API version on your
	// container instance, log into your container instance and run the following
	// command: sudo docker version | grep "Server API version"
	//
	// The Amazon ECS container agent running on a container instance must register
	// the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS
	// environment variable before containers placed on that instance can use these
	// log configuration options. For more information, see Amazon ECS Container
	// Agent Configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html)
	// in the Amazon Elastic Container Service Developer Guide.
	LogConfiguration *LogConfiguration `json:"logConfiguration,omitempty"`
	// The mount points for data volumes in your container. This parameter maps
	// to Volumes in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --volume option to docker run (https://docs.docker.com/engine/reference/run/).
	MountPoints []*MountPoint `json:"mountPoints,omitempty"`
	// The network configuration for jobs that are running on Fargate resources.
	// Jobs that are running on EC2 resources must not specify this parameter.
	NetworkConfiguration *NetworkConfiguration `json:"networkConfiguration,omitempty"`
	// When this parameter is true, the container is given elevated permissions
	// on the host container instance (similar to the root user). This parameter
	// maps to Privileged in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --privileged option to docker run (https://docs.docker.com/engine/reference/run/).
	// The default value is false.
	//
	// This parameter isn't applicable to jobs that are running on Fargate resources
	// and shouldn't be provided, or specified as false.
	Privileged *bool `json:"privileged,omitempty"`
	// When this parameter is true, the container is given read-only access to its
	// root file system. This parameter maps to ReadonlyRootfs in the Create a container
	// (https://docs.docker.com/engine/api/v1.23/#create-a-container) section of
	// the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) and the
	// --read-only option to docker run.
	ReadonlyRootFilesystem *bool `json:"readonlyRootFilesystem,omitempty"`
	// The type and amount of resources to assign to a container. The supported
	// resources include GPU, MEMORY, and VCPU.
	ResourceRequirements []*ResourceRequirement `json:"resourceRequirements,omitempty"`
	// The secrets for the container. For more information, see Specifying sensitive
	// data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
	// in the Batch User Guide.
	Secrets []*Secret `json:"secrets,omitempty"`
	// A list of ulimits to set in the container. This parameter maps to Ulimits
	// in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --ulimit option to docker run (https://docs.docker.com/engine/reference/run/).
	//
	// This parameter isn't applicable to jobs that are running on Fargate resources
	// and shouldn't be provided.
	Ulimits []*Ulimit `json:"ulimits,omitempty"`
	// The user name to use inside the container. This parameter maps to User in
	// the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --user option to docker run (https://docs.docker.com/engine/reference/run/).
	User *string `json:"user,omitempty"`
	// A list of data volumes used in a job.
	Volumes []*Volume `json:"volumes,omitempty"`
}
    ContainerProperties defines the container that's launched as part of a job.
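A minimal sketch of a Fargate-oriented ContainerProperties value. The image, role reference name, and platform version are placeholder assumptions; a real Fargate job would also set ResourceRequirements for VCPU and MEMORY, whose fields are not listed on this page, and the strPtr helper is defined locally for illustration.

package example

import (
	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

	batch "github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1"
)

// strPtr is a local illustrative helper; it is not part of this package.
func strPtr(s string) *string { return &s }

// exampleContainerProperties sketches a container definition intended for Fargate.
func exampleContainerProperties() *batch.ContainerProperties {
	return &batch.ContainerProperties{
		Image:   strPtr("public.ecr.aws/amazonlinux/amazonlinux:latest"),
		Command: []*string{strPtr("echo"), strPtr("hello")},
		Environment: []*batch.KeyValuePair{
			{Name: strPtr("STAGE"), Value: strPtr("dev")},
		},
		// Resolve the execution role ARN from a referenced IAM Role managed resource.
		ExecutionRoleARNRef: &xpv1.Reference{Name: "my-batch-execution-role"},
		FargatePlatformConfiguration: &batch.FargatePlatformConfiguration{
			PlatformVersion: strPtr("LATEST"),
		},
	}
}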
func (*ContainerProperties) DeepCopy ¶
func (in *ContainerProperties) DeepCopy() *ContainerProperties
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerProperties.
func (*ContainerProperties) DeepCopyInto ¶
func (in *ContainerProperties) DeepCopyInto(out *ContainerProperties)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Device ¶
type Device struct {
	// The path inside the container that's used to expose the host device. By default,
	// the hostPath value is used.
	ContainerPath *string `json:"containerPath,omitempty"`
	// The path for the device on the host container instance.
	//
	// HostPath is a required field
	// +kubebuilder:validation:Required
	HostPath string `json:"hostPath"`
	// The explicit permissions to provide to the container for the device. By default,
	// the container has permissions for read, write, and mknod for the device.
	Permissions []*string `json:"permissions,omitempty"`
}
    Device defines a container instance host device.
This object isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.
func (*Device) DeepCopy ¶
func (in *Device) DeepCopy() *Device
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device.
func (*Device) DeepCopyInto ¶
func (in *Device) DeepCopyInto(out *Device)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type EFSAuthorizationConfig ¶
type EFSAuthorizationConfig struct {
	// The Amazon EFS access point ID to use. If an access point is specified, the
	// root directory value specified in the EFSVolumeConfiguration must either
	// be omitted or set to / which will enforce the path set on the EFS access
	// point. If an access point is used, transit encryption must be enabled in
	// the EFSVolumeConfiguration. For more information, see Working with Amazon
	// EFS Access Points (https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html)
	// in the Amazon Elastic File System User Guide.
	// +crossplane:generate:reference:type=github.com/crossplane-contrib/provider-aws/apis/efs/v1alpha1.AccessPoint
	// +crossplane:generate:reference:refFieldName=AccessPointIDRef
	// +crossplane:generate:reference:selectorFieldName=AccessPointIDSelector
	AccessPointID *string `json:"accessPointId,omitempty"`
	// AccessPointIDRef are references to AccessPoint used to set
	// the AccessPointID.
	// +optional
	AccessPointIDRef *xpv1.Reference `json:"accessPointIdRef,omitempty"`
	// AccessPointIDSelector selects references to AccessPoint used
	// to set the AccessPointID.
	// +optional
	AccessPointIDSelector *xpv1.Selector `json:"accessPointIdSelector,omitempty"`
	// Whether or not to use the Batch job IAM role defined in a job definition
	// when mounting the Amazon EFS file system. If enabled, transit encryption
	// must be enabled in the EFSVolumeConfiguration. If this parameter is omitted,
	// the default value of DISABLED is used. For more information, see Using Amazon
	// EFS Access Points (https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html#efs-volume-accesspoints)
	// in the Batch User Guide. EFS IAM authorization requires that TransitEncryption
	// be ENABLED and that a JobRoleArn is specified.
	// +kubebuilder:validation:Enum=ENABLED;DISABLED
	IAM *string `json:"iam,omitempty"`
}
    EFSAuthorizationConfig defines the authorization configuration details for the Amazon EFS file system.
func (*EFSAuthorizationConfig) DeepCopy ¶
func (in *EFSAuthorizationConfig) DeepCopy() *EFSAuthorizationConfig
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSAuthorizationConfig.
func (*EFSAuthorizationConfig) DeepCopyInto ¶
func (in *EFSAuthorizationConfig) DeepCopyInto(out *EFSAuthorizationConfig)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type EFSVolumeConfiguration ¶
type EFSVolumeConfiguration struct {
	// The authorization configuration details for the Amazon EFS file system.
	AuthorizationConfig *EFSAuthorizationConfig `json:"authorizationConfig,omitempty"`
	// The Amazon EFS file system ID to use.
	//
	// FileSystemID is a required field
	// +crossplane:generate:reference:type=github.com/crossplane-contrib/provider-aws/apis/efs/v1alpha1.FileSystem
	// +crossplane:generate:reference:refFieldName=FileSystemIDRef
	// +crossplane:generate:reference:selectorFieldName=FileSystemIDSelector
	FileSystemID string `json:"fileSystemId,omitempty"`
	// FileSystemIDRef are references to Filesystem used to set
	// the FileSystemID.
	// +optional
	FileSystemIDRef *xpv1.Reference `json:"fileSystemIdRef,omitempty"`
	// FileSystemIDSelector selects references to Filesystem used
	// to set the FileSystemID.
	// +optional
	FileSystemIDSelector *xpv1.Selector `json:"fileSystemIdSelector,omitempty"`
	// The directory within the Amazon EFS file system to mount as the root directory
	// inside the host. If this parameter is omitted, the root of the Amazon EFS
	// volume is used instead. Specifying / has the same effect as omitting this
	// parameter. The maximum length is 4,096 characters.
	//
	// If an EFS access point is specified in the authorizationConfig, the root
	// directory parameter must either be omitted or set to /, which enforces the
	// path set on the Amazon EFS access point.
	RootDirectory *string `json:"rootDirectory,omitempty"`
	// Determines whether to enable encryption for Amazon EFS data in transit between
	// the Amazon ECS host and the Amazon EFS server. Transit encryption must be
	// enabled if Amazon EFS IAM authorization is used. If this parameter is omitted,
	// the default value of DISABLED is used. For more information, see Encrypting
	// data in transit (https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html)
	// in the Amazon Elastic File System User Guide.
	// +kubebuilder:validation:Enum=ENABLED;DISABLED
	TransitEncryption *string `json:"transitEncryption,omitempty"`
	// The port to use when sending encrypted data between the Amazon ECS host and
	// the Amazon EFS server. If you don't specify a transit encryption port, it
	// uses the port selection strategy that the Amazon EFS mount helper uses. The
	// value must be between 0 and 65,535. For more information, see EFS Mount Helper
	// (https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the
	// Amazon Elastic File System User Guide.
	TransitEncryptionPort *int64 `json:"transitEncryptionPort,omitempty"`
}
    EFSVolumeConfiguration is used when you're using an Amazon Elastic File System file system for job storage. For more information, see Amazon EFS Volumes (https://docs.aws.amazon.com/batch/latest/userguide/efs-volumes.html) in the Batch User Guide.
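A minimal sketch of an EFSVolumeConfiguration that uses an access point, which per the field comments requires transit encryption to be ENABLED and the root directory to be omitted or "/". The file system and access point IDs are placeholders, and the strPtr helper is defined locally for illustration.

package example

import (
	batch "github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1"
)

// strPtr is a local illustrative helper; it is not part of this package.
func strPtr(s string) *string { return &s }

// exampleEFSVolume sketches an EFS-backed volume with an access point and IAM authorization.
func exampleEFSVolume() *batch.EFSVolumeConfiguration {
	return &batch.EFSVolumeConfiguration{
		FileSystemID: "fs-0123456789abcdef0",
		AuthorizationConfig: &batch.EFSAuthorizationConfig{
			AccessPointID: strPtr("fsap-0123456789abcdef0"),
			IAM:           strPtr("ENABLED"),
		},
		// With an access point, transit encryption must be ENABLED and the
		// root directory must be omitted or "/".
		TransitEncryption: strPtr("ENABLED"),
		RootDirectory:     strPtr("/"),
	}
}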
func (*EFSVolumeConfiguration) DeepCopy ¶
func (in *EFSVolumeConfiguration) DeepCopy() *EFSVolumeConfiguration
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFSVolumeConfiguration.
func (*EFSVolumeConfiguration) DeepCopyInto ¶
func (in *EFSVolumeConfiguration) DeepCopyInto(out *EFSVolumeConfiguration)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type EvaluateOnExit ¶
type EvaluateOnExit struct {
	// Specifies the action to take if all of the specified conditions (onStatusReason,
	// onReason, and onExitCode) are met. The values aren't case sensitive.
	// (AWS gives lowercase back!)
	// Action is a required field
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Enum=retry;exit
	Action string `json:"action"`
	// Contains a glob pattern to match against the decimal representation of the
	// ExitCode returned for a job. The pattern can be up to 512 characters in length.
	// It can contain only numbers, and can optionally end with an asterisk (*)
	// so that only the start of the string needs to be an exact match.
	OnExitCode *string `json:"onExitCode,omitempty"`
	// Contains a glob pattern to match against the Reason returned for a job. The
	// pattern can be up to 512 characters in length. It can contain letters, numbers,
	// periods (.), colons (:), and white space (including spaces and tabs). It
	// can optionally end with an asterisk (*) so that only the start of the string
	// needs to be an exact match.
	OnReason *string `json:"onReason,omitempty"`
	// Contains a glob pattern to match against the StatusReason returned for a
	// job. The pattern can be up to 512 characters in length. It can contain letters,
	// numbers, periods (.), colons (:), and white space (including spaces or tabs).
	// It can optionally end with an asterisk (*) so that only the start of the
	// string needs to be an exact match.
	OnStatusReason *string `json:"onStatusReason,omitempty"`
}
    EvaluateOnExit specifies a set of conditions to be met, and an action to take (RETRY or EXIT) if all conditions are met.
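A minimal sketch of EvaluateOnExit conditions, typically attached to a RetryStrategy (whose fields are not listed on this page). The exit code and status-reason glob patterns are illustrative.

package example

import (
	batch "github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1"
)

// strPtr is a local illustrative helper; it is not part of this package.
func strPtr(s string) *string { return &s }

// exampleEvaluateOnExit retries when the container exits with code 137 and
// otherwise exits on any host-related status reason.
func exampleEvaluateOnExit() []batch.EvaluateOnExit {
	return []batch.EvaluateOnExit{
		{Action: "retry", OnExitCode: strPtr("137")},
		{Action: "exit", OnStatusReason: strPtr("Host EC2*")},
	}
}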
func (*EvaluateOnExit) DeepCopy ¶
func (in *EvaluateOnExit) DeepCopy() *EvaluateOnExit
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EvaluateOnExit.
func (*EvaluateOnExit) DeepCopyInto ¶
func (in *EvaluateOnExit) DeepCopyInto(out *EvaluateOnExit)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type FargatePlatformConfiguration ¶
type FargatePlatformConfiguration struct {
	// The Fargate platform version where the jobs are running. A platform version
	// is specified only for jobs that are running on Fargate resources. If one
	// isn't specified, the LATEST platform version is used by default. This uses
	// a recent, approved version of the Fargate platform for compute resources.
	// For more information, see Fargate platform versions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html)
	// in the Amazon Elastic Container Service Developer Guide.
	PlatformVersion *string `json:"platformVersion,omitempty"`
}
    FargatePlatformConfiguration defines the platform configuration for jobs that are running on Fargate resources. Jobs that run on EC2 resources must not specify this parameter.
func (*FargatePlatformConfiguration) DeepCopy ¶
func (in *FargatePlatformConfiguration) DeepCopy() *FargatePlatformConfiguration
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FargatePlatformConfiguration.
func (*FargatePlatformConfiguration) DeepCopyInto ¶
func (in *FargatePlatformConfiguration) DeepCopyInto(out *FargatePlatformConfiguration)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Host ¶
type Host struct {
	// The path on the host container instance that's presented to the container.
	// If this parameter is empty, then the Docker daemon has assigned a host path
	// for you. If this parameter contains a file location, then the data volume
	// persists at the specified location on the host container instance until you
	// delete it manually. If the source path location doesn't exist on the host
	// container instance, the Docker daemon creates it. If the location does exist,
	// the contents of the source path folder are exported.
	//
	// This parameter isn't applicable to jobs that run on Fargate resources and
	// shouldn't be provided.
	SourcePath *string `json:"sourcePath,omitempty"`
}
    Host determines whether your data volume persists on the host container instance and where it is stored. If this parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data isn't guaranteed to persist after the containers associated with it stop running.
func (*Host) DeepCopy ¶
func (in *Host) DeepCopy() *Host
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Host.
func (*Host) DeepCopyInto ¶
func (in *Host) DeepCopyInto(out *Host)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Job ¶
type Job struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec   JobSpec   `json:"spec"`
	Status JobStatus `json:"status,omitempty"`
}
    A Job is a managed resource that represents an AWS Batch Job.

    +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
    +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
    +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
    +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
    +kubebuilder:subresource:status
    +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
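A minimal sketch of constructing a Job managed resource in Go. The queue, job definition, and ProviderConfig names are placeholders; see JobParameters below for the full set of forProvider fields.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

	batch "github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1"
)

// exampleJob submits to an existing queue and job definition by name.
func exampleJob() *batch.Job {
	return &batch.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example-job"},
		Spec: batch.JobSpec{
			ResourceSpec: xpv1.ResourceSpec{
				ProviderConfigReference: &xpv1.Reference{Name: "default"},
			},
			ForProvider: batch.JobParameters{
				Region:        "us-east-1",
				JobQueue:      "my-job-queue",
				JobDefinition: "my-job-definition:1",
			},
		},
	}
}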
func (*Job) DeepCopy ¶
func (in *Job) DeepCopy() *Job
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job.
func (*Job) DeepCopyInto ¶
func (in *Job) DeepCopyInto(out *Job)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Job) DeepCopyObject ¶
func (in *Job) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*Job) GetCondition ¶
func (mg *Job) GetCondition(ct xpv1.ConditionType) xpv1.Condition
GetCondition of this Job.
func (*Job) GetDeletionPolicy ¶
func (mg *Job) GetDeletionPolicy() xpv1.DeletionPolicy
GetDeletionPolicy of this Job.
func (*Job) GetProviderConfigReference ¶
func (mg *Job) GetProviderConfigReference() *xpv1.Reference
GetProviderConfigReference of this Job.
func (*Job) GetProviderReference ¶
func (mg *Job) GetProviderReference() *xpv1.Reference
GetProviderReference of this Job. Deprecated: Use GetProviderConfigReference.
func (*Job) GetPublishConnectionDetailsTo ¶
func (mg *Job) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo
GetPublishConnectionDetailsTo of this Job.
func (*Job) GetWriteConnectionSecretToReference ¶
func (mg *Job) GetWriteConnectionSecretToReference() *xpv1.SecretReference
GetWriteConnectionSecretToReference of this Job.
func (*Job) ResolveReferences ¶
func (mg *Job) ResolveReferences(ctx context.Context, c client.Reader) error
ResolveReferences of this Job.
func (*Job) SetConditions ¶
func (mg *Job) SetConditions(c ...xpv1.Condition)
SetConditions of this Job.
func (*Job) SetDeletionPolicy ¶
func (mg *Job) SetDeletionPolicy(r xpv1.DeletionPolicy)
SetDeletionPolicy of this Job.
func (*Job) SetProviderConfigReference ¶
func (mg *Job) SetProviderConfigReference(r *xpv1.Reference)
SetProviderConfigReference of this Job.
func (*Job) SetProviderReference ¶
func (mg *Job) SetProviderReference(r *xpv1.Reference)
SetProviderReference of this Job. Deprecated: Use SetProviderConfigReference.
func (*Job) SetPublishConnectionDetailsTo ¶
func (mg *Job) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo)
SetPublishConnectionDetailsTo of this Job.
func (*Job) SetWriteConnectionSecretToReference ¶
func (mg *Job) SetWriteConnectionSecretToReference(r *xpv1.SecretReference)
SetWriteConnectionSecretToReference of this Job.
type JobDefinition ¶
type JobDefinition struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec   JobDefinitionSpec   `json:"spec"`
	Status JobDefinitionStatus `json:"status,omitempty"`
}
    A JobDefinition is a managed resource that represents an AWS Batch JobDefinition.

    +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
    +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
    +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
    +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
    +kubebuilder:subresource:status
    +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
func (*JobDefinition) DeepCopy ¶
func (in *JobDefinition) DeepCopy() *JobDefinition
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinition.
func (*JobDefinition) DeepCopyInto ¶
func (in *JobDefinition) DeepCopyInto(out *JobDefinition)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*JobDefinition) DeepCopyObject ¶
func (in *JobDefinition) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*JobDefinition) GetCondition ¶
func (mg *JobDefinition) GetCondition(ct xpv1.ConditionType) xpv1.Condition
GetCondition of this JobDefinition.
func (*JobDefinition) GetDeletionPolicy ¶
func (mg *JobDefinition) GetDeletionPolicy() xpv1.DeletionPolicy
GetDeletionPolicy of this JobDefinition.
func (*JobDefinition) GetProviderConfigReference ¶
func (mg *JobDefinition) GetProviderConfigReference() *xpv1.Reference
GetProviderConfigReference of this JobDefinition.
func (*JobDefinition) GetProviderReference ¶
func (mg *JobDefinition) GetProviderReference() *xpv1.Reference
GetProviderReference of this JobDefinition. Deprecated: Use GetProviderConfigReference.
func (*JobDefinition) GetPublishConnectionDetailsTo ¶
func (mg *JobDefinition) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo
GetPublishConnectionDetailsTo of this JobDefinition.
func (*JobDefinition) GetWriteConnectionSecretToReference ¶
func (mg *JobDefinition) GetWriteConnectionSecretToReference() *xpv1.SecretReference
GetWriteConnectionSecretToReference of this JobDefinition.
func (*JobDefinition) ResolveReferences ¶
func (mg *JobDefinition) ResolveReferences(ctx context.Context, c client.Reader) error
ResolveReferences of this JobDefinition.
func (*JobDefinition) SetConditions ¶
func (mg *JobDefinition) SetConditions(c ...xpv1.Condition)
SetConditions of this JobDefinition.
func (*JobDefinition) SetDeletionPolicy ¶
func (mg *JobDefinition) SetDeletionPolicy(r xpv1.DeletionPolicy)
SetDeletionPolicy of this JobDefinition.
func (*JobDefinition) SetProviderConfigReference ¶
func (mg *JobDefinition) SetProviderConfigReference(r *xpv1.Reference)
SetProviderConfigReference of this JobDefinition.
func (*JobDefinition) SetProviderReference ¶
func (mg *JobDefinition) SetProviderReference(r *xpv1.Reference)
SetProviderReference of this JobDefinition. Deprecated: Use SetProviderConfigReference.
func (*JobDefinition) SetPublishConnectionDetailsTo ¶
func (mg *JobDefinition) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo)
SetPublishConnectionDetailsTo of this JobDefinition.
func (*JobDefinition) SetWriteConnectionSecretToReference ¶
func (mg *JobDefinition) SetWriteConnectionSecretToReference(r *xpv1.SecretReference)
SetWriteConnectionSecretToReference of this JobDefinition.
type JobDefinitionList ¶
type JobDefinitionList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []JobDefinition `json:"items"`
}
    JobDefinitionList contains a list of JobDefinitions.
func (*JobDefinitionList) DeepCopy ¶
func (in *JobDefinitionList) DeepCopy() *JobDefinitionList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionList.
func (*JobDefinitionList) DeepCopyInto ¶
func (in *JobDefinitionList) DeepCopyInto(out *JobDefinitionList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*JobDefinitionList) DeepCopyObject ¶
func (in *JobDefinitionList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*JobDefinitionList) GetItems ¶
func (l *JobDefinitionList) GetItems() []resource.Managed
GetItems of this JobDefinitionList.
type JobDefinitionObservation ¶
type JobDefinitionObservation struct {
	// The Amazon Resource Name (ARN) for the job definition.
	JobDefinitionArn *string `json:"jobDefinitionArn,omitempty"`
	// The revision of the job definition.
	Revision *int64 `json:"revision,omitempty"`
	// The status of the job definition.
	Status *string `json:"status,omitempty"`
}
    JobDefinitionObservation keeps the state for the external resource.
func (*JobDefinitionObservation) DeepCopy ¶
func (in *JobDefinitionObservation) DeepCopy() *JobDefinitionObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionObservation.
func (*JobDefinitionObservation) DeepCopyInto ¶
func (in *JobDefinitionObservation) DeepCopyInto(out *JobDefinitionObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobDefinitionParameters ¶
type JobDefinitionParameters struct {
	// Region is the region in which the JobDefinition will be created.
	// +kubebuilder:validation:Required
	Region string `json:"region"`
	// An object with various properties specific to single-node container-based
	// jobs. If the job definition's type parameter is container, then you must
	// specify either containerProperties or nodeProperties.
	//
	// If the job runs on Fargate resources, then you must not specify nodeProperties;
	// use only containerProperties.
	ContainerProperties *ContainerProperties `json:"containerProperties,omitempty"`
	// An object with various properties specific to multi-node parallel jobs.
	//
	// If the job runs on Fargate resources, then you must not specify nodeProperties;
	// use containerProperties instead.
	NodeProperties *NodeProperties `json:"nodeProperties,omitempty"`
	// Default parameter substitution placeholders to set in the job definition.
	// Parameters are specified as a key-value pair mapping. Parameters in a SubmitJob
	// request override any corresponding parameter defaults from the job definition.
	Parameters map[string]*string `json:"parameters,omitempty"`
	// The platform capabilities required by the job definition. If no value is
	// specified, it defaults to EC2. To run the job on Fargate resources, specify
	// FARGATE.
	PlatformCapabilities []*string `json:"platformCapabilities,omitempty"`
	// Specifies whether to propagate the tags from the job or job definition to
	// the corresponding Amazon ECS task. If no value is specified, the tags are
	// not propagated. Tags can only be propagated to the tasks during task creation.
	// For tags with the same name, job tags are given priority over job definitions
	// tags. If the total number of combined tags from the job and job definition
	// is over 50, the job is moved to the FAILED state.
	PropagateTags *bool `json:"propagateTags,omitempty"`
	// The retry strategy to use for failed jobs that are submitted with this job
	// definition. Any retry strategy that's specified during a SubmitJob operation
	// overrides the retry strategy defined here. If a job is terminated due to
	// a timeout, it isn't retried.
	RetryStrategy *RetryStrategy `json:"retryStrategy,omitempty"`
	// The tags that you apply to the job definition to help you categorize and
	// organize your resources. Each tag consists of a key and an optional value.
	// For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/batch/latest/userguide/using-tags.html)
	// in Batch User Guide.
	Tags map[string]*string `json:"tags,omitempty"`
	// The timeout configuration for jobs that are submitted with this job definition,
	// after which Batch terminates your jobs if they have not finished. If a job
	// is terminated due to a timeout, it isn't retried. The minimum value for the
	// timeout is 60 seconds. Any timeout configuration that's specified during
	// a SubmitJob operation overrides the timeout configuration defined here. For
	// more information, see Job Timeouts (https://docs.aws.amazon.com/batch/latest/userguide/job_timeouts.html)
	// in the Batch User Guide.
	Timeout *JobTimeout `json:"timeout,omitempty"`
	// The type of job definition. For more information about multi-node parallel
	// jobs, see Creating a multi-node parallel job definition (https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html)
	// in the Batch User Guide.
	//
	// If the job is run on Fargate resources, then multinode isn't supported.
	//
	// Type is a required field
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Enum=container;multinode
	JobDefinitionType string `json:"jobDefinitionType"` // renamed from Type because the generated JSON tag would be "type_"
}
    JobDefinitionParameters defines the desired state of a Batch JobDefinition.
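A minimal sketch of JobDefinitionParameters for a single-node container job definition. The image, command, and timeout values are placeholders, and the strPtr helper is defined locally for illustration.

package example

import (
	batch "github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1"
)

// strPtr is a local illustrative helper; it is not part of this package.
func strPtr(s string) *string { return &s }

// exampleJobDefinitionParameters sketches a container-type job definition on EC2 resources.
func exampleJobDefinitionParameters() batch.JobDefinitionParameters {
	timeout := int64(120) // minimum timeout is 60 seconds
	return batch.JobDefinitionParameters{
		Region:            "us-east-1",
		JobDefinitionType: "container",
		ContainerProperties: &batch.ContainerProperties{
			Image:   strPtr("busybox"),
			Command: []*string{strPtr("echo"), strPtr("hello")},
		},
		PlatformCapabilities: []*string{strPtr("EC2")},
		Timeout:              &batch.JobTimeout{AttemptDurationSeconds: &timeout},
	}
}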
func (*JobDefinitionParameters) DeepCopy ¶
func (in *JobDefinitionParameters) DeepCopy() *JobDefinitionParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionParameters.
func (*JobDefinitionParameters) DeepCopyInto ¶
func (in *JobDefinitionParameters) DeepCopyInto(out *JobDefinitionParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobDefinitionSpec ¶
type JobDefinitionSpec struct {
	xpv1.ResourceSpec `json:",inline"`
	ForProvider       JobDefinitionParameters `json:"forProvider"`
}
    A JobDefinitionSpec defines the desired state of a JobDefinition.
func (*JobDefinitionSpec) DeepCopy ¶
func (in *JobDefinitionSpec) DeepCopy() *JobDefinitionSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionSpec.
func (*JobDefinitionSpec) DeepCopyInto ¶
func (in *JobDefinitionSpec) DeepCopyInto(out *JobDefinitionSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobDefinitionStatus ¶
type JobDefinitionStatus struct {
	xpv1.ResourceStatus `json:",inline"`
	AtProvider          JobDefinitionObservation `json:"atProvider,omitempty"`
}
    A JobDefinitionStatus represents the observed state of a JobDefinition.
func (*JobDefinitionStatus) DeepCopy ¶
func (in *JobDefinitionStatus) DeepCopy() *JobDefinitionStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDefinitionStatus.
func (*JobDefinitionStatus) DeepCopyInto ¶
func (in *JobDefinitionStatus) DeepCopyInto(out *JobDefinitionStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobDependency ¶
type JobDependency struct {
	// The job ID of the Batch job associated with this dependency.
	//
	// +crossplane:generate:reference:type=github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1.Job
	// +crossplane:generate:reference:refFieldName=JobIDRef
	// +crossplane:generate:reference:selectorFieldName=JobIDSelector
	JobID *string `json:"jobId,omitempty"`
	// JobIDRef is a reference to a JobID.
	// +optional
	JobIDRef *xpv1.Reference `json:"jobIdRef,omitempty"`
	// JobIDSelector selects references to a JobID.
	// +optional
	JobIDSelector *xpv1.Selector `json:"jobIdSelector,omitempty"`
	// The type of the job dependency.
	// +kubebuilder:validation:Enum=N_TO_N;SEQUENTIAL
	Type *string `json:"type,omitempty"`
}
    JobDependency defines a Batch job dependency.
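A minimal sketch of a dependency list for JobParameters.DependsOn: one entry resolved from another Job managed resource by reference, and one SEQUENTIAL array dependency. The referenced resource name is a placeholder and the strPtr helper is defined locally for illustration.

package example

import (
	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

	batch "github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1"
)

// strPtr is a local illustrative helper; it is not part of this package.
func strPtr(s string) *string { return &s }

// exampleDependsOn waits for a referenced Job and runs array children sequentially.
func exampleDependsOn() []*batch.JobDependency {
	return []*batch.JobDependency{
		{JobIDRef: &xpv1.Reference{Name: "preprocessing-job"}},
		{Type: strPtr("SEQUENTIAL")},
	}
}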
func (*JobDependency) DeepCopy ¶
func (in *JobDependency) DeepCopy() *JobDependency
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobDependency.
func (*JobDependency) DeepCopyInto ¶
func (in *JobDependency) DeepCopyInto(out *JobDependency)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobList ¶
type JobList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Job `json:"items"`
}
    JobList contains a list of Jobs.
func (*JobList) DeepCopy ¶
func (in *JobList) DeepCopy() *JobList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList.
func (*JobList) DeepCopyInto ¶
func (in *JobList) DeepCopyInto(out *JobList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*JobList) DeepCopyObject ¶
func (in *JobList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
type JobObservation ¶
type JobObservation struct {
	// The array properties of the job, if it is an array job.
	ArrayProperties *ArrayPropertiesDetail `json:"arrayProperties,omitempty"`
	// A list of job attempts associated with this job.
	Attempts []*AttemptDetail `json:"attempts,omitempty"`
	// The Unix timestamp (in milliseconds) for when the job was created. For non-array
	// jobs and parent array jobs, this is when the job entered the SUBMITTED state
	// (at the time SubmitJob was called). For array child jobs, this is when the
	// child job was spawned by its parent and entered the PENDING state.
	CreatedAt *int64 `json:"createdAt,omitempty"`
	// The Amazon Resource Name (ARN) of the job.
	JobArn *string `json:"jobArn,omitempty"`
	// The ID for the job.
	JobID *string `json:"jobId,omitempty"`
	// The Unix timestamp (in milliseconds) for when the job was started (when the
	// job transitioned from the STARTING state to the RUNNING state). This parameter
	// isn't provided for child jobs of array jobs or multi-node parallel jobs.
	StartedAt *int64 `json:"startedAt,omitempty"`
	// The current status for the job.
	//
	// If your jobs don't progress to STARTING, see Jobs Stuck in RUNNABLE Status
	// (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#job_stuck_in_runnable)
	// in the troubleshooting section of the Batch User Guide.
	Status *string `json:"status,omitempty"`
	// A short, human-readable string to provide additional details about the current
	// status of the job.
	StatusReason *string `json:"statusReason,omitempty"`
	// The Unix timestamp (in milliseconds) for when the job was stopped (when the
	// job transitioned from the RUNNING state to a terminal state, such as SUCCEEDED
	// or FAILED).
	StoppedAt *int64 `json:"stoppedAt,omitempty"`
}
    JobObservation keeps the state for the external resource.
func (*JobObservation) DeepCopy ¶
func (in *JobObservation) DeepCopy() *JobObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobObservation.
func (*JobObservation) DeepCopyInto ¶
func (in *JobObservation) DeepCopyInto(out *JobObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobParameters ¶
type JobParameters struct {
	// Region is the region in which the Job will be created.
	// +kubebuilder:validation:Required
	Region string `json:"region"`
	// The array properties for the submitted job, such as the size of the array.
	// The array size can be between 2 and 10,000. If you specify array properties
	// for a job, it becomes an array job. For more information, see Array Jobs
	// (https://docs.aws.amazon.com/batch/latest/userguide/array_jobs.html) in the
	// Batch User Guide.
	ArrayProperties *ArrayProperties `json:"arrayProperties,omitempty"`
	// A list of container overrides in the JSON format that specify the name of
	// a container in the specified job definition and the overrides it should receive.
	// You can override the default command for a container, which is specified
	// in the job definition or the Docker image, with a command override. You can
	// also override existing environment variables on a container or add new environment
	// variables to it with an environment override.
	ContainerOverrides *ContainerOverrides `json:"containerOverrides,omitempty"`
	// A list of dependencies for the job. A job can depend upon a maximum of 20
	// jobs. You can specify a SEQUENTIAL type dependency without specifying a job
	// ID for array jobs so that each child array job completes sequentially, starting
	// at index 0. You can also specify an N_TO_N type dependency with a job ID
	// for array jobs. In that case, each index child of this job must wait for
	// the corresponding index child of each dependency to complete before it can
	// begin.
	DependsOn []*JobDependency `json:"dependsOn,omitempty"`
	// The job definition used by this job. This value can be one of name, name:revision,
	// or the Amazon Resource Name (ARN) for the job definition. If name is specified
	// without a revision then the latest active revision is used.
	//
	// JobDefinition is a required field
	// +crossplane:generate:reference:type=github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1.JobDefinition
	// +crossplane:generate:reference:refFieldName=JobDefinitionRef
	// +crossplane:generate:reference:selectorFieldName=JobDefinitionSelector
	JobDefinition string `json:"jobDefinition,omitempty"`
	// JobDefinitionRef is a reference to a JobDefinition.
	// +optional
	JobDefinitionRef *xpv1.Reference `json:"jobDefinitionRef,omitempty"`
	// JobDefinitionSelector selects references to a JobDefinition.
	// +optional
	JobDefinitionSelector *xpv1.Selector `json:"jobDefinitionSelector,omitempty"`
	// The job queue where the job is submitted. You can specify either the name
	// or the Amazon Resource Name (ARN) of the queue.
	//
	// JobQueue is a required field
	// +crossplane:generate:reference:type=github.com/crossplane-contrib/provider-aws/apis/batch/v1alpha1.JobQueue
	// +crossplane:generate:reference:refFieldName=JobQueueRef
	// +crossplane:generate:reference:selectorFieldName=JobQueueSelector
	JobQueue string `json:"jobQueue,omitempty"`
	// JobQueueRef is a reference to a JobQueue.
	// +optional
	JobQueueRef *xpv1.Reference `json:"jobQueueRef,omitempty"`
	// JobQueueSelector selects references to a JobQueue.
	// +optional
	JobQueueSelector *xpv1.Selector `json:"jobQueueSelector,omitempty"`
	// A list of node overrides in JSON format that specify the node range to target
	// and the container overrides for that node range.
	//
	// This parameter isn't applicable to jobs that are running on Fargate resources;
	// use containerOverrides instead.
	NodeOverrides *NodeOverrides `json:"nodeOverrides,omitempty"`
	// Additional parameters passed to the job that replace parameter substitution
	// placeholders that are set in the job definition. Parameters are specified
	// as a key and value pair mapping. Parameters in a SubmitJob request override
	// any corresponding parameter defaults from the job definition.
	Parameters map[string]*string `json:"parameters,omitempty"`
	// Specifies whether to propagate the tags from the job or job definition to
	// the corresponding Amazon ECS task. If no value is specified, the tags aren't
	// propagated. Tags can only be propagated to the tasks during task creation.
	// For tags with the same name, job tags are given priority over job definitions
	// tags. If the total number of combined tags from the job and job definition
	// is over 50, the job is moved to the FAILED state. When specified, this overrides
	// the tag propagation setting in the job definition.
	PropagateTags *bool `json:"propagateTags,omitempty"`
	// The retry strategy to use for failed jobs from this SubmitJob operation.
	// When a retry strategy is specified here, it overrides the retry strategy
	// defined in the job definition.
	RetryStrategy *RetryStrategy `json:"retryStrategy,omitempty"`
	// The tags that you apply to the job request to help you categorize and organize
	// your resources. Each tag consists of a key and an optional value. For more
	// information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html)
	// in Amazon Web Services General Reference.
	Tags map[string]*string `json:"tags,omitempty"`
	// The timeout configuration for this SubmitJob operation. You can specify a
	// timeout duration after which Batch terminates your jobs if they haven't finished.
	// If a job is terminated due to a timeout, it isn't retried. The minimum value
	// for the timeout is 60 seconds. This configuration overrides any timeout configuration
	// specified in the job definition. For array jobs, child jobs have the same
	// timeout configuration as the parent job. For more information, see Job Timeouts
	// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/job_timeouts.html)
	// in the Amazon Elastic Container Service Developer Guide.
	Timeout *JobTimeout `json:"timeout,omitempty"`
}
    JobParameters define the desired state of a Batch Job.
func (*JobParameters) DeepCopy ¶
func (in *JobParameters) DeepCopy() *JobParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobParameters.
func (*JobParameters) DeepCopyInto ¶
func (in *JobParameters) DeepCopyInto(out *JobParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobSpec ¶
type JobSpec struct {
	xpv1.ResourceSpec `json:",inline"`
	ForProvider       JobParameters `json:"forProvider"`
}
    A JobSpec defines the desired state of a Job.
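    A short usage sketch (not part of the generated documentation): building a JobSpec in Go. The manualv1alpha1 import path is the one named in the reference markers above; the xpv1 alias is assumed to point at crossplane-runtime's common v1 types, and the referenced resource names are illustrative.
package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

	"github.com/crossplane-contrib/provider-aws/apis/batch/manualv1alpha1"
)

// i64 is a small local helper for the *int64 fields used below.
func i64(v int64) *int64 { return &v }

func main() {
	spec := manualv1alpha1.JobSpec{
		ForProvider: manualv1alpha1.JobParameters{
			// Resolve the job definition and job queue through cross-resource
			// references instead of hard-coding names or ARNs.
			JobDefinitionRef: &xpv1.Reference{Name: "example-job-definition"},
			JobQueueRef:      &xpv1.Reference{Name: "example-job-queue"},
			// Terminate any attempt that runs longer than 10 minutes.
			Timeout: &manualv1alpha1.JobTimeout{AttemptDurationSeconds: i64(600)},
		},
	}
	fmt.Println(spec.ForProvider.JobDefinitionRef.Name)
}
    Later sketches on this page reuse this import block and the i64 helper.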
func (*JobSpec) DeepCopy ¶
func (in *JobSpec) DeepCopy() *JobSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec.
func (*JobSpec) DeepCopyInto ¶
func (in *JobSpec) DeepCopyInto(out *JobSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobStatus ¶
type JobStatus struct {
	xpv1.ResourceStatus `json:",inline"`
	AtProvider          JobObservation `json:"atProvider,omitempty"`
}
    A JobStatus represents the observed state of a Job.
func (*JobStatus) DeepCopy ¶
func (in *JobStatus) DeepCopy() *JobStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus.
func (*JobStatus) DeepCopyInto ¶
func (in *JobStatus) DeepCopyInto(out *JobStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type JobTimeout ¶
type JobTimeout struct {
	// The time duration in seconds (measured from the job attempt's startedAt timestamp)
	// after which Batch terminates your jobs if they have not finished. The minimum
	// value for the timeout is 60 seconds.
	AttemptDurationSeconds *int64 `json:"attemptDurationSeconds,omitempty"`
}
    JobTimeout defines an object representing a job timeout configuration.
func (*JobTimeout) DeepCopy ¶
func (in *JobTimeout) DeepCopy() *JobTimeout
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTimeout.
func (*JobTimeout) DeepCopyInto ¶
func (in *JobTimeout) DeepCopyInto(out *JobTimeout)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type KeyValuePair ¶
type KeyValuePair struct {
	// The name of the key-value pair. For environment variables, this is the name
	// of the environment variable.
	Name *string `json:"name,omitempty"`
	// The value of the key-value pair. For environment variables, this is the value
	// of the environment variable.
	Value *string `json:"value,omitempty"`
}
    KeyValuePair defines a key-value pair object.
func (*KeyValuePair) DeepCopy ¶
func (in *KeyValuePair) DeepCopy() *KeyValuePair
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyValuePair.
func (*KeyValuePair) DeepCopyInto ¶
func (in *KeyValuePair) DeepCopyInto(out *KeyValuePair)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LinuxParameters ¶
type LinuxParameters struct {
	// Any host devices to expose to the container. This parameter maps to Devices
	// in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --device option to docker run (https://docs.docker.com/engine/reference/run/).
	//
	// This parameter isn't applicable to jobs that are running on Fargate resources
	// and shouldn't be provided.
	Devices []*Device `json:"devices,omitempty"`
	// If true, run an init process inside the container that forwards signals and
	// reaps processes. This parameter maps to the --init option to docker run (https://docs.docker.com/engine/reference/run/).
	// This parameter requires version 1.25 of the Docker Remote API or greater
	// on your container instance. To check the Docker Remote API version on your
	// container instance, log into your container instance and run the following
	// command: sudo docker version | grep "Server API version"
	InitProcessEnabled *bool `json:"initProcessEnabled,omitempty"`
	// The total amount of swap memory (in MiB) a container can use. This parameter
	// is translated to the --memory-swap option to docker run (https://docs.docker.com/engine/reference/run/)
	// where the value is the sum of the container memory plus the maxSwap value.
	// For more information, see --memory-swap details (https://docs.docker.com/config/containers/resource_constraints/#--memory-swap-details)
	// in the Docker documentation.
	//
	// If a maxSwap value of 0 is specified, the container doesn't use swap. Accepted
	// values are 0 or any positive integer. If the maxSwap parameter is omitted,
	// the container doesn't use the swap configuration for the container instance
	// it is running on. A maxSwap value must be set for the swappiness parameter
	// to be used.
	//
	// This parameter isn't applicable to jobs that are running on Fargate resources
	// and shouldn't be provided.
	MaxSwap *int64 `json:"maxSwap,omitempty"`
	// The value for the size (in MiB) of the /dev/shm volume. This parameter maps
	// to the --shm-size option to docker run (https://docs.docker.com/engine/reference/run/).
	//
	// This parameter isn't applicable to jobs that are running on Fargate resources
	// and shouldn't be provided.
	SharedMemorySize *int64 `json:"sharedMemorySize,omitempty"`
	// This allows you to tune a container's memory swappiness behavior. A swappiness
	// value of 0 causes swapping not to happen unless absolutely necessary. A swappiness
	// value of 100 causes pages to be swapped very aggressively. Accepted values
	// are whole numbers between 0 and 100. If the swappiness parameter isn't specified,
	// a default value of 60 is used. If a value isn't specified for maxSwap, then
	// this parameter is ignored. If maxSwap is set to 0, the container doesn't
	// use swap. This parameter maps to the --memory-swappiness option to docker
	// run (https://docs.docker.com/engine/reference/run/).
	//
	// Consider the following when you use a per-container swap configuration.
	//
	//    * Swap space must be enabled and allocated on the container instance for
	//    the containers to use. The Amazon ECS optimized AMIs don't have swap enabled
	//    by default. You must enable swap on the instance to use this feature.
	//    For more information, see Instance Store Swap Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-store-swap-volumes.html)
	//    in the Amazon EC2 User Guide for Linux Instances or How do I allocate
	//    memory to work as swap space in an Amazon EC2 instance by using a swap
	//    file? (http://aws.amazon.com/premiumsupport/knowledge-center/ec2-memory-swap-file/)
	//
	//    * The swap space parameters are only supported for job definitions using
	//    EC2 resources.
	//
	//    * If the maxSwap and swappiness parameters are omitted from a job definition,
	//    each container will have a default swappiness value of 60, and the total
	//    swap usage will be limited to two times the memory reservation of the
	//    container.
	//
	// This parameter isn't applicable to jobs that are running on Fargate resources
	// and shouldn't be provided.
	Swappiness *int64 `json:"swappiness,omitempty"`
	// The container path, mount options, and size (in MiB) of the tmpfs mount.
	// This parameter maps to the --tmpfs option to docker run (https://docs.docker.com/engine/reference/run/).
	//
	// This parameter isn't applicable to jobs that are running on Fargate resources
	// and shouldn't be provided.
	Tmpfs []*Tmpfs `json:"tmpfs,omitempty"`
}
    LinuxParameters define linux-specific modifications that are applied to the container, such as details for device mappings.
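    A sketch of an EC2-backed swap configuration, reusing the manualv1alpha1 import and i64 helper from the JobSpec sketch above. As the field documentation notes, swappiness is ignored unless maxSwap is also set, and neither field applies to Fargate jobs.
var swapTuning = manualv1alpha1.LinuxParameters{
	// Permit up to 2048 MiB of swap on top of the container's memory.
	MaxSwap: i64(2048),
	// Swap only when necessary (0 avoids swapping, 100 swaps aggressively).
	Swappiness: i64(10),
}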
func (*LinuxParameters) DeepCopy ¶
func (in *LinuxParameters) DeepCopy() *LinuxParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxParameters.
func (*LinuxParameters) DeepCopyInto ¶
func (in *LinuxParameters) DeepCopyInto(out *LinuxParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LogConfiguration ¶
type LogConfiguration struct {
	// The log driver to use for the container. The valid values listed for this
	// parameter are log drivers that the Amazon ECS container agent can communicate
	// with by default.
	//
	// The supported log drivers are awslogs, fluentd, gelf, json-file, journald,
	// logentries, syslog, and splunk.
	//
	// Jobs that are running on Fargate resources are restricted to the awslogs
	// and splunk log drivers.
	//
	// awslogs
	//
	// Specifies the Amazon CloudWatch Logs logging driver. For more information,
	// see Using the awslogs Log Driver (https://docs.aws.amazon.com/batch/latest/userguide/using_awslogs.html)
	// in the Batch User Guide and Amazon CloudWatch Logs logging driver (https://docs.docker.com/config/containers/logging/awslogs/)
	// in the Docker documentation.
	//
	// fluentd
	//
	// Specifies the Fluentd logging driver. For more information, including usage
	// and options, see Fluentd logging driver (https://docs.docker.com/config/containers/logging/fluentd/)
	// in the Docker documentation.
	//
	// gelf
	//
	// Specifies the Graylog Extended Format (GELF) logging driver. For more information,
	// including usage and options, see Graylog Extended Format logging driver (https://docs.docker.com/config/containers/logging/gelf/)
	// in the Docker documentation.
	//
	// journald
	//
	// Specifies the journald logging driver. For more information, including usage
	// and options, see Journald logging driver (https://docs.docker.com/config/containers/logging/journald/)
	// in the Docker documentation.
	//
	// json-file
	//
	// Specifies the JSON file logging driver. For more information, including usage
	// and options, see JSON File logging driver (https://docs.docker.com/config/containers/logging/json-file/)
	// in the Docker documentation.
	//
	// splunk
	//
	// Specifies the Splunk logging driver. For more information, including usage
	// and options, see Splunk logging driver (https://docs.docker.com/config/containers/logging/splunk/)
	// in the Docker documentation.
	//
	// syslog
	//
	// Specifies the syslog logging driver. For more information, including usage
	// and options, see Syslog logging driver (https://docs.docker.com/config/containers/logging/syslog/)
	// in the Docker documentation.
	//
	// If you have a custom driver that's not listed earlier that you want to work
	// with the Amazon ECS container agent, you can fork the Amazon ECS container
	// agent project that's available on GitHub (https://github.com/aws/amazon-ecs-agent)
	// and customize it to work with that driver. We encourage you to submit pull
	// requests for changes that you want to have included. However, Amazon Web
	// Services doesn't currently support running modified copies of this software.
	//
	// This parameter requires version 1.18 of the Docker Remote API or greater
	// on your container instance. To check the Docker Remote API version on your
	// container instance, log into your container instance and run the following
	// command: sudo docker version | grep "Server API version"
	//
	// LogDriver is a required field
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Enum=json-file;syslog;journald;gelf;fluentd;awslogs;splunk
	LogDriver string `json:"logDriver"`
	// The configuration options to send to the log driver. This parameter requires
	// version 1.19 of the Docker Remote API or greater on your container instance.
	// To check the Docker Remote API version on your container instance, log into
	// your container instance and run the following command: sudo docker version
	// | grep "Server API version"
	Options map[string]*string `json:"options,omitempty"`
	// The secrets to pass to the log configuration. For more information, see Specifying
	// Sensitive Data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html)
	// in the Batch User Guide.
	SecretOptions []*Secret `json:"secretOptions,omitempty"`
}
    LogConfiguration defines the log configuration options to send to a custom log driver for the container.
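    A sketch of an awslogs configuration, reusing the manualv1alpha1 import from the JobSpec sketch above. The option keys are standard Docker awslogs driver options and the values are illustrative.
// strPtr is a small local helper for *string map values.
func strPtr(s string) *string { return &s }

var logCfg = manualv1alpha1.LogConfiguration{
	// Must be one of the enum values accepted by logDriver.
	LogDriver: "awslogs",
	// Options are passed through to the log driver unchanged.
	Options: map[string]*string{
		"awslogs-group":         strPtr("/aws/batch/example"),
		"awslogs-region":        strPtr("us-east-1"),
		"awslogs-stream-prefix": strPtr("example-job"),
	},
}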
func (*LogConfiguration) DeepCopy ¶
func (in *LogConfiguration) DeepCopy() *LogConfiguration
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogConfiguration.
func (*LogConfiguration) DeepCopyInto ¶
func (in *LogConfiguration) DeepCopyInto(out *LogConfiguration)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type MountPoint ¶
type MountPoint struct {
	// The path on the container where the host volume is mounted.
	ContainerPath *string `json:"containerPath,omitempty"`
	// If this value is true, the container has read-only access to the volume.
	// Otherwise, the container can write to the volume. The default value is false.
	ReadOnly *bool `json:"readOnly,omitempty"`
	// The name of the volume to mount.
	SourceVolume *string `json:"sourceVolume,omitempty"`
}
    MountPoint defines the details on a Docker volume mount point that's used in a job's container properties. This parameter maps to Volumes in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.19/#create-a-container) section of the Docker Remote API and the --volume option to docker run.
func (*MountPoint) DeepCopy ¶
func (in *MountPoint) DeepCopy() *MountPoint
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MountPoint.
func (*MountPoint) DeepCopyInto ¶
func (in *MountPoint) DeepCopyInto(out *MountPoint)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type NetworkConfiguration ¶
type NetworkConfiguration struct {
	// Indicates whether the job should have a public IP address. For a job that
	// is running on Fargate resources in a private subnet to send outbound traffic
	// to the internet (for example, to pull container images), the private subnet
	// requires a NAT gateway to be attached to route requests to the internet. For
	// more information, see Amazon ECS task networking (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html).
	// The default value is "DISABLED".
	// +kubebuilder:validation:Enum=ENABLED;DISABLED
	AssignPublicIP *string `json:"assignPublicIp,omitempty"`
}
    NetworkConfiguration defines the network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.
func (*NetworkConfiguration) DeepCopy ¶
func (in *NetworkConfiguration) DeepCopy() *NetworkConfiguration
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfiguration.
func (*NetworkConfiguration) DeepCopyInto ¶
func (in *NetworkConfiguration) DeepCopyInto(out *NetworkConfiguration)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type NetworkInterface ¶
type NetworkInterface struct {
	// The attachment ID for the network interface.
	AttachmentID *string `json:"attachmentId,omitempty"`
	// The private IPv6 address for the network interface.
	Ipv6Address *string `json:"ipv6Address,omitempty"`
	// The private IPv4 address for the network interface.
	PrivateIpv4Address *string `json:"privateIpv4Address,omitempty"`
}
    NetworkInterface defines the elastic network interface for a multi-node parallel job node for observation.
func (*NetworkInterface) DeepCopy ¶
func (in *NetworkInterface) DeepCopy() *NetworkInterface
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterface.
func (*NetworkInterface) DeepCopyInto ¶
func (in *NetworkInterface) DeepCopyInto(out *NetworkInterface)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type NodeOverrides ¶
type NodeOverrides struct {
	// The node property overrides for the job.
	NodePropertyOverrides []*NodePropertyOverride `json:"nodePropertyOverrides,omitempty"`
	// The number of nodes to use with a multi-node parallel job. This value overrides
	// the number of nodes that are specified in the job definition. To use this
	// override:
	//
	//    * There must be at least one node range in your job definition that has
	//    an open upper boundary (such as : or n:).
	//
	//    * The lower boundary of the node range specified in the job definition
	//    must be fewer than the number of nodes specified in the override.
	//
	//    * The main node index specified in the job definition must be fewer than
	//    the number of nodes specified in the override.
	NumNodes *int64 `json:"numNodes,omitempty"`
}
    NodeOverrides define any node overrides to a job definition that's used in a SubmitJob API operation.
This isn't applicable to jobs that are running on Fargate resources and shouldn't be provided; use containerOverrides instead.
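    A sketch of a SubmitJob-time node override, reusing the manualv1alpha1 import and i64 helper from the JobSpec sketch above. Raising numNodes like this only works if the job definition declares an open-ended node range, per the rules listed above.
var nodeOverrides = manualv1alpha1.NodeOverrides{
	// Override the job definition's node count with 6 nodes.
	NumNodes: i64(6),
	NodePropertyOverrides: []*manualv1alpha1.NodePropertyOverride{
		{
			// Apply the (omitted) container overrides to node 1 through the last node.
			TargetNodes: "1:",
		},
	},
}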
func (*NodeOverrides) DeepCopy ¶
func (in *NodeOverrides) DeepCopy() *NodeOverrides
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeOverrides.
func (*NodeOverrides) DeepCopyInto ¶
func (in *NodeOverrides) DeepCopyInto(out *NodeOverrides)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type NodeProperties ¶
type NodeProperties struct {
	// Specifies the node index for the main node of a multi-node parallel job.
	// This node index value must be fewer than the number of nodes.
	//
	// MainNode is a required field
	// +kubebuilder:validation:Required
	MainNode int64 `json:"mainNode"`
	// A list of node ranges and their properties associated with a multi-node parallel
	// job.
	//
	// NodeRangeProperties is a required field
	// +kubebuilder:validation:Required
	NodeRangeProperties []NodeRangeProperty `json:"nodeRangeProperties"`
	// The number of nodes associated with a multi-node parallel job.
	//
	// NumNodes is a required field
	// +kubebuilder:validation:Required
	NumNodes int64 `json:"numNodes"`
}
    NodeProperties define the node properties of a multi-node parallel job.
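    A sketch of node properties for a four-node multi-node parallel job, reusing the manualv1alpha1 import from the JobSpec sketch above. The container details for each range are omitted; the accumulated ranges still account for all nodes (0:n), as required.
var nodeProps = manualv1alpha1.NodeProperties{
	// Node 0 acts as the main node.
	MainNode: 0,
	NumNodes: 4,
	NodeRangeProperties: []manualv1alpha1.NodeRangeProperty{
		{TargetNodes: "0:0"}, // main node; Container left unset in this sketch
		{TargetNodes: "1:"},  // remaining nodes 1 through 3
	},
}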
func (*NodeProperties) DeepCopy ¶
func (in *NodeProperties) DeepCopy() *NodeProperties
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProperties.
func (*NodeProperties) DeepCopyInto ¶
func (in *NodeProperties) DeepCopyInto(out *NodeProperties)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type NodePropertyOverride ¶
type NodePropertyOverride struct {
	// The overrides that should be sent to a node range.
	ContainerOverrides *ContainerOverrides `json:"containerOverrides,omitempty"`
	// The range of nodes, using node index values, that's used to override. A range
	// of 0:3 indicates nodes with index values of 0 through 3. If the starting
	// range value is omitted (:n), then 0 is used to start the range. If the ending
	// range value is omitted (n:), then the highest possible node index is used
	// to end the range.
	//
	// TargetNodes is a required field
	// +kubebuilder:validation:Required
	TargetNodes string `json:"targetNodes"`
}
    NodePropertyOverride defines any node overrides to a job definition that's used in a SubmitJob API operation.
func (*NodePropertyOverride) DeepCopy ¶
func (in *NodePropertyOverride) DeepCopy() *NodePropertyOverride
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePropertyOverride.
func (*NodePropertyOverride) DeepCopyInto ¶
func (in *NodePropertyOverride) DeepCopyInto(out *NodePropertyOverride)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type NodeRangeProperty ¶
type NodeRangeProperty struct {
	// The container details for the node range.
	Container *ContainerProperties `json:"container,omitempty"`
	// The range of nodes, using node index values. A range of 0:3 indicates nodes
	// with index values of 0 through 3. If the starting range value is omitted
	// (:n), then 0 is used to start the range. If the ending range value is omitted
	// (n:), then the highest possible node index is used to end the range. Your
	// accumulative node ranges must account for all nodes (0:n). You can nest node
	// ranges, for example 0:10 and 4:5, in which case the 4:5 range properties
	// override the 0:10 properties.
	//
	// TargetNodes is a required field
	// +kubebuilder:validation:Required
	TargetNodes string `json:"targetNodes"`
}
    NodeRangeProperty defines the properties of the node range for a multi-node parallel job.
func (*NodeRangeProperty) DeepCopy ¶
func (in *NodeRangeProperty) DeepCopy() *NodeRangeProperty
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRangeProperty.
func (*NodeRangeProperty) DeepCopyInto ¶
func (in *NodeRangeProperty) DeepCopyInto(out *NodeRangeProperty)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ResourceRequirement ¶
type ResourceRequirement struct {
	// The type of resource to assign to a container. The supported resources include
	// GPU, MEMORY, and VCPU.
	//
	// Type is a required field
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Enum=GPU;MEMORY;VCPU
	ResourceType string `json:"resourceType"` // renamed from Type because json:"type_"
	// The quantity of the specified resource to reserve for the container. The
	// values vary based on the type specified.
	//
	// type="GPU"
	//
	// The number of physical GPUs to reserve for the container. The number of GPUs
	// reserved for all containers in a job shouldn't exceed the number of available
	// GPUs on the compute resource that the job is launched on.
	//
	// GPUs are not available for jobs that are running on Fargate resources.
	//
	// type="MEMORY"
	//
	// The memory hard limit (in MiB) presented to the container. This parameter is
	// supported for jobs that are running on EC2 resources. If your container attempts
	// to exceed the memory specified, the container is terminated. This parameter
	// maps to Memory in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --memory option to docker run (https://docs.docker.com/engine/reference/run/).
	// You must specify at least 4 MiB of memory for a job. This is required but
	// can be specified in several places for multi-node parallel (MNP) jobs. It
	// must be specified for each node at least once. This parameter maps to Memory
	// in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --memory option to docker run (https://docs.docker.com/engine/reference/run/).
	//
	// If you're trying to maximize your resource utilization by providing your
	// jobs as much memory as possible for a particular instance type, see Memory
	// Management (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html)
	// in the Batch User Guide.
	//
	// For jobs that are running on Fargate resources, the value is the hard limit
	// (in MiB) and must match one of the supported values, and the VCPU value
	// must be one of the values supported for that memory value.
	//
	// value = 512
	//
	// VCPU = 0.25
	//
	// value = 1024
	//
	// VCPU = 0.25 or 0.5
	//
	// value = 2048
	//
	// VCPU = 0.25, 0.5, or 1
	//
	// value = 3072
	//
	// VCPU = 0.5, or 1
	//
	// value = 4096
	//
	// VCPU = 0.5, 1, or 2
	//
	// value = 5120, 6144, or 7168
	//
	// VCPU = 1 or 2
	//
	// value = 8192
	//
	// VCPU = 1, 2, or 4
	//
	// value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384
	//
	// VCPU = 2 or 4
	//
	// value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624,
	// 27648, 28672, 29696, or 30720
	//
	// VCPU = 4
	//
	// type="VCPU"
	//
	// The number of vCPUs reserved for the container. This parameter maps to CpuShares
	// in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container)
	// section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/)
	// and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/).
	// Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must
	// specify at least one vCPU. This is required but can be specified in several
	// places; it must be specified for each node at least once.
	//
	// For jobs that are running on Fargate resources, the value must match one
	// of the supported values, and the MEMORY value must be one of the values
	// supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4.
	//
	// value = 0.25
	//
	// MEMORY = 512, 1024, or 2048
	//
	// value = 0.5
	//
	// MEMORY = 1024, 2048, 3072, or 4096
	//
	// value = 1
	//
	// MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192
	//
	// value = 2
	//
	// MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312,
	// 14336, 15360, or 16384
	//
	// value = 4
	//
	// MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408,
	// 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672,
	// 29696, or 30720
	//
	// Value is a required field
	// +kubebuilder:validation:Required
	Value string `json:"value"`
}
    ResourceRequirement defines the type and amount of a resource to assign to a container. The supported resources include GPU, MEMORY, and VCPU.
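    A sketch of a Fargate-compatible resource pairing, reusing the manualv1alpha1 import from the JobSpec sketch above. The values follow the VCPU/MEMORY table above: 0.5 vCPU allows 1024, 2048, 3072, or 4096 MiB.
var fargateResources = []manualv1alpha1.ResourceRequirement{
	{ResourceType: "VCPU", Value: "0.5"},
	{ResourceType: "MEMORY", Value: "2048"},
}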
func (*ResourceRequirement) DeepCopy ¶
func (in *ResourceRequirement) DeepCopy() *ResourceRequirement
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirement.
func (*ResourceRequirement) DeepCopyInto ¶
func (in *ResourceRequirement) DeepCopyInto(out *ResourceRequirement)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type RetryStrategy ¶
type RetryStrategy struct {
	// The number of times to move a job to the RUNNABLE status. You can specify
	// between 1 and 10 attempts. If the value of attempts is greater than one,
	// the job is retried on failure the same number of attempts as the value.
	Attempts *int64 `json:"attempts,omitempty"`
	// Array of up to 5 objects that specify conditions under which the job should
	// be retried or failed. If this parameter is specified, then the attempts parameter
	// must also be specified.
	EvaluateOnExit []*EvaluateOnExit `json:"evaluateOnExit,omitempty"`
}
    RetryStrategy defines the retry strategy associated with a job. For more information, see Automated job retries (https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html) in the Batch User Guide.
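    A sketch of a retry strategy, reusing the manualv1alpha1 import and i64 helper from the JobSpec sketch above. EvaluateOnExit (documented earlier in this package) can be added to restrict which exit codes or reasons trigger a retry.
var retry = manualv1alpha1.RetryStrategy{
	// Between 1 and 10 attempts may be specified.
	Attempts: i64(3),
}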
func (*RetryStrategy) DeepCopy ¶
func (in *RetryStrategy) DeepCopy() *RetryStrategy
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetryStrategy.
func (*RetryStrategy) DeepCopyInto ¶
func (in *RetryStrategy) DeepCopyInto(out *RetryStrategy)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Secret ¶
type Secret struct {
	// The name of the secret.
	//
	// Name is a required field
	// +kubebuilder:validation:Required
	Name string `json:"name"`
	// The secret to expose to the container. The supported values are either the
	// full ARN of the Secrets Manager secret or the full ARN of the parameter in
	// the Amazon Web Services Systems Manager Parameter Store.
	//
	// If the Amazon Web Services Systems Manager Parameter Store parameter exists
	// in the same Region as the job you're launching, then you can use either the
	// full ARN or name of the parameter. If the parameter exists in a different
	// Region, then the full ARN must be specified.
	//
	// ValueFrom is a required field
	// +kubebuilder:validation:Required
	ValueFrom string `json:"valueFrom"`
}
    Secret defines the secret to expose to your container. Secrets can be exposed to a container in the following ways:
- To inject sensitive data into your containers as environment variables, use the secrets container definition parameter. 
- To reference sensitive information in the log configuration of a container, use the secretOptions container definition parameter. 
For more information, see Specifying sensitive data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html) in the Batch User Guide.
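    A sketch of a secret reference, reusing the manualv1alpha1 import from the JobSpec sketch above. The ARN is illustrative; a Systems Manager Parameter Store ARN (or, in the same Region, the parameter name) works the same way.
var dbPassword = manualv1alpha1.Secret{
	// The name the secret is exposed under (for example, an environment variable).
	Name: "DB_PASSWORD",
	// Full ARN of the Secrets Manager secret holding the value.
	ValueFrom: "arn:aws:secretsmanager:us-east-1:111122223333:secret:example-db-password",
}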
func (*Secret) DeepCopy ¶
func (in *Secret) DeepCopy() *Secret
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
func (*Secret) DeepCopyInto ¶
func (in *Secret) DeepCopyInto(out *Secret)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Tmpfs ¶
type Tmpfs struct {
	// The absolute file path in the container where the tmpfs volume is mounted.
	//
	// ContainerPath is a required field
	// +kubebuilder:validation:Required
	ContainerPath string `json:"containerPath"`
	// The list of tmpfs volume mount options.
	//
	// Valid values: "defaults" | "ro" | "rw" | "suid" | "nosuid" | "dev" | "nodev"
	// | "exec" | "noexec" | "sync" | "async" | "dirsync" | "remount" | "mand" |
	// "nomand" | "atime" | "noatime" | "diratime" | "nodiratime" | "bind" | "rbind"
	// | "unbindable" | "runbindable" | "private" | "rprivate" | "shared" | "rshared"
	// | "slave" | "rslave" | "relatime" | "norelatime" | "strictatime" | "nostrictatime"
	// | "mode" | "uid" | "gid" | "nr_inodes" | "nr_blocks" | "mpol"
	MountOptions []*string `json:"mountOptions,omitempty"`
	// The size (in MiB) of the tmpfs volume.
	//
	// Size is a required field
	// +kubebuilder:validation:Required
	Size int64 `json:"size"`
}
    Tmpfs defines the container path, mount options, and size of the tmpfs mount.
This object isn't applicable to jobs that are running on Fargate resources.
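    A sketch of a tmpfs mount for an EC2-backed job, reusing the manualv1alpha1 import from the JobSpec sketch and the strPtr helper from the LogConfiguration sketch above. The mount options come from the list of valid values documented on the field.
var scratchTmpfs = manualv1alpha1.Tmpfs{
	// Mount a 64 MiB tmpfs at /scratch inside the container.
	ContainerPath: "/scratch",
	Size:          64,
	MountOptions:  []*string{strPtr("rw"), strPtr("nosuid"), strPtr("noexec")},
}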
func (*Tmpfs) DeepCopy ¶
func (in *Tmpfs) DeepCopy() *Tmpfs
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tmpfs.
func (*Tmpfs) DeepCopyInto ¶
func (in *Tmpfs) DeepCopyInto(out *Tmpfs)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Ulimit ¶
type Ulimit struct {
	// The hard limit for the ulimit type.
	//
	// HardLimit is a required field
	// +kubebuilder:validation:Required
	HardLimit int64 `json:"hardLimit"`
	// The type of the ulimit.
	//
	// Name is a required field
	// +kubebuilder:validation:Required
	Name string `json:"name"`
	// The soft limit for the ulimit type.
	//
	// SoftLimit is a required field
	// +kubebuilder:validation:Required
	SoftLimit int64 `json:"softLimit"`
}
    Ulimit defines the ulimit settings to pass to the container.
This object isn't applicable to jobs that are running on Fargate resources.
func (*Ulimit) DeepCopy ¶
func (in *Ulimit) DeepCopy() *Ulimit
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ulimit.
func (*Ulimit) DeepCopyInto ¶
func (in *Ulimit) DeepCopyInto(out *Ulimit)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Volume ¶
type Volume struct {
	// This parameter is specified when you are using an Amazon Elastic File System
	// file system for job storage. Jobs that are running on Fargate resources must
	// specify a platformVersion of at least 1.4.0.
	EfsVolumeConfiguration *EFSVolumeConfiguration `json:"efsVolumeConfiguration,omitempty"`
	// The contents of the host parameter determine whether your data volume persists
	// on the host container instance and where it is stored. If the host parameter
	// is empty, then the Docker daemon assigns a host path for your data volume.
	// However, the data isn't guaranteed to persist after the containers associated
	// with it stop running.
	//
	// This parameter isn't applicable to jobs that are running on Fargate resources
	// and shouldn't be provided.
	Host *Host `json:"host,omitempty"`
	// The name of the volume. Up to 255 letters (uppercase and lowercase), numbers,
	// hyphens, and underscores are allowed. This name is referenced in the sourceVolume
	// parameter of container definition mountPoints.
	Name *string `json:"name,omitempty"`
}
    Volume defines a data volume used in a job's container properties.
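    A sketch pairing a Volume with the MountPoint that consumes it, reusing the manualv1alpha1 import from the JobSpec sketch and the strPtr helper from the LogConfiguration sketch above. Leaving Host empty lets the Docker daemon assign a host path, so the data is scratch space that may not outlive the containers.
var (
	scratchVolume = manualv1alpha1.Volume{
		Name: strPtr("scratch"),
		Host: &manualv1alpha1.Host{},
	}
	// The mount point references the volume by name via sourceVolume.
	scratchMount = manualv1alpha1.MountPoint{
		SourceVolume:  strPtr("scratch"),
		ContainerPath: strPtr("/scratch"),
	}
)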
func (*Volume) DeepCopy ¶
func (in *Volume) DeepCopy() *Volume
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
func (*Volume) DeepCopyInto ¶
func (in *Volume) DeepCopyInto(out *Volume)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.