package example
v65.101.0
Published: Sep 23, 2025 License: Apache-2.0, UPL-1.0 Imports: 6 Imported by: 0

Documentation

Overview

Example (ChangeCompartment)
// Example code for changing an instance's compartment.
// This script provides a basic example of how to move an instance from one compartment to another using the Go SDK.
// This script will:
//
//    * Read user configuration
//    * Construct ComputeClient using user configuration
//    * Construct ChangeInstanceCompartmentDetails()
//    * Call ChangeInstanceCompartment() on core.ComputeClient
//    * List the instance and its attached resources before and after the move operation
//
//  This script takes the following values from environment variables
//
//    * INSTANCE_ID    - The OCID of the instance to move
//    * COMPARTMENT_ID - The target compartment id
//    * IF_MATCH (Optional)
//          The Instance will be moved only if the etag you provide matches the resource's current etag value.
//    * OPC_RETRY_TOKEN (Optional)
//          A token that uniquely identifies a request so it can be retried in case of a timeout or server error
//          without risk of executing that same action again. Retry tokens expire after 24 hours, but can be
//          invalidated before then due to conflicting operations
//
//

package main

import (
	"context"
	"fmt"
	"log"
	"math"
	"os"
	"time"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/core"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
	"github.com/oracle/oci-go-sdk/v65/workrequests"
)

var (
	instanceId, targetCompartmentId, ifMatch, opcRetryToken string
	retryPolicy                                             common.RetryPolicy
)

func main() {

	// Parse environment variables to get instanceId, targetCompartmentId, ifMatch and opcRetryToken
	parseEnvironmentVariables()

	// Create ComputeClient with default configuration
	computeClient, err := core.NewComputeClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)
	ctx := context.Background()

	// Get Compartment Id of the Instance
	request := core.GetInstanceRequest{
		InstanceId: common.String(instanceId),
	}
	r, err := computeClient.GetInstance(ctx, request)
	helpers.FatalIfError(err)
	availabilityDomain := *r.AvailabilityDomain
	sourceCompartmentID := *r.Instance.CompartmentId
	// Do not attempt the compartment move if the source and target compartment IDs are the same
	if sourceCompartmentID == targetCompartmentId {
		log.Printf("Source and target compartment IDs are the same!")
		os.Exit(1)
	}

	log.Printf(" ")
	log.Printf("Instance info before compartment move : ")
	if r.Etag != nil {
		log.Printf("   ETag : %s", *r.Etag)
	}
	printInstanceInfo(sourceCompartmentID, computeClient, availabilityDomain)

	// Create ChangeInstanceCompartmentDetails
	changeInstanceCompartmentDetails := core.ChangeInstanceCompartmentDetails{
		CompartmentId: common.String(targetCompartmentId),
	}

	// Create ChangeInstanceCompartmentRequest
	changeInstanceCompartmentRequest := core.ChangeInstanceCompartmentRequest{
		InstanceId:                       common.String(instanceId),
		ChangeInstanceCompartmentDetails: changeInstanceCompartmentDetails,
	}

	if len(ifMatch) > 0 {
		changeInstanceCompartmentRequest.IfMatch = common.String(ifMatch)
	}

	if len(opcRetryToken) > 0 {
		changeInstanceCompartmentRequest.OpcRetryToken = common.String(opcRetryToken)
	}

	log.Printf(" ")
	log.Printf("Moving Instance to target compartment ...")
	// Perform compartment move operation
	rs, err := computeClient.ChangeInstanceCompartment(ctx, changeInstanceCompartmentRequest)
	helpers.FatalIfError(err)

	// Wait for the compartment move operation to complete
	waitUntilMoveCompletion(rs.OpcWorkRequestId)

	log.Printf(" ")
	log.Printf("Instance info after compartment move : ")
	printInstanceInfo(targetCompartmentId, computeClient, availabilityDomain)

	log.Printf(" ")
	fmt.Println("Change Compartment Completed")
}

func printInstanceInfo(id string, c core.ComputeClient, availabilityDomain string) {
	log.Printf("   Compartment Id : %s", id)
	printVolumeAttachments(c, id)
	printVnicAttachments(c, id)
	printBootVolumeAttachments(c, availabilityDomain, id)
	printConsoleConnections(c, id)
	printConsoleHistories(c, id)

}

func printVolumeAttachments(c core.ComputeClient, compartmentId string) {

	request := core.ListVolumeAttachmentsRequest{
		CompartmentId: common.String(compartmentId),
		InstanceId:    common.String(instanceId),
	}
	lvar, err := c.ListVolumeAttachments(context.Background(), request)
	helpers.FatalIfError(err)
	var volumeAttachments = lvar.Items
	if len(volumeAttachments) > 0 {
		log.Printf("   Volume Attachments:")
		for _, v := range volumeAttachments {
			log.Printf("     Volume id : %s", *v.GetId())
			log.Printf("     Compartment id : %s", *v.GetCompartmentId())
			log.Printf("     State : %s", v.GetLifecycleState())
			log.Printf(" ")
		}
	}

}

func printVnicAttachments(c core.ComputeClient, compartmentId string) {

	request := core.ListVnicAttachmentsRequest{
		CompartmentId: common.String(compartmentId),
		InstanceId:    common.String(instanceId),
	}
	lvar, err := c.ListVnicAttachments(context.Background(), request)
	helpers.FatalIfError(err)
	var vnicAttachments = lvar.Items
	if len(vnicAttachments) > 0 {
		log.Printf("   Vnic Attachments:")
		for _, v := range vnicAttachments {
			log.Printf("     Vnic id : %s", *v.VnicId)
			log.Printf("     Compartment id : %s", *v.CompartmentId)
			log.Printf("     State : %s", v.LifecycleState)
			log.Printf(" ")
		}
	}

}

func printBootVolumeAttachments(c core.ComputeClient, availabilityDomain string, compartmentId string) {

	request := core.ListBootVolumeAttachmentsRequest{
		CompartmentId:      common.String(compartmentId),
		InstanceId:         common.String(instanceId),
		AvailabilityDomain: common.String(availabilityDomain),
	}
	lvar, err := c.ListBootVolumeAttachments(context.Background(), request)
	helpers.FatalIfError(err)
	var bootVolumeAttachments = lvar.Items
	if len(bootVolumeAttachments) > 0 {
		log.Printf("   Boot Volume Attachments:")
		for _, v := range bootVolumeAttachments {
			log.Printf("     Volume id : %s", *v.BootVolumeId)
			log.Printf("     Compartment id : %s", *v.CompartmentId)
			log.Printf("     State : %s", v.LifecycleState)
			log.Printf(" ")
		}
	}

}

func printConsoleConnections(c core.ComputeClient, compartmentId string) {

	request := core.ListInstanceConsoleConnectionsRequest{
		CompartmentId: common.String(compartmentId),
		InstanceId:    common.String(instanceId),
	}
	lvar, err := c.ListInstanceConsoleConnections(context.Background(), request)
	helpers.FatalIfError(err)
	var consoleConnections = lvar.Items
	if len(consoleConnections) > 0 {
		log.Printf("   Console Connections:")
		for _, v := range consoleConnections {
			log.Printf("     Console Connection Id : %s", *v.Id)
			log.Printf("     Compartment Id : %s", *v.CompartmentId)
			log.Printf("     State : %s", v.LifecycleState)
			log.Printf(" ")
		}
	}

}

func printConsoleHistories(c core.ComputeClient, compartmentId string) {

	request := core.ListConsoleHistoriesRequest{
		CompartmentId: common.String(compartmentId),
		InstanceId:    common.String(instanceId),
	}
	lvar, err := c.ListConsoleHistories(context.Background(), request)
	helpers.FatalIfError(err)
	var consoleHistories = lvar.Items
	if len(consoleHistories) > 0 {
		log.Printf("   Console Histories:")
		for _, v := range consoleHistories {
			log.Printf("     Console Connection Id : %s", *v.Id)
			log.Printf("     Compartment Id : %s", *v.CompartmentId)
			log.Printf("     State : %s", v.LifecycleState)
			log.Printf(" ")
		}
	}

}

func waitUntilMoveCompletion(opcWorkRequestID *string) {
	if opcWorkRequestID != nil {
		log.Printf("   opc-work-request-id : %s", *opcWorkRequestID)
		log.Printf("   Querying the status of move operation using opc-work-request-id ")
		wc, _ := workrequests.NewWorkRequestClientWithConfigurationProvider(common.DefaultConfigProvider())

		retryPolicy = getRetryPolicy()
		// Apply wait until work complete retryPolicy
		workRequest := workrequests.GetWorkRequestRequest{
			WorkRequestId: opcWorkRequestID,
			RequestMetadata: common.RequestMetadata{
				RetryPolicy: &retryPolicy,
			},
		}

		// GetWorkRequest is retried until the work request reaches the Succeeded status
		wr, err := wc.GetWorkRequest(context.Background(), workRequest)
		helpers.FatalIfError(err)

		if wr.Status != "" {
			log.Printf("   Final Work Status : %s, move operation complete", wr.Status)
		}
	}

}

func getRetryPolicy() common.RetryPolicy {

	// maximum number of retry attempts
	attempts := uint(10)

	nextDuration := func(r common.OCIOperationResponse) time.Duration {
		// wait longer before each subsequent retry; this returns an exponential
		// backoff of 1s, 2s, 4s, 8s, 16s, 32s, 64s, and so on
		return time.Duration(math.Pow(float64(2), float64(r.AttemptNumber-1))) * time.Second
	}

	var expectedWorkStatus = workrequests.WorkRequestStatusSucceeded

	// Get shouldRetry function based on GetWorkRequestResponse Status
	shouldRetry := func(r common.OCIOperationResponse) bool {
		if _, isServiceError := common.IsServiceError(r.Error); isServiceError {
			// a service error was returned (for example throttling or a transient
			// server error); retry the request
			return true
		}

		if converted, ok := r.Response.(workrequests.GetWorkRequestResponse); ok {
			log.Printf("     WorkRequest Status : %s", converted.Status)
			// retry until the WorkRequest Status is Succeeded - ignore case (BMI-2652)
			return converted.Status != expectedWorkStatus
		}

		return true
	}

	return common.NewRetryPolicy(attempts, shouldRetry, nextDuration)
}

func usage() {
	log.Printf("Please set the following environment variables to use ChangeInstanceCompartment()")
	log.Printf(" ")
	log.Printf("   INSTANCE_ID       # Required: Instance Id")
	log.Printf("   COMPARTMENT_ID    # Required: Target Compartment Id")
	log.Printf("   IF_MATCH          # Optional: ETag")
	log.Printf("   OPC_RETRY_TOKEN   # Optional: OPC retry token string")
	log.Printf(" ")
	os.Exit(1)
}

func parseEnvironmentVariables() {

	instanceId = os.Getenv("INSTANCE_ID")
	targetCompartmentId = os.Getenv("COMPARTMENT_ID")
	ifMatch = os.Getenv("IF_MATCH")
	opcRetryToken = os.Getenv("OPC_RETRY_TOKEN")

	if instanceId == "" || targetCompartmentId == "" {
		usage()
	}

	log.Printf("INSTANCE_ID     : %s", instanceId)
	log.Printf("COMPARTMENT_ID  : %s", targetCompartmentId)
	if ifMatch != "" {
		log.Printf("IF_MATCH        : %s", ifMatch)
	}
	if opcRetryToken != "" {
		log.Printf("OPC_RETRY_TOKEN : %s", opcRetryToken)
	}
}
Output:

Change Compartment Completed
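
As a side note, the hand-rolled exponential backoff in getRetryPolicy above can also be expressed with the example helpers used elsewhere on this page. The following is a minimal, hypothetical sketch (waitUntilMoveCompletionWithHelper is not part of the package; it only reuses identifiers already shown in this example):

// Hypothetical alternative to waitUntilMoveCompletion/getRetryPolicy above.
func waitUntilMoveCompletionWithHelper(ctx context.Context, wc workrequests.WorkRequestClient, opcWorkRequestID *string) {
	// retry GetWorkRequest until the work request reaches the Succeeded status
	shouldRetry := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(workrequests.GetWorkRequestResponse); ok {
			return converted.Status != workrequests.WorkRequestStatusSucceeded
		}
		return true
	}

	request := workrequests.GetWorkRequestRequest{
		WorkRequestId:   opcWorkRequestID,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetry),
	}
	_, err := wc.GetWorkRequest(ctx, request)
	helpers.FatalIfError(err)
}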
Example (ChangeNatGatewayCompartment)
// Example code for changing a NAT Gateway's compartment.
// This script provides a basic example of how to move a NAT Gateway from one compartment to another using the Go SDK.
// This script will:
//
//    * Read user configuration
//    * Construct VirtualNetworkClient using user configuration
//    * Create VCN and NAT Gateway
//    * Construct ChangeNatGatewayCompartmentDetails()
//    * Call ChangeNatGatewayCompartment() on core.VirtualNetworkClient
//    * List NAT Gateway before and after compartment move operation
//    * Delete VCN and NAT Gateway
//
//  This script takes the following values from environment variables
//
//    * SOURCE_COMPARTMENT_ID - The OCID of the compartment where the NAT gateway and related resources will be created
//    * DESTINATION_COMPARTMENT_ID - The OCID of the compartment where the NAT gateway will be moved to
//
//

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/core"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
)

const (
	displayName = "oci-go-sdk-example-ngw"
)

var (
	sourceCompartmentId, destinationCompartmentId string
)

func main() {

	// Parse environment variables to get sourceCompartmentId and destinationCompartmentId
	parseArgs()
	log.Printf("Performing operations to change NAT Gateway compartment from %s to %s", sourceCompartmentId, destinationCompartmentId)

	// Create VirtualNetworkClient with default configuration
	vcnClient, err := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)
	ctx := context.Background()

	// A VCN is required to create a NAT Gateway
	vcn := createVcnforNatGateway(ctx, vcnClient)
	log.Printf("Created VCN: %s", *vcn.Id)
	log.Printf("")

	// Create NAT Gateway
	natGateway := createNatGateway(ctx, vcnClient, vcn)

	// Change NAT Gateway's compartment
	changeNatGatewayCompartment(ctx, vcnClient, natGateway)

	fmt.Printf("Change NAT Gateway Compartment Completed")
	// Clean up resources
	defer func() {
		deleteNatGateway(ctx, vcnClient, natGateway)
		log.Printf("Deleted NAT Gateway")

		deleteVcnforNatGateway(ctx, vcnClient, vcn)
		log.Printf("Deleted VCN")
	}()

}

func createVcnforNatGateway(ctx context.Context, c core.VirtualNetworkClient) core.Vcn {
	// create a new VCN
	request := core.CreateVcnRequest{}
	request.CidrBlock = common.String("10.0.0.0/16")
	request.CompartmentId = common.String(sourceCompartmentId)
	request.DisplayName = common.String(displayName)

	r, err := c.CreateVcn(ctx, request)
	helpers.FatalIfError(err)

	// the logic below waits until the VCN is in the Available state
	pollUntilAvailable := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(core.GetVcnResponse); ok {
			return converted.LifecycleState != core.VcnLifecycleStateAvailable
		}
		return true
	}

	pollGetRequest := core.GetVcnRequest{
		VcnId:           r.Id,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(pollUntilAvailable),
	}

	// wait for VCN to become Available
	rsp, pollErr := c.GetVcn(ctx, pollGetRequest)
	helpers.FatalIfError(pollErr)
	return rsp.Vcn
}

func deleteVcnforNatGateway(ctx context.Context, c core.VirtualNetworkClient, vcn core.Vcn) {
	request := core.DeleteVcnRequest{
		VcnId:           vcn.Id,
		RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}

	_, err := c.DeleteVcn(ctx, request)
	helpers.FatalIfError(err)

	// shouldRetryFunc returns true while the delete should still be polled;
	// here it retries until the VCN reaches the Terminated lifecycle state
	shouldRetryFunc := func(r common.OCIOperationResponse) bool {
		if serviceError, ok := common.IsServiceError(r.Error); ok && serviceError.GetHTTPStatusCode() == 404 {
			// the resource has been deleted, stop retrying
			return false
		}

		if converted, ok := r.Response.(core.GetVcnResponse); ok {
			return converted.LifecycleState != core.VcnLifecycleStateTerminated
		}
		return true
	}

	pollGetRequest := core.GetVcnRequest{
		VcnId:           vcn.Id,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryFunc),
	}

	_, pollErr := c.GetVcn(ctx, pollGetRequest)
	if serviceError, ok := common.IsServiceError(pollErr); !ok ||
		(ok && serviceError.GetHTTPStatusCode() != 404) {
		// fail if the error is not a service error, or
		// if it is a service error with a status code other than 404
		helpers.FatalIfError(pollErr)
	}
}

func createNatGateway(ctx context.Context, c core.VirtualNetworkClient, vcn core.Vcn) core.NatGateway {

	log.Printf("Creating NAT Gateway")
	log.Printf("=======================================")
	createNatGatewayDetails := core.CreateNatGatewayDetails{
		CompartmentId: common.String(sourceCompartmentId),
		VcnId:         vcn.Id,
		DisplayName:   common.String(displayName),
	}

	request := core.CreateNatGatewayRequest{}
	request.CreateNatGatewayDetails = createNatGatewayDetails

	r, err := c.CreateNatGateway(ctx, request)
	helpers.FatalIfError(err)

	// the logic below waits until the NAT Gateway is in the Available state
	pollUntilAvailable := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(core.GetNatGatewayResponse); ok {
			return converted.LifecycleState != core.NatGatewayLifecycleStateAvailable
		}
		return true
	}

	pollGetRequest := core.GetNatGatewayRequest{
		NatGatewayId:    r.Id,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(pollUntilAvailable),
	}

	// wait for the lifecycle state to become Available
	rsp, pollErr := c.GetNatGateway(ctx, pollGetRequest)
	helpers.FatalIfError(pollErr)

	log.Printf("Created NAT Gateway and waited for it to become available %v\n", rsp.NatGateway)
	log.Printf("")
	log.Printf("")

	return rsp.NatGateway
}

func getNatGateway(ctx context.Context, c core.VirtualNetworkClient, natGateway core.NatGateway) core.NatGateway {
	request := core.GetNatGatewayRequest{
		NatGatewayId: natGateway.Id,
	}
	r, err := c.GetNatGateway(ctx, request)
	helpers.FatalIfError(err)
	return r.NatGateway
}

func deleteNatGateway(ctx context.Context, c core.VirtualNetworkClient, natGateway core.NatGateway) {
	request := core.DeleteNatGatewayRequest{
		NatGatewayId:    natGateway.Id,
		RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}

	_, err := c.DeleteNatGateway(ctx, request)
	helpers.FatalIfError(err)

	// shouldRetryFunc returns true while the delete should still be polled;
	// here it retries until the NAT Gateway reaches the Terminated lifecycle state
	shouldRetryFunc := func(r common.OCIOperationResponse) bool {
		if serviceError, ok := common.IsServiceError(r.Error); ok && serviceError.GetHTTPStatusCode() == 404 {
			// the resource has been deleted, stop retrying
			return false
		}

		if converted, ok := r.Response.(core.GetNatGatewayResponse); ok {
			return converted.LifecycleState != core.NatGatewayLifecycleStateTerminated
		}
		return true
	}

	pollGetRequest := core.GetNatGatewayRequest{
		NatGatewayId:    natGateway.Id,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryFunc),
	}

	_, pollErr := c.GetNatGateway(ctx, pollGetRequest)
	if serviceError, ok := common.IsServiceError(pollErr); !ok ||
		(ok && serviceError.GetHTTPStatusCode() != 404) {
		// fail if the error is not a service error, or
		// if it is a service error with a status code other than 404
		helpers.FatalIfError(pollErr)
	}
}

func changeNatGatewayCompartment(ctx context.Context, c core.VirtualNetworkClient, natGateway core.NatGateway) {
	log.Printf("Changing NAT Gateway's compartment")
	log.Printf("=======================================")
	changeNatGatewayCompartmentDetails := core.ChangeNatGatewayCompartmentDetails{
		CompartmentId: common.String(destinationCompartmentId),
	}

	request := core.ChangeNatGatewayCompartmentRequest{}
	request.NatGatewayId = natGateway.Id
	request.ChangeNatGatewayCompartmentDetails = changeNatGatewayCompartmentDetails

	_, err := c.ChangeNatGatewayCompartment(ctx, request)
	helpers.FatalIfError(err)
	updatedNatGateway := getNatGateway(ctx, c, natGateway)
	log.Printf("NAT Gateway's compartment has been changed  : %v\n", updatedNatGateway)
	log.Printf("")
	log.Printf("")
}

func envUsage() {
	log.Printf("Please set the following environment variables to use ChangeInstanceCompartment()")
	log.Printf(" ")
	log.Printf("   SOURCE_COMPARTMENT_ID    # Required: Source Compartment Id")
	log.Printf("   DESTINATION_COMPARTMENT_ID    # Required: Destination Compartment Id")
	log.Printf(" ")
	os.Exit(1)
}

func parseArgs() {

	sourceCompartmentId = os.Getenv("SOURCE_COMPARTMENT_ID")
	destinationCompartmentId = os.Getenv("DESTINATION_COMPARTMENT_ID")

	if sourceCompartmentId == "" || destinationCompartmentId == "" {
		envUsage()
	}

	log.Printf("SOURCE_COMPARTMENT_ID     : %s", sourceCompartmentId)
	log.Printf("DESTINATION_COMPARTMENT_ID  : %s", destinationCompartmentId)

}
Output:

Change NAT Gateway Compartment Completed
Example (ChangeServiceGatewayCompartment)
// Example code for changing a service gateway's compartment.
// This script provides a basic example of how to move a service gateway from one compartment to another using the Go SDK.
// This script will:
//
//    * Read user configuration
//    * Construct VirtualNetworkClient using user configuration
//    * Create VCN and Service Gateway
//    * Call ChangeServiceGatewayCompartment() on core.VirtualNetworkClient
//    * Get Service Gateway to see the updated compartment ID
//    * Delete Service Gateway and VCN
//
//  This script takes the following values from environment variables
//
//    * SRC_COMPARTMENT_ID    - Source Compartment ID where the service gateway and VCN should be created
//    * DEST_COMPARTMENT_ID   - Destination Compartment ID where the service gateway should be moved to
//
// Additionally, this script assumes that the default OCI config is set up

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/core"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
)

const (
	serviceGatewayVcnDisplayName = "OCI-GOSDK-Sample"
)

var (
	srcCompartmentId, destCompartmentId string
)

func main() {

	// Parse environment variables to get srcCompartmentId, destCompartmentId
	parseEnvVariables()

	// Create VirtualNetworkClient with default configuration
	client, err := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)
	ctx := context.Background()

	log.Printf("Create Vcn ")
	vcn := createSgwVcn(ctx, client)
	log.Printf("VCN OCID : %s", *vcn.Id)

	log.Printf("Create Service Gateway")
	sgw := createServiceGateway(ctx, client, vcn)
	log.Printf("Service Gateway OCID : %s", *sgw.Id)

	log.Printf("Change Service Gateway Compartment")
	changeServiceGatewayCompartment(ctx, client, sgw)
	updatedsgw := getServiceGateway(ctx, client, sgw)
	log.Printf("Updated Service Gateway Compartment : %s", *updatedsgw.CompartmentId)

	fmt.Printf("change compartment completed")

	// clean up resources
	defer func() {
		log.Printf("Delete Service Gateway")
		deleteServiceGateway(ctx, client, sgw)
		log.Printf("Deleted Service Gateway")

		log.Printf("Delete VCN")
		deleteSgwVcn(ctx, client, vcn)
		log.Printf("Deleted VCN")
	}()

}

func createSgwVcn(ctx context.Context, c core.VirtualNetworkClient) core.Vcn {
	// create a new VCN
	request := core.CreateVcnRequest{}
	request.CidrBlock = common.String("10.0.0.0/16")
	request.CompartmentId = common.String(srcCompartmentId)
	request.DisplayName = common.String(serviceGatewayVcnDisplayName)

	r, err := c.CreateVcn(ctx, request)
	helpers.FatalIfError(err)

	// the logic below waits until the VCN is in the Available state
	pollUntilAvailable := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(core.GetVcnResponse); ok {
			return converted.LifecycleState != core.VcnLifecycleStateAvailable
		}
		return true
	}

	pollGetRequest := core.GetVcnRequest{
		VcnId:           r.Id,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(pollUntilAvailable),
	}

	// wait for the lifecycle state to become Available
	rsp, pollErr := c.GetVcn(ctx, pollGetRequest)
	helpers.FatalIfError(pollErr)
	return rsp.Vcn
}

func deleteSgwVcn(ctx context.Context, c core.VirtualNetworkClient, vcn core.Vcn) {
	request := core.DeleteVcnRequest{
		VcnId:           vcn.Id,
		RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}

	_, err := c.DeleteVcn(ctx, request)
	helpers.FatalIfError(err)

	// shouldRetryFunc returns true while the delete should still be polled;
	// here it retries until the VCN reaches the Terminated lifecycle state
	shouldRetryFunc := func(r common.OCIOperationResponse) bool {
		if serviceError, ok := common.IsServiceError(r.Error); ok && serviceError.GetHTTPStatusCode() == 404 {
			// the resource has been deleted, stop retrying
			return false
		}

		if converted, ok := r.Response.(core.GetVcnResponse); ok {
			return converted.LifecycleState != core.VcnLifecycleStateTerminated
		}
		return true
	}

	pollGetRequest := core.GetVcnRequest{
		VcnId:           vcn.Id,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryFunc),
	}

	_, pollErr := c.GetVcn(ctx, pollGetRequest)
	if serviceError, ok := common.IsServiceError(pollErr); !ok ||
		(ok && serviceError.GetHTTPStatusCode() != 404) {
		// fail if the error is not a service error, or
		// if it is a service error with a status code other than 404
		helpers.FatalIfError(pollErr)
	}
}

func createServiceGateway(ctx context.Context, c core.VirtualNetworkClient, vcn core.Vcn) core.ServiceGateway {

	// Update the services slice with the required Oracle Services
	var services = []core.ServiceIdRequestDetails{}
	createServiceGatewayDetails := core.CreateServiceGatewayDetails{
		CompartmentId: common.String(srcCompartmentId),
		VcnId:         vcn.Id,
		DisplayName:   common.String(serviceGatewayVcnDisplayName),
		Services:      services,
	}

	// create a new Service Gateway
	request := core.CreateServiceGatewayRequest{}
	request.CreateServiceGatewayDetails = createServiceGatewayDetails

	r, err := c.CreateServiceGateway(ctx, request)
	helpers.FatalIfError(err)

	// the logic below waits until the Service Gateway is in the Available state
	pollUntilAvailable := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(core.GetServiceGatewayResponse); ok {
			return converted.LifecycleState != core.ServiceGatewayLifecycleStateAvailable
		}
		return true
	}

	pollGetRequest := core.GetServiceGatewayRequest{
		ServiceGatewayId: r.Id,
		RequestMetadata:  helpers.GetRequestMetadataWithCustomizedRetryPolicy(pollUntilAvailable),
	}

	// wait for the lifecycle state to become Available
	rsp, pollErr := c.GetServiceGateway(ctx, pollGetRequest)
	helpers.FatalIfError(pollErr)
	return rsp.ServiceGateway
}

func deleteServiceGateway(ctx context.Context, c core.VirtualNetworkClient, serviceGateway core.ServiceGateway) {
	request := core.DeleteServiceGatewayRequest{
		ServiceGatewayId: serviceGateway.Id,
		RequestMetadata:  helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}

	_, err := c.DeleteServiceGateway(ctx, request)
	helpers.FatalIfError(err)

	// shouldRetryFunc returns true while the delete should still be polled;
	// here it retries until the Service Gateway reaches the Terminated lifecycle state
	shouldRetryFunc := func(r common.OCIOperationResponse) bool {
		if serviceError, ok := common.IsServiceError(r.Error); ok && serviceError.GetHTTPStatusCode() == 404 {
			// the resource has been deleted, stop retrying
			return false
		}

		if converted, ok := r.Response.(core.GetServiceGatewayResponse); ok {
			return converted.LifecycleState != core.ServiceGatewayLifecycleStateTerminated
		}
		return true
	}

	pollGetRequest := core.GetServiceGatewayRequest{
		ServiceGatewayId: serviceGateway.Id,
		RequestMetadata:  helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryFunc),
	}

	_, pollErr := c.GetServiceGateway(ctx, pollGetRequest)
	if serviceError, ok := common.IsServiceError(pollErr); !ok ||
		(ok && serviceError.GetHTTPStatusCode() != 404) {
		// fail if the error is not a service error, or
		// if it is a service error with a status code other than 404
		helpers.FatalIfError(pollErr)
	}
}

func changeServiceGatewayCompartment(ctx context.Context, c core.VirtualNetworkClient, serviceGateway core.ServiceGateway) {
	changeCompartmentDetails := core.ChangeServiceGatewayCompartmentDetails{
		CompartmentId: common.String(destCompartmentId),
	}

	request := core.ChangeServiceGatewayCompartmentRequest{}
	request.ServiceGatewayId = serviceGateway.Id
	request.ChangeServiceGatewayCompartmentDetails = changeCompartmentDetails

	_, err := c.ChangeServiceGatewayCompartment(ctx, request)
	helpers.FatalIfError(err)

}

func getServiceGateway(ctx context.Context, c core.VirtualNetworkClient, serviceGateway core.ServiceGateway) core.ServiceGateway {
	request := core.GetServiceGatewayRequest{
		ServiceGatewayId: serviceGateway.Id,
	}

	r, err := c.GetServiceGateway(ctx, request)
	helpers.FatalIfError(err)

	return r.ServiceGateway
}

func printUsage() {
	fmt.Printf("Please set the following environment variables to use ChangeServiceGatewayCompartment()")
	fmt.Printf(" ")
	fmt.Printf("   SRC_COMPARTMENT_ID       # Required: Source Compartment Id")
	fmt.Printf("   DEST_COMPARTMENT_ID	    # Required: Destination Compartment Id")
	fmt.Printf(" ")
	os.Exit(1)
}

func parseEnvVariables() {

	srcCompartmentId = os.Getenv("SRC_COMPARTMENT_ID")
	destCompartmentId = os.Getenv("DEST_COMPARTMENT_ID")

	if srcCompartmentId == "" || destCompartmentId == "" {
		printUsage()
	}

	log.Printf("SRC_COMPARTMENT_ID     : %s", srcCompartmentId)
	log.Printf("DEST_COMPARTMENT_ID  : %s", destCompartmentId)
}
Output:

change compartment completed
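
The createServiceGateway function above passes an empty Services slice. As a hedged illustration only (the ListServices operation and the Service model fields are assumptions about the core package and are not part of this example), the slice could be populated from the Oracle services available in the region:

// Hypothetical sketch: build ServiceIdRequestDetails for every available Oracle service.
func listServiceIds(ctx context.Context, c core.VirtualNetworkClient) []core.ServiceIdRequestDetails {
	r, err := c.ListServices(ctx, core.ListServicesRequest{})
	helpers.FatalIfError(err)

	services := make([]core.ServiceIdRequestDetails, 0, len(r.Items))
	for _, svc := range r.Items {
		services = append(services, core.ServiceIdRequestDetails{ServiceId: svc.Id})
	}
	return services
}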
Example (ClusterCRUD)

Example of how to perform CRUD operations on a cluster, how to get the kubernetes config, and how to work with WorkRequests

ctx := context.Background()
c, clerr := containerengine.NewContainerEngineClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

// create network resources for cluster.
// this sample is to demonstrate how to use cluster APIs
// for more configuration setup, please refer to the link here:
// https://docs.oracle.com/iaas/Content/ContEng/Concepts/contengnetworkconfig.htm
vcnID, subnet1ID, _ := createVCNWithSubnets(ctx)

defaultKubernetesVersion := getDefaultKubernetesVersion(c)
createClusterResp := createCluster(ctx, c, vcnID, defaultKubernetesVersion, subnet1ID)

// wait until work request complete
workReqResp := waitUntilWorkRequestComplete(c, createClusterResp.OpcWorkRequestId)
fmt.Println("cluster created")

// update cluster with a new name and upgrade the kubernetes version
updateReq := containerengine.UpdateClusterRequest{}

// please see the document here for actionType values:
// https://docs.oracle.com/iaas/api/#/en/containerengine/20180222/datatypes/WorkRequestResource
clusterID := getResourceID(workReqResp.Resources, containerengine.WorkRequestResourceActionTypeCreated, "CLUSTER")
updateReq.ClusterId = clusterID
defer deleteCluster(ctx, c, clusterID)
updateReq.Name = common.String("GOSDK_Sample_New_CE")

getReq := containerengine.GetClusterRequest{
	ClusterId: updateReq.ClusterId,
}

getResp, err := c.GetCluster(ctx, getReq)
helpers.FatalIfError(err)
// check for upgrade versions
if len(getResp.Cluster.AvailableKubernetesUpgrades) > 0 {
	// if newer version available, set it for upgrade
	updateReq.KubernetesVersion = common.String(getResp.Cluster.AvailableKubernetesUpgrades[0])
}

updateResp, err := c.UpdateCluster(ctx, updateReq)
helpers.FatalIfError(err)
fmt.Println("updating cluster")

// wait until update complete
workReqResp = waitUntilWorkRequestComplete(c, updateResp.OpcWorkRequestId)
fmt.Println("cluster updated")

// get cluster
getResp, err = c.GetCluster(ctx, getReq)
helpers.FatalIfError(err)

fmt.Printf("cluster name updated to %s\n", *getResp.Name)
Output:

create VCN complete
create subnet1 complete
create subnet2 complete
creating cluster
cluster created
updating cluster
cluster updated
cluster name updated to GOSDK_Sample_New_CE
deleting cluster
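
The helpers createVCNWithSubnets, createCluster, getDefaultKubernetesVersion, getResourceID and waitUntilWorkRequestComplete are not shown on this page. As one hedged sketch, waitUntilWorkRequestComplete could poll the container engine work request with the same customized retry pattern used by the other examples here (the Status check is an assumption; the real helper may key off a different field):

func waitUntilWorkRequestComplete(client containerengine.ContainerEngineClient, workRequestID *string) containerengine.GetWorkRequestResponse {
	// retry GetWorkRequest until the work request reaches the SUCCEEDED status
	shouldRetry := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(containerengine.GetWorkRequestResponse); ok {
			return converted.Status != containerengine.WorkRequestStatusSucceeded
		}
		return true
	}

	getReq := containerengine.GetWorkRequestRequest{
		WorkRequestId:   workRequestID,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetry),
	}

	resp, err := client.GetWorkRequest(context.Background(), getReq)
	helpers.FatalIfError(err)
	return resp
}

Returning the full GetWorkRequestResponse lets the caller inspect Resources afterwards, which is how the snippet above extracts the cluster OCID with getResourceID.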
Example (ConfigureCircuitBreaker)

Example shows how to configure a circuit breaker

// To disable the default circuit breaker for all services there are two options: set the circuit breaker
// environment variable to false, or use the global variable:
// common.GlobalCircuitBreakerSetting = common.NoCircuitBreaker()

identityClient, err := identity.NewIdentityClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)
// Add one more status code, 404 (compared with the default circuit breaker setting), as a failure status code for the circuit breaker.
successStatCodeMap := map[int]bool{
	429: false,
	404: false,
	500: false,
	502: false,
	503: false,
	504: false,
}
// Configure CircuitBreaker
cbst := common.NewCircuitBreakerSettingWithOptions(
	common.WithName("myCircuitBreaker"),
	common.WithIsEnabled(true),
	common.WithMinimumRequests(5),
	common.WithCloseStateWindow(60*time.Second),
	common.WithFailureRateThreshold(0.70),
	common.WithSuccessStatCodeMap(successStatCodeMap),
	common.WithServiceName("Identity"))

// to use the default circuit breaker instead, there is no need to define successStatCodeMap and cbst; directly call:
// cbst := common.DefaultCircuitBreakerSetting()

identityClient.BaseClient.Configuration.CircuitBreaker = common.NewCircuitBreaker(cbst)

// The OCID of the tenancy containing the compartment.
tenancyID, err := common.DefaultConfigProvider().TenancyOCID()
helpers.FatalIfError(err)

// make the tenancyOCID incorrect on purpose - testing
fakeTenancyID := tenancyID[1:len(tenancyID)-2] + "mm"

request := identity.ListAvailabilityDomainsRequest{
	CompartmentId: &fakeTenancyID,
}

for i := 0; i < 5; i++ {
	identityClient.ListAvailabilityDomains(context.Background(), request)
	fmt.Println(i*10, "seconds CircuitBreaker state: "+identityClient.Configuration.CircuitBreaker.Cb.State().String())
	time.Sleep(10 * time.Second)
}
time.Sleep(5 * time.Second)
fmt.Println("After 55s, CircuitBreaker current state: " + identityClient.Configuration.CircuitBreaker.Cb.State().String())

fmt.Println("Wait 30 sec...")
time.Sleep(30 * time.Second)
fmt.Println("Make a good API call")

request = identity.ListAvailabilityDomainsRequest{
	CompartmentId: &tenancyID,
}
identityClient.ListAvailabilityDomains(context.Background(), request)
time.Sleep(10 * time.Second)
fmt.Println("check current CircuitBreaker state: " + identityClient.Configuration.CircuitBreaker.Cb.State().String())
Output:

0 seconds CircuitBreaker state: closed
10 seconds CircuitBreaker state: closed
20 seconds CircuitBreaker state: closed
30 seconds CircuitBreaker state: closed
40 seconds CircuitBreaker state: open
After 55s, CircuitBreaker current state: open
Wait 30 sec...
Make a good API call
check current CircuitBreaker state: closed
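
For reference, the two alternatives mentioned in the comments above come down to a couple of lines; this is a minimal sketch using only identifiers already shown in this example:

// disable circuit breakers for every client in the process
common.GlobalCircuitBreakerSetting = common.NoCircuitBreaker()

// or keep a single client on the service default setting
identityClient.BaseClient.Configuration.CircuitBreaker = common.NewCircuitBreaker(common.DefaultCircuitBreakerSetting())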
Example (CopyVolumeBackup)

Copies a volume backup to another region. Polls the copied volume backup in the destination region until its lifecycle state is Available.

sourceBackupId := "REPLACE_WITH_VOLUME_BACKUP_OCID"
destinationRegion := "REPLACE_WITH_DESTINATION_REGION_NAME"
// displayName can be empty, in which case the copied backup will have the same display name as the original backup
displayName := ""
// kmsKeyId is optional too. If not specified, the copied backup will be encrypted with Oracle-provided
// encryption keys.
kmsKeyId := ""

// Creating the copyVolumeBackupRequest
request := core.CopyVolumeBackupRequest{
	CopyVolumeBackupDetails: core.CopyVolumeBackupDetails{
		DestinationRegion: common.String(destinationRegion),
	},
	VolumeBackupId: common.String(sourceBackupId),
}
if len(displayName) > 0 {
	request.CopyVolumeBackupDetails.DisplayName = common.String(displayName)
}

if len(kmsKeyId) > 0 {
	request.CopyVolumeBackupDetails.KmsKeyId = common.String(kmsKeyId)
}

// Creating a Blockstorage client in the source region to initiate the copy.
bs, err := core.NewBlockstorageClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)
fmt.Println("Copying backup.")
copyResponse, err := bs.CopyVolumeBackup(context.Background(), request)
volumeBackup := copyResponse.VolumeBackup
helpers.FatalIfError(err)
fmt.Println("Copy backup request accepted waiting for the backup to be in Available state.")
backupState := volumeBackup.LifecycleState

// Create a Blockstorage client in the destination region and
// poll the copied volume backup's lifecycle state.
destinationBS, err := core.NewBlockstorageClientWithConfigurationProvider(common.DefaultConfigProvider())
destinationBS.SetRegion(destinationRegion)
helpers.FatalIfError(err)
for backupState != core.VolumeBackupLifecycleStateAvailable {
	time.Sleep(15 * time.Second)
	getVolumeBackupRequest := core.GetVolumeBackupRequest{
		VolumeBackupId: volumeBackup.Id,
	}

	getVolumeBackupResponse, err := destinationBS.GetVolumeBackup(context.Background(), getVolumeBackupRequest)
	helpers.FatalIfError(err)
	backupState = getVolumeBackupResponse.VolumeBackup.LifecycleState
}
// VolumeBackup's lifecycle state reached Available.
fmt.Println("Backup copy succeeded.")
Output:

Copying backup.
Copy backup request accepted waiting for the backup to be in Available state.
Backup copy succeeded.
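
The manual sleep loop above works, but the same wait can be expressed with the customized retry policy helper used by the other examples on this page. A minimal sketch, assuming the same volumeBackup and destinationBS variables from the snippet above:

// retry GetVolumeBackup until the copied backup reaches the Available lifecycle state
pollUntilAvailable := func(r common.OCIOperationResponse) bool {
	if converted, ok := r.Response.(core.GetVolumeBackupResponse); ok {
		return converted.LifecycleState != core.VolumeBackupLifecycleStateAvailable
	}
	return true
}

pollRequest := core.GetVolumeBackupRequest{
	VolumeBackupId:  volumeBackup.Id,
	RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(pollUntilAvailable),
}

_, pollErr := destinationBS.GetVolumeBackup(context.Background(), pollRequest)
helpers.FatalIfError(pollErr)
fmt.Println("Backup copy succeeded.")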
Example (CreateADBsConnectionWithPrivateEndpoint)

Example Use Case: Existing ADB-S with Private Endpoint

This example creates a Database Tools Connection to an Autonomous Database (ADB-S) accessible by private endpoint (PE). Note, since this connection will be against a PE, a Database Tools Private Endpoint Reverse Connection is required. This example serves as an academic exercise of the SDK.

Prerequisites:

  • An existing ADB-S with PE and network security group (i.e. ingress on 1522)
  • Available capacity (limits apply) to create a new Private Endpoint
  • An existing Vault for storage of secrets
  • A previously configured .oci/config file with a [DEFAULT] section
  • The following environment variables set:
  • OCI_DBS_OCID : The OCID for an ADB-S database
  • OCI_VAULT_OCID : The OCID for a vault (to store secrets)
  • OCI_DB_USER : The Oracle database user to connect with
  • OCI_DB_PASS : The Oracle database password to connect with

High-level Steps:

1- Locate the Autonomous Database (ADB-S) by the provided OCID
2- Locate the Vault by the provided OCID
3- Download the wallet for the ADB-S
4- Store the secrets in the Vault (as base64 encoded strings)
5- Create a Database Tools Private Endpoint for a Reverse Connection to the Private Endpoint of the ADB-S
6- Create a Database Tools connection
7- Validate the connection

... cleanup when done (delete the temporary secrets, connection, and PE)

                     Client
                       |
                       |
+----------------------+----------+
|                      V          |
|              +----------------+ |
|              | Database Tools | |
|              |    Service     | |
|              +----------------+ |
|                      |          |
| Database             |          |
| Tools                |          |
| VCN                  |          |
+----------------------+----------+
                       |
                       |
+----------------------+----------+
|                      |          |
|                      V          |
|                +-----------+    |
|                | Database  |    |
|                |  Tools    |    |
|                | Private   |    |
|                | Endpoint  |    |
|                |  Reverse  |    |
|                | Connection|    |
|                +-----------+    |
|                      |          |
|                      V          |
|                +-----------+    |
|                |   ADB-S   |    |
|                |  Private  |    |
|                |  Endpoint |    |
|                +-----------+    |
|                      |          |
| Customer             |          |
| VCN                  |          |
+----------------------+----------+
                       |
                       |
+----------------------+----------+
|                      |          |
|                      V          |
|                  ---------      |
|                 /  ADB-S  \     |
|                 | Private |     |
|                 \ Endpoint/     |
|                  ---------      |
|                                 |
| ADB                             |
| Shared                          |
| VCN                             |
+---------------------------------+
// Parses environment variables, .oci/config, and sets up the SDK clients
cfg := newConfig()

// Ignoring errors for simplicity
privateEndpointId, _ := createDbToolsPrivateEndpoint(cfg)
walletSecretId, _ := createSecretInVault(cfg.WalletBase64, cfg)
passwdSecretId, _ := createSecretInVault(cfg.Password, cfg)
dbConnectionId, _ := createDatabaseToolsConnectionADBs(walletSecretId, passwdSecretId, privateEndpointId, cfg)

if ok := validateDatabaseToolsConnectionOracle(dbConnectionId, cfg); ok {
	log.Println("connection is valid")
}

// ... cleanup resources when finished, comment out the following delete
// calls to keep the resources created above.
if err := deleteConnection(dbConnectionId, cfg); err != nil {
	log.Printf("error deleting connection: %v\n", err)
}
if err := deleteSecret(passwdSecretId, cfg); err != nil {
	log.Printf("error deleting secret: %v\n", err)
}
if err := deleteSecret(walletSecretId, cfg); err != nil {
	log.Printf("error deleting secret: %v\n", err)
}
if err := deletePrivateEndpoint(privateEndpointId, cfg); err != nil {
	log.Printf("error deleting private endpoint: %v\n", err)
}

fmt.Println("Example_createADBsConnectionWithPrivateEndpoint complete")
Output:

Example_createADBsConnectionWithPrivateEndpoint complete
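
The helper functions used above (createDbToolsPrivateEndpoint, createSecretInVault, createDatabaseToolsConnectionADBs, and so on) are not shown on this page. As a hedged sketch of step 3 (downloading the ADB-S wallet), assuming the GenerateAutonomousDatabaseWallet operation of the database client, imports of io and github.com/oracle/oci-go-sdk/v65/database, and a caller-chosen wallet password:

// Hypothetical helper: download the ADB-S wallet as a zip archive held in memory.
func downloadAdbWallet(ctx context.Context, dbClient database.DatabaseClient, adbId, walletPassword string) ([]byte, error) {
	resp, err := dbClient.GenerateAutonomousDatabaseWallet(ctx, database.GenerateAutonomousDatabaseWalletRequest{
		AutonomousDatabaseId: common.String(adbId),
		GenerateAutonomousDatabaseWalletDetails: database.GenerateAutonomousDatabaseWalletDetails{
			Password: common.String(walletPassword),
		},
	})
	if err != nil {
		return nil, err
	}
	defer resp.Content.Close()
	return io.ReadAll(resp.Content)
}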
Example (CreateADBsConnectionWithPublicIp)

Example Use Case: Existing ADB-S with public IP (no ACL)

This example creates a Database Tools Connection to an Autonomous Database (ADB) on Shared Exadata Infrastructure, accessible by public IP. Note, since this connection will be against a public IP address, a Database Tools Private Endpoint Reverse Connection is not required.

Prerequisites:

  • An existing ADB-S
  • An existing Vault for storage of secrets
  • A previously configured .oci/config file with a [DEFAULT] section
  • The following environment variables set:
  • OCI_DBS_OCID : The OCID for an ADB-S database
  • OCI_VAULT_OCID : The OCID for a vault (to store secrets)
  • OCI_DB_USER : The Oracle database user to connect with
  • OCI_DB_PASS : The Oracle database password to connect with

High-level Steps:

1- Locate the Autonomous Database (ADB-S) by the provided OCID
2- Locate the Vault by the provided OCID
3- Download the wallet for the ADB-S
4- Store the secrets in the Vault (as base64 encoded strings)
5- Create a Database Tools connection
6- Validate the connection

... cleanup when done (delete the temporary secrets and connection)

                     Client
                       |
                       |
+----------------------+----------+
|                      V          |
|              +----------------+ |
|              | Database Tools | |
|              |    Service     | |
|              +----------------+ |
|                      |          |
| Database             |          |
| Tools                |          |
| VCN                  |          |
+----------------------+----------+
                       |
                       |
+--------------+       |
| Customer     |       |
| VCN          |       |
+--------------+       |
                       |
                       |
+----------------------+----------+
|                      |          |
|                      V          |
|                  ---------      |
|                 /  ADB-S  \     |
|                 \Public IP/     |
|                  ---------      |
|                                 |
| ADB                             |
| Shared                          |
| VCN                             |
+---------------------------------+
// Parses environment variables, .oci/config, and sets up the SDK clients
cfg := newConfig()

// Ignoring errors for simplicity
walletSecretId, _ := createSecretInVault(cfg.WalletBase64, cfg)
passwdSecretId, _ := createSecretInVault(cfg.Password, cfg)
dbConnectionId, _ := createDatabaseToolsConnectionADBsPublicIp(walletSecretId, passwdSecretId, cfg)

if ok := validateDatabaseToolsConnectionOracle(dbConnectionId, cfg); ok {
	log.Println("connection is valid")
}

// ... cleanup resources when finished, comment out the following delete
// calls to keep the resources created above.
if err := deleteConnection(dbConnectionId, cfg); err != nil {
	log.Printf("error deleting connection: %v\n", err)
}
if err := deleteSecret(passwdSecretId, cfg); err != nil {
	log.Printf("error deleting secret: %v\n", err)
}
if err := deleteSecret(walletSecretId, cfg); err != nil {
	log.Printf("error deleting secret: %v\n", err)
}

fmt.Println("Example_createADBsConnectionWithPublicIp complete")
Output:

Example_createADBsConnectionWithPublicIp complete
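
Similarly, createSecretInVault is not shown on this page. A heavily hedged sketch of step 4 (storing a base64-encoded value in the Vault) follows; the vault package client, the CreateSecretDetails fields, and Base64SecretContentDetails used here are assumptions and may not match the real helper:

// Hypothetical helper: store a base64-encoded value as a new secret in an existing Vault.
// Assumes an import of github.com/oracle/oci-go-sdk/v65/vault.
func createSecretInVaultSketch(ctx context.Context, client vault.VaultsClient,
	compartmentId, vaultId, keyId, name, base64Content string) (*string, error) {
	resp, err := client.CreateSecret(ctx, vault.CreateSecretRequest{
		CreateSecretDetails: vault.CreateSecretDetails{
			CompartmentId: common.String(compartmentId),
			VaultId:       common.String(vaultId),
			KeyId:         common.String(keyId),
			SecretName:    common.String(name),
			SecretContent: vault.Base64SecretContentDetails{
				Content: common.String(base64Content),
			},
		},
	})
	if err != nil {
		return nil, err
	}
	return resp.Secret.Id, nil
}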
Example (CreateAdb)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

createDbDetails := database.CreateAutonomousDatabaseDetails{
	CompartmentId:        helpers.CompartmentID(),
	DbName:               common.String("gosdkdb"),
	CpuCoreCount:         common.Int(1),
	DataStorageSizeInTBs: common.Int(1),
	AdminPassword:        common.String("DBaaS12345_#"),
	IsAutoScalingEnabled: common.Bool(true),
}

createadbReq := database.CreateAutonomousDatabaseRequest{
	CreateAutonomousDatabaseDetails: createDbDetails,
}

_, err := c.CreateAutonomousDatabase(context.Background(), createadbReq)
helpers.FatalIfError(err)

fmt.Println("create adb successful")
Output:

create adb successful
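
The example above returns as soon as the create call is accepted. If the caller needs to wait for provisioning to finish, the response can be captured and polled with the same retry helper pattern used elsewhere on this page. A minimal sketch (the lifecycle enum and response field names are assumptions about the database package):

// capture the response instead of discarding it
createResp, err := c.CreateAutonomousDatabase(context.Background(), createadbReq)
helpers.FatalIfError(err)

// retry GetAutonomousDatabase until the database reaches the Available lifecycle state
pollUntilAvailable := func(r common.OCIOperationResponse) bool {
	if converted, ok := r.Response.(database.GetAutonomousDatabaseResponse); ok {
		return converted.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable
	}
	return true
}

getReq := database.GetAutonomousDatabaseRequest{
	AutonomousDatabaseId: createResp.AutonomousDatabase.Id,
	RequestMetadata:      helpers.GetRequestMetadataWithCustomizedRetryPolicy(pollUntilAvailable),
}
_, err = c.GetAutonomousDatabase(context.Background(), getReq)
helpers.FatalIfError(err)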
Example (CreateAdbPreview)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

createDbDetails := database.CreateAutonomousDatabaseDetails{
	CompartmentId:                            helpers.CompartmentID(),
	DbName:                                   common.String("gosdkPreview"),
	CpuCoreCount:                             common.Int(1),
	DataStorageSizeInTBs:                     common.Int(1),
	AdminPassword:                            common.String("DBaaS12345_#"),
	IsAutoScalingEnabled:                     common.Bool(false),
	IsPreviewVersionWithServiceTermsAccepted: common.Bool(true),
}

createadbReq := database.CreateAutonomousDatabaseRequest{
	CreateAutonomousDatabaseDetails: createDbDetails,
}

_, err := c.CreateAutonomousDatabase(context.Background(), createadbReq)
helpers.FatalIfError(err)

fmt.Println("create adb Preview successful")
Output:

create adb Preview successful
Example (CreateAndUseInstanceConfiguration)

Example to showcase instance configuration creation and operations, and eventual teardown

// Example code for Compute Management Services API
// This example shows how you can create and use an Instance Configuration. It will:
//
//   * Create the InstanceConfiguration from input details
//   * Launch an instance from that instance configuration
//   * Create an instance configuration from a running instance
//   * Clean everything up

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/core"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
)

// Example to showcase instance configuration creation and operations, and eventual teardown
func main() {
	InstanceConfigsParseEnvironmentVariables()

	ctx := context.Background()

	computeMgmtClient, err := core.NewComputeManagementClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)

	computeClient, err := core.NewComputeClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)

	// Create instance configuration
	createInstanceConfigurationResponse, _ := createInstanceConfiguration(ctx, computeMgmtClient, imageId, compartmentId)
	fmt.Println("Instance configuration created")

	instanceConfiguration := createInstanceConfigurationResponse.InstanceConfiguration

	launchInstanceConfigResponse, _ := launchInstanceConfiguration(ctx, computeMgmtClient, *instanceConfiguration.Id, subnetId, ad)

	instance := launchInstanceConfigResponse.Instance
	pollUntilDesiredInstanceState(ctx, computeClient, *instance.Id)
	fmt.Println("Instance launched")

	createInstanceConfigFromInstanceResponse, _ := createInstanceConfigFromInstance(ctx, computeMgmtClient, *instance.Id, compartmentId)
	fmt.Println("Instance configuration created from instance")

	instanceConfigFromInstance := createInstanceConfigFromInstanceResponse.InstanceConfiguration

	// clean up resources
	defer func() {
		_, _ = deleteInstanceConfiguration(ctx, computeMgmtClient, *instanceConfiguration.Id)
		fmt.Println("Deleted instance configuration")

		_, _ = deleteInstanceConfiguration(ctx, computeMgmtClient, *instanceConfigFromInstance.Id)
		fmt.Println("Deleted instance configuration created from instance")

		terminateInstance(ctx, computeClient, instance.Id)
	}()

}

func createInstanceConfigFromInstance(ctx context.Context, client core.ComputeManagementClient, instanceId string,
	compartmentId string) (response core.CreateInstanceConfigurationResponse, err error) {

	displayName := "Instance Config From Instance Example"
	configurationDetails := core.CreateInstanceConfigurationFromInstanceDetails{
		DisplayName:   &displayName,
		CompartmentId: &compartmentId,
		InstanceId:    &instanceId,
	}

	req := core.CreateInstanceConfigurationRequest{
		CreateInstanceConfiguration: configurationDetails,
	}

	response, err = client.CreateInstanceConfiguration(ctx, req)
	helpers.FatalIfError(err)

	return
}

func launchInstanceConfiguration(ctx context.Context, client core.ComputeManagementClient, instanceConfigurationId string,
	subnetId string, availabilityDomain string) (response core.LaunchInstanceConfigurationResponse, err error) {

	req := core.LaunchInstanceConfigurationRequest{
		InstanceConfigurationId: &instanceConfigurationId,
		InstanceConfiguration: core.ComputeInstanceDetails{
			LaunchDetails: &core.InstanceConfigurationLaunchInstanceDetails{
				AvailabilityDomain: &availabilityDomain,
				CreateVnicDetails: &core.InstanceConfigurationCreateVnicDetails{
					SubnetId: &subnetId,
				},
			},
		},
	}

	response, err = client.LaunchInstanceConfiguration(ctx, req)
	helpers.FatalIfError(err)

	return
}

func pollUntilDesiredInstanceState(ctx context.Context, client core.ComputeClient, instanceId string) {
	// shouldRetryFunc returns true while the request should be retried;
	// here it retries until the instance reaches the Running lifecycle state
	shouldRetryFunc := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(core.GetInstanceResponse); ok {
			return converted.LifecycleState != core.InstanceLifecycleStateRunning
		}
		return true
	}

	// create get instance request with a retry policy which takes a function
	// to determine shouldRetry or not
	pollingGetRequest := core.GetInstanceRequest{
		InstanceId:      &instanceId,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryFunc),
	}

	_, pollError := client.GetInstance(ctx, pollingGetRequest)
	helpers.FatalIfError(pollError)
}

// Usage printing
func InstanceConfigsUsage() {
	log.Printf("Please set the following environment variables to run Instance Configuration sample")
	log.Printf(" ")
	log.Printf("   IMAGE_ID       # Required: Image Id to use")
	log.Printf("   COMPARTMENT_ID    # Required: Compartment Id to use")
	log.Printf("   AD          # Required: AD to use")
	log.Printf("   SUBNET_ID   # Required: Subnet to use")
	log.Printf(" ")
	os.Exit(1)
}

// Args parser
func InstanceConfigsParseEnvironmentVariables() {

	imageId = os.Getenv("IMAGE_ID")
	compartmentId = os.Getenv("COMPARTMENT_ID")
	ad = os.Getenv("AD")
	subnetId = os.Getenv("SUBNET_ID")

	if imageId == "" ||
		compartmentId == "" ||
		ad == "" ||
		subnetId == "" {
		InstanceConfigsUsage()
	}

	log.Printf("IMAGE_ID     : %s", imageId)
	log.Printf("COMPARTMENT_ID  : %s", compartmentId)
	log.Printf("AD     : %s", ad)
	log.Printf("SUBNET_ID  : %s", subnetId)
}
Output:

Instance configuration created
Instance launched
Instance configuration created from instance
Deleted instance configuration
Deleted instance configuration created from instance
terminating instance
instance terminated
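
terminateInstance is referenced above but not shown on this page. Judging from the "terminating instance" / "instance terminated" lines in the expected output, it likely looks close to the following hedged sketch, which reuses the retry pattern from pollUntilDesiredInstanceState:

// Hypothetical reconstruction of the terminateInstance helper.
func terminateInstance(ctx context.Context, c core.ComputeClient, id *string) {
	fmt.Println("terminating instance")

	req := core.TerminateInstanceRequest{
		InstanceId:      id,
		RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}
	_, err := c.TerminateInstance(ctx, req)
	helpers.FatalIfError(err)

	// poll until the instance reaches the Terminated lifecycle state
	shouldRetry := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(core.GetInstanceResponse); ok {
			return converted.LifecycleState != core.InstanceLifecycleStateTerminated
		}
		return true
	}
	pollReq := core.GetInstanceRequest{
		InstanceId:      id,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetry),
	}
	_, pollErr := c.GetInstance(ctx, pollReq)
	helpers.FatalIfError(pollErr)

	fmt.Println("instance terminated")
}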
Example (CreateAndUseSecurityTokenBasedConfiguration)
// Example code for using security token based auth

package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"os/user"
	"path/filepath"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
	"github.com/oracle/oci-go-sdk/v65/identity"
)

// This test requires a profile set up for security-token based authentication. For details on how to set
// up the configuration, please refer to https://docs.oracle.com/iaas/en-us/iaas/Content/API/SDKDocs/clitoken.htm?Highlight=security_token_file
// In this example the [security_token_based_auth] profile is used to list the Availability Domains in the current tenancy.
// The token expires after 1 hour; the user needs to check whether the token is still valid and, if not, refresh it via the OCI CLI.

const (
	profileName = "security_token_based_auth"
	cfgDirName  = ".oci"
	cfgFileName = "config"
)

func main() {
	homeFolder := getHomeFolder()
	configFilePath := filepath.Join(homeFolder, cfgDirName, cfgFileName)
	securityTokenBasedAuthConfigProvider := common.CustomProfileConfigProvider(configFilePath, profileName)
	c, err := identity.NewIdentityClientWithConfigurationProvider(securityTokenBasedAuthConfigProvider)
	helpers.FatalIfError(err)

	// The OCID of the tenancy containing the compartment.
	tenancyID, err := securityTokenBasedAuthConfigProvider.TenancyOCID()
	helpers.FatalIfError(err)

	request := identity.ListAvailabilityDomainsRequest{
		CompartmentId: &tenancyID,
	}

	r, err := c.ListAvailabilityDomains(context.Background(), request)
	helpers.FatalIfError(err)

	log.Printf("list of available domains: %v", r.Items)
	fmt.Println("list available domains completed")

}

func getHomeFolder() string {
	current, e := user.Current()
	if e != nil {
		//Give up and try to return something sensible
		home := os.Getenv("HOME")
		if home == "" {
			home = os.Getenv("USERPROFILE")
		}
		return home
	}
	return current.HomeDir
}
Output:

list available domains completed
Example (CreateAndWaitForRunningClusterNetwork)

Example to showcase cluster network creation, and eventual teardown

// Example code for Compute Management Services API
// This example shows how you can create and manage a Cluster Network. It will:
//
//   * Create the InstanceConfiguration with an HPC shape
//   * Create a cluster network of size 1 based off of that configuration
//   * Wait for the cluster network to go to Running state
//   * Clean everything up

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/core"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
)

var (
	clusterNetwork string
)

// Example to showcase cluster network creation, and eventual teardown
func main() {
	ClusterNetworkParseEnvironmentVariables()

	ctx := context.Background()

	computeMgmtClient, err := core.NewComputeManagementClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)

	createInstanceConfigurationResponse, _ := createInstanceConfigurationWithHpcShape(ctx, computeMgmtClient, imageId, compartmentId)
	fmt.Println("Instance configuration created")

	instanceConfiguration := createInstanceConfigurationResponse.InstanceConfiguration

	clusterNetwork, _ := createClusterNetwork(ctx, computeMgmtClient, *instanceConfiguration.Id, subnetId, ad, compartmentId)
	fmt.Println("Cluster Network created")

	// waiting until the cluster network reaches running state
	pollUntilClusterNetworkInDesiredState(ctx, computeMgmtClient, clusterNetwork, core.ClusterNetworkLifecycleStateRunning)

	// clean up resources
	defer func() {
		terminateClusterNetwork(ctx, computeMgmtClient, *clusterNetwork.Id)
		fmt.Println("Terminated Cluster Network")

		deleteInstanceConfiguration(ctx, computeMgmtClient, *instanceConfiguration.Id)
		fmt.Println("Deleted Instance Configuration")
	}()

}

// Usage printing
func ClusterNetworkUsage() {
	log.Printf("Please set the following environment variables to run Cluster Network sample")
	log.Printf(" ")
	log.Printf("   IMAGE_ID       # Required: Image Id to use")
	log.Printf("   COMPARTMENT_ID    # Required: Compartment Id to use")
	log.Printf("   AD          # Required: AD to use")
	log.Printf("   SUBNET_ID   # Required: Subnet to use")
	log.Printf(" ")
	os.Exit(1)
}

// Args parser
func ClusterNetworkParseEnvironmentVariables() {

	imageId = os.Getenv("IMAGE_ID")
	compartmentId = os.Getenv("COMPARTMENT_ID")
	ad = os.Getenv("AD")
	subnetId = os.Getenv("SUBNET_ID")

	if imageId == "" ||
		compartmentId == "" ||
		ad == "" ||
		subnetId == "" {
		ClusterNetworkUsage()
	}

	log.Printf("IMAGE_ID     : %s", imageId)
	log.Printf("COMPARTMENT_ID  : %s", compartmentId)
	log.Printf("AD     : %s", ad)
	log.Printf("SUBNET_ID  : %s", subnetId)
}

// helper method to create a cluster network
func createClusterNetwork(ctx context.Context, client core.ComputeManagementClient, instanceConfigurationId string,
	subnetId string, availabilityDomain string, compartmentId string) (response core.CreateClusterNetworkResponse, err error) {

	displayName := "Cluster Network Example"
	size := 1

	placementConfigurationDetails := core.ClusterNetworkPlacementConfigurationDetails{
		AvailabilityDomain: &availabilityDomain,
		PrimarySubnetId:    &subnetId,
	}

	req := core.CreateClusterNetworkRequest{
		CreateClusterNetworkDetails: core.CreateClusterNetworkDetails{
			CompartmentId:          &compartmentId,
			DisplayName:            &displayName,
			PlacementConfiguration: &placementConfigurationDetails,
			InstancePools: []core.CreateClusterNetworkInstancePoolDetails{
				{
					Size:                    &size,
					InstanceConfigurationId: &instanceConfigurationId,
				},
			},
		},
	}

	response, err = client.CreateClusterNetwork(ctx, req)
	return
}

// helper method to create an instance configuration
func createInstanceConfigurationWithHpcShape(ctx context.Context, client core.ComputeManagementClient, imageId string, compartmentId string) (response core.CreateInstanceConfigurationResponse, err error) {
	vnicDetails := core.InstanceConfigurationCreateVnicDetails{}

	sourceDetails := core.InstanceConfigurationInstanceSourceViaImageDetails{
		ImageId: &imageId,
	}

	displayName := "Instance Configuration HPC Example"
	shape := "BM.HPC2.36"

	launchDetails := core.InstanceConfigurationLaunchInstanceDetails{
		CompartmentId:     &compartmentId,
		DisplayName:       &displayName,
		CreateVnicDetails: &vnicDetails,
		Shape:             &shape,
		SourceDetails:     &sourceDetails,
	}

	instanceDetails := core.ComputeInstanceDetails{
		LaunchDetails: &launchDetails,
	}

	configurationDetails := core.CreateInstanceConfigurationDetails{
		DisplayName:     &displayName,
		CompartmentId:   &compartmentId,
		InstanceDetails: &instanceDetails,
	}

	req := core.CreateInstanceConfigurationRequest{
		CreateInstanceConfiguration: configurationDetails,
	}

	response, err = client.CreateInstanceConfiguration(ctx, req)
	helpers.FatalIfError(err)

	return
}

// helper method to terminate a cluster network
func terminateClusterNetwork(ctx context.Context, client core.ComputeManagementClient,
	clusterNetworkId string) (response core.TerminateClusterNetworkResponse, err error) {

	req := core.TerminateClusterNetworkRequest{
		ClusterNetworkId: &clusterNetworkId,
	}

	response, err = client.TerminateClusterNetwork(ctx, req)
	helpers.FatalIfError(err)

	return
}

// helper method to poll until a cluster network reaches the specified desired state
func pollUntilClusterNetworkInDesiredState(ctx context.Context, computeMgmtClient core.ComputeManagementClient,
	clusterNetwork core.CreateClusterNetworkResponse, desiredState core.ClusterNetworkLifecycleStateEnum) {
	// shouldRetry condition check: returns a bool indicating whether to retry the GET
	// in this case it retries until the cluster network's lifecycle state equals the desired state
	shouldRetryFunc := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(core.GetClusterNetworkResponse); ok {
			return converted.LifecycleState != desiredState
		}
		return true
	}
	// create get cluster network request with a retry policy which takes a function
	// to determine shouldRetry or not
	pollingGetRequest := core.GetClusterNetworkRequest{
		ClusterNetworkId: clusterNetwork.Id,
		RequestMetadata:  helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryFunc),
	}
	_, pollError := computeMgmtClient.GetClusterNetwork(ctx, pollingGetRequest)
	helpers.FatalIfError(pollError)
	fmt.Println("Cluster Network is", desiredState)
}
Output:

Instance configuration created
Cluster Network created
Cluster Network is RUNNING
Terminated Cluster Network
Deleted Instance Configuration
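The example above tears the cluster network down as soon as it reaches the Running state. If you also want to inspect the instances it launched, a helper along the following lines should be close; this is a sketch only, assuming the core package's ListClusterNetworkInstances operation and its request/response field names, so verify them against the SDK before relying on it.

// listClusterNetworkInstances prints the instances launched by a cluster network.
// Sketch only: ListClusterNetworkInstancesRequest and the Items field are assumptions to verify.
func listClusterNetworkInstances(ctx context.Context, client core.ComputeManagementClient,
	compartmentId string, clusterNetworkId string) {

	req := core.ListClusterNetworkInstancesRequest{
		CompartmentId:    &compartmentId,
		ClusterNetworkId: &clusterNetworkId,
	}

	resp, err := client.ListClusterNetworkInstances(ctx, req)
	helpers.FatalIfError(err)

	for _, instance := range resp.Items {
		fmt.Println("cluster network instance:", *instance.Id)
	}
}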
Example (CreateAndWaitForRunningInstancePool)

Example to showcase instance pool create and operations, and eventual teardown

// Example code for Compute Management Services API
// This example shows how you can create and manage an Instance Pool. It will:
//
//   * Create the InstanceConfiguration
//   * Create a pool of size 1 based off of that configuration
//   * Wait for the pool to go to Running state
//   * Update the pool to a size of 2
//   * Wait for the InstancePool to scale up
//   * Attach a load balancer to the pool
//   * Wait for the load balancer to become ATTACHED
//   * Clean everything up

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/core"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
)

var (
	imageId, ad, subnetId, loadBalancerId, loadBalancerBackendSetName, compartmentId string
)

// Example to showcase instance pool create and operations, and eventual teardown
func main() {
	InstancePoolsParseEnvironmentVariables()

	ctx := context.Background()

	computeMgmtClient, err := core.NewComputeManagementClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)

	createInstanceConfigurationResponse, _ := createInstanceConfiguration(ctx, computeMgmtClient, imageId, compartmentId)
	fmt.Println("Instance configuration created")

	instanceConfiguration := createInstanceConfigurationResponse.InstanceConfiguration

	instancePool, _ := createInstancePool(ctx, computeMgmtClient, *instanceConfiguration.Id, subnetId, ad, compartmentId)
	fmt.Println("Instance pool created")

	// waiting until the pool reaches running state
	pollUntilDesiredState(ctx, computeMgmtClient, instancePool, core.InstancePoolLifecycleStateRunning)

	setInstancePoolSize(ctx, computeMgmtClient, *instancePool.Id, 2)

	// updating the pool size will make it go to scaling state first and then to running
	pollUntilDesiredState(ctx, computeMgmtClient, instancePool, core.InstancePoolLifecycleStateScaling)
	pollUntilDesiredState(ctx, computeMgmtClient, instancePool, core.InstancePoolLifecycleStateRunning)

	// attach load balancer to the created pool
	attachLBtoInstancePool(ctx, computeMgmtClient, *instancePool.Id, loadBalancerId, loadBalancerBackendSetName)

	// poll for instance pool until the lb becomes attached
	pollUntilDesiredLBAttachmentState(ctx, computeMgmtClient, instancePool, core.InstancePoolLoadBalancerAttachmentLifecycleStateAttached)

	// gets the targeted load balancer information
	getLbAttachmentForPool(ctx, computeMgmtClient, *instancePool.Id)

	// clean up resources
	defer func() {
		terminateInstancePool(ctx, computeMgmtClient, *instancePool.Id)
		fmt.Println("Terminated Instance Pool")

		deleteInstanceConfiguration(ctx, computeMgmtClient, *instanceConfiguration.Id)
		fmt.Println("Deleted Instance Configuration")
	}()

}

// Usage printing
func InstancePoolsUsage() {
	log.Printf("Please set the following environment variables to run Instance Pool sample")
	log.Printf(" ")
	log.Printf("   IMAGE_ID       # Required: Image Id to use")
	log.Printf("   COMPARTMENT_ID    # Required: Compartment Id to use")
	log.Printf("   AD          # Required: AD to use")
	log.Printf("   SUBNET_ID   # Required: Subnet to use")
	log.Printf("   LB_ID   # Required: Load balancer to use")
	log.Printf("   LB_BACKEND_SET_NAME   # Required: Load balancer backend set name to use")
	log.Printf(" ")
	os.Exit(1)
}

// Args parser
func InstancePoolsParseEnvironmentVariables() {

	imageId = os.Getenv("IMAGE_ID")
	compartmentId = os.Getenv("COMPARTMENT_ID")
	ad = os.Getenv("AD")
	subnetId = os.Getenv("SUBNET_ID")
	loadBalancerId = os.Getenv("LB_ID")
	loadBalancerBackendSetName = os.Getenv("LB_BACKEND_SET_NAME")

	if imageId == "" ||
		compartmentId == "" ||
		ad == "" ||
		subnetId == "" ||
		loadBalancerId == "" ||
		loadBalancerBackendSetName == "" {
		InstancePoolsUsage()
	}

	log.Printf("IMAGE_ID     : %s", imageId)
	log.Printf("COMPARTMENT_ID  : %s", compartmentId)
	log.Printf("AD     : %s", ad)
	log.Printf("SUBNET_ID  : %s", subnetId)
	log.Printf("LB_ID  : %s", loadBalancerId)
	log.Printf("LB_BACKEND_SET_NAME  : %s", loadBalancerBackendSetName)
}

// helper method to create an instance configuration
func createInstanceConfiguration(ctx context.Context, client core.ComputeManagementClient, imageId string, compartmentId string) (response core.CreateInstanceConfigurationResponse, err error) {
	vnicDetails := core.InstanceConfigurationCreateVnicDetails{}

	sourceDetails := core.InstanceConfigurationInstanceSourceViaImageDetails{
		ImageId: &imageId,
	}

	displayName := "Instance Configuration Example"
	shape := "VM.Standard2.1"

	launchDetails := core.InstanceConfigurationLaunchInstanceDetails{
		CompartmentId:     &compartmentId,
		DisplayName:       &displayName,
		CreateVnicDetails: &vnicDetails,
		Shape:             &shape,
		SourceDetails:     &sourceDetails,
	}

	instanceDetails := core.ComputeInstanceDetails{
		LaunchDetails: &launchDetails,
	}

	configurationDetails := core.CreateInstanceConfigurationDetails{
		DisplayName:     &displayName,
		CompartmentId:   &compartmentId,
		InstanceDetails: &instanceDetails,
	}

	req := core.CreateInstanceConfigurationRequest{
		CreateInstanceConfiguration: configurationDetails,
	}

	response, err = client.CreateInstanceConfiguration(ctx, req)
	helpers.FatalIfError(err)

	return
}

// helper method to create an instance pool
func createInstancePool(ctx context.Context, client core.ComputeManagementClient, instanceConfigurationId string,
	subnetId string, availabilityDomain string, compartmentId string) (response core.CreateInstancePoolResponse, err error) {

	displayName := "Instance Pool Example"
	size := 1

	req := core.CreateInstancePoolRequest{
		CreateInstancePoolDetails: core.CreateInstancePoolDetails{
			CompartmentId:           &compartmentId,
			InstanceConfigurationId: &instanceConfigurationId,
			PlacementConfigurations: []core.CreateInstancePoolPlacementConfigurationDetails{
				{
					PrimarySubnetId:    &subnetId,
					AvailabilityDomain: &availabilityDomain,
				},
			},
			Size:        &size,
			DisplayName: &displayName,
		},
	}

	response, err = client.CreateInstancePool(ctx, req)
	return
}

// helper method to terminate an instance pool
func terminateInstancePool(ctx context.Context, client core.ComputeManagementClient,
	poolId string) (response core.TerminateInstancePoolResponse, err error) {

	req := core.TerminateInstancePoolRequest{
		InstancePoolId: &poolId,
	}

	response, err = client.TerminateInstancePool(ctx, req)
	helpers.FatalIfError(err)

	return
}

// helper method to delete an instance configuration
func deleteInstanceConfiguration(ctx context.Context, client core.ComputeManagementClient,
	instanceConfigurationId string) (response core.DeleteInstanceConfigurationResponse, err error) {

	req := core.DeleteInstanceConfigurationRequest{
		InstanceConfigurationId: &instanceConfigurationId,
	}

	response, err = client.DeleteInstanceConfiguration(ctx, req)
	helpers.FatalIfError(err)

	return
}

// helper method to update an instance pool size
func setInstancePoolSize(ctx context.Context, client core.ComputeManagementClient,
	poolId string, newSize int) (response core.UpdateInstancePoolResponse, err error) {

	updateDetails := core.UpdateInstancePoolDetails{
		Size: &newSize,
	}

	req := core.UpdateInstancePoolRequest{
		InstancePoolId:            &poolId,
		UpdateInstancePoolDetails: updateDetails,
	}

	response, err = client.UpdateInstancePool(ctx, req)
	helpers.FatalIfError(err)

	return
}

// helper method to attach a load balancer to an instance pool
func attachLBtoInstancePool(ctx context.Context, client core.ComputeManagementClient,
	poolId string, loadBalancerId string, lbBackendSetName string) (response core.AttachLoadBalancerResponse, err error) {

	port := 80
	vnic := "PrimaryVnic"

	attachDetails := core.AttachLoadBalancerDetails{
		LoadBalancerId: &loadBalancerId,
		BackendSetName: &lbBackendSetName,
		Port:           &port,
		VnicSelection:  &vnic,
	}

	req := core.AttachLoadBalancerRequest{
		InstancePoolId:            &poolId,
		AttachLoadBalancerDetails: attachDetails,
	}

	response, err = client.AttachLoadBalancer(ctx, req)
	helpers.FatalIfError(err)

	return
}

// helper method to poll until an instance pool reaches the specified desired state
func pollUntilDesiredState(ctx context.Context, computeMgmtClient core.ComputeManagementClient,
	instancePool core.CreateInstancePoolResponse, desiredState core.InstancePoolLifecycleStateEnum) {
	// shouldRetry condition check: returns a bool indicating whether to retry the GET
	// in this case it retries until the instance pool's lifecycle state equals the desired state
	shouldRetryFunc := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(core.GetInstancePoolResponse); ok {
			return converted.LifecycleState != desiredState
		}
		return true
	}
	// create get instance pool request with a retry policy which takes a function
	// to determine shouldRetry or not
	pollingGetRequest := core.GetInstancePoolRequest{
		InstancePoolId:  instancePool.Id,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryFunc),
	}
	_, pollError := computeMgmtClient.GetInstancePool(ctx, pollingGetRequest)
	helpers.FatalIfError(pollError)
	fmt.Println("Instance pool is", desiredState)
}

// helper method to poll until an instance pool lb attachment reaches the specified desired state
func pollUntilDesiredLBAttachmentState(ctx context.Context, computeMgmtClient core.ComputeManagementClient,
	instancePool core.CreateInstancePoolResponse, desiredState core.InstancePoolLoadBalancerAttachmentLifecycleStateEnum) {
	// shouldRetry condition check: returns a bool indicating whether to retry the GET
	// in this case it retries until every load balancer attachment reaches the desired state
	shouldRetryFunc := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(core.GetInstancePoolResponse); ok {
			attachments := converted.LoadBalancers

			for i := range attachments {
				if attachments[i].LifecycleState != desiredState {
					return true
				}
			}

			return false
		}
		return true
	}
	// create get instance pool request with a retry policy which takes a function
	// to determine shouldRetry or not
	pollingGetRequest := core.GetInstancePoolRequest{
		InstancePoolId:  instancePool.Id,
		RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryFunc),
	}
	_, pollError := computeMgmtClient.GetInstancePool(ctx, pollingGetRequest)
	helpers.FatalIfError(pollError)
	fmt.Println("Instance pool attachment is", desiredState)
}

// function showing how to get lb attachment info for a pool
func getLbAttachmentForPool(ctx context.Context, computeMgmtClient core.ComputeManagementClient,
	instancePoolId string) {

	// get fresh instance pool info, which after the attach now includes load balancer attachment information
	getReq := core.GetInstancePoolRequest{
		InstancePoolId: &instancePoolId,
	}

	instancePoolResp, _ := computeMgmtClient.GetInstancePool(ctx, getReq)

	// takes the 1st load balancer attachment id from the pool
	lbAttachmentId := instancePoolResp.LoadBalancers[0].Id

	req := core.GetInstancePoolLoadBalancerAttachmentRequest{
		InstancePoolId:                       &instancePoolId,
		InstancePoolLoadBalancerAttachmentId: lbAttachmentId,
	}

	response, _ := computeMgmtClient.GetInstancePoolLoadBalancerAttachment(ctx, req)
	fmt.Println("Instance pool attachment has vnic", *response.VnicSelection)
}
Output:

Instance configuration created
Instance pool created
Instance pool is RUNNING
Instance pool is SCALING
Instance pool is RUNNING
Instance pool attachment is ATTACHED
Instance pool attachment has vnic PrimaryVnic
Terminated Instance Pool
Deleted Instance Configuration
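The teardown above terminates the pool while the load balancer is still attached. If you prefer to detach the backend set explicitly before terminating, a sketch like the following should be close; it assumes the core package's DetachLoadBalancer operation and the field names of DetachLoadBalancerDetails, so double-check them against the SDK.

// detachLBFromInstancePool detaches a previously attached load balancer backend set from a pool.
// Sketch only: DetachLoadBalancerRequest / DetachLoadBalancerDetails field names are assumptions.
func detachLBFromInstancePool(ctx context.Context, client core.ComputeManagementClient,
	poolId string, loadBalancerId string, lbBackendSetName string) {

	req := core.DetachLoadBalancerRequest{
		InstancePoolId: &poolId,
		DetachLoadBalancerDetails: core.DetachLoadBalancerDetails{
			LoadBalancerId: &loadBalancerId,
			BackendSetName: &lbBackendSetName,
		},
	}

	_, err := client.DetachLoadBalancer(ctx, req)
	helpers.FatalIfError(err)
	fmt.Println("Detached load balancer from instance pool")
}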
Example (CreateDataset)

Example_createDataset creates a dataset in a given compartment.

compartment := "REPLACE_WITH_COMPARTMENT_OCID"
annotationFormat := "REPLACE_WITH_ANNOTATION_FORMAT"
datasetFormat := "REPLACE_WITH_DATASET_FORMAT"
namespace := "REPLACE_WITH_OBJECT_STORAGE_NAMESPACE"
bucketName := "REPLACE_WITH_OBJECT_STORAGE_BUCKET_NAME"
displayName := "REPLACE_WITH_DATASET_DISPLAY_NAME"
labelString := "REPLACE_WITH_LABEL_NAME"
description := "REPLACE_WITH_DATASET_DESCRIPTION"

client, err := datalabelingservice.NewDataLabelingManagementClientWithConfigurationProvider(common.DefaultConfigProvider())
if err != nil {
	panic(err)
}

fmt.Println("Creating dataset")

var addLabels []datalabelingservice.Label

addLabels = append(addLabels, datalabelingservice.Label{Name: common.String(labelString)})

// createDatasetRequest for the CreateDataset operation
createDatasetRequest := datalabelingservice.CreateDatasetRequest{
	CreateDatasetDetails: datalabelingservice.CreateDatasetDetails{
		CompartmentId:                        common.String(compartment),
		AnnotationFormat:                     common.String(annotationFormat),
		DatasetSourceDetails:                 map[string]interface{}{"sourceType": "OBJECT_STORAGE", "namespace": namespace, "bucket": bucketName},
		DatasetFormatDetails:                 map[string]interface{}{"formatType": datasetFormat},
		LabelSet:                             &datalabelingservice.LabelSet{Items: addLabels},
		DisplayName:                          common.String(displayName),
		Description:                          common.String(description),
		InitialRecordGenerationConfiguration: nil,
		FreeformTags:                         nil,
	},
}

// Send the request using the service client
_, datasetErr := client.CreateDataset(context.Background(), createDatasetRequest)
helpers.FatalIfError(datasetErr)

fmt.Println("Dataset creation completed")
fmt.Println("Done")
Example (CreateDbHomeBackupDestination)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

dbName := strings.ToLower(helpers.GetRandomString(8))
dbUniqueName := dbName + "_" + strings.ToLower(helpers.GetRandomString(20))
dbVersion := "18.0.0.0"
adminPassword := "DBaaS12345_#"
displayName := helpers.GetRandomString(32)

backupDestination := database.BackupDestinationDetails{
	Type: database.BackupDestinationDetailsTypeEnum("NFS"),
	Id:   common.String("backup-destination-ocid"),
}

dbBackupConfig := database.DbBackupConfig{
	BackupDestinationDetails: []database.BackupDestinationDetails{backupDestination},
}

// create database details
createDatabaseDetails := database.CreateDatabaseDetails{
	AdminPassword:  &adminPassword,
	DbName:         &dbName,
	DbUniqueName:   &dbUniqueName,
	DbBackupConfig: &dbBackupConfig,
}

// create dbhome details
createDbHomeDetails := database.CreateDbHomeWithVmClusterIdDetails{
	DisplayName: &displayName,
	Database:    &createDatabaseDetails,
	VmClusterId: common.String("vm-cluster-ocid"),
	DbVersion:   &dbVersion,
}

// create db home request
request := database.CreateDbHomeRequest{CreateDbHomeWithDbSystemIdDetails: createDbHomeDetails}

_, createErrors := c.CreateDbHome(context.Background(), request)

helpers.FatalIfError(createErrors)

fmt.Printf("Create DB Home with backupDestination completed")
Output:

Create DB Home with backupDestination completed
Example (CreateFreeAdb)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

createDbDetails := database.CreateAutonomousDatabaseDetails{
	CompartmentId:        helpers.CompartmentID(),
	DbName:               common.String("freeadb"),
	CpuCoreCount:         common.Int(1),
	DataStorageSizeInTBs: common.Int(1),
	AdminPassword:        common.String("DBaaS12345_#"),
	IsFreeTier:           common.Bool(true),
}

createadbReq := database.CreateAutonomousDatabaseRequest{
	CreateAutonomousDatabaseDetails: createDbDetails,
}

_, err := c.CreateAutonomousDatabase(context.Background(), createadbReq)
helpers.FatalIfError(err)

fmt.Println("create free adb successful")
Output:

create free adb successful
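CreateAutonomousDatabase returns as soon as the request is accepted; the database itself provisions asynchronously. To block until it is usable, you can reuse the customized-retry-policy polling pattern shown in the instance pool examples above. This is a minimal sketch, assuming the database package's GetAutonomousDatabase operation and the AutonomousDatabaseLifecycleStateAvailable constant (verify both against the SDK); adbId stands in for the OCID returned in the create response, which this example discards.

// waitForAdbAvailable polls GetAutonomousDatabase until the database reaches AVAILABLE.
// Sketch only: adbId is a placeholder for the OCID from the CreateAutonomousDatabase response.
func waitForAdbAvailable(ctx context.Context, c database.DatabaseClient, adbId string) {
	shouldRetry := func(r common.OCIOperationResponse) bool {
		if converted, ok := r.Response.(database.GetAutonomousDatabaseResponse); ok {
			return converted.LifecycleState != database.AutonomousDatabaseLifecycleStateAvailable
		}
		return true
	}

	req := database.GetAutonomousDatabaseRequest{
		AutonomousDatabaseId: common.String(adbId),
		RequestMetadata:      helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetry),
	}

	_, err := c.GetAutonomousDatabase(ctx, req)
	helpers.FatalIfError(err)
	fmt.Println("autonomous database is AVAILABLE")
}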
Example (CreateGenericJdbcConnection)

Example use case: Create a Generic JDBC connection

This example creates a Generic JDBC Database Tools Connection

Prerequisites:

  • An existing Vault for storage of secrets
  • A previously configured .oci/config file with a [DEFAULT] section
  • The following environment variable set:
  • OCI_VAULT_OCID : The ocid for a vault (to store secrets)

High-level Steps:

1- Store the secret in the Vault (as a base64 encoded string); a sketch of this step follows this example's output
2- Create the connection
3- Cleanup
cfg := newConfig()

// Create the secret and validate that it was properly created
passwordSecretId, _ := createSecretInVault(common.String("DatabasePassword"), cfg)
if passwordSecretId == nil || cfg.getVaultCompartmentId() == nil {
	log.Printf("There was an error. Either the password couldn't be created or there was no defined compartment id")
	return
}

// Create a new Generic JDBC connection
connectionCreationDetails := databasetools.CreateDatabaseToolsConnectionGenericJdbcDetails{
	UserName:       common.String("test-user"),
	UserPassword:   databasetools.DatabaseToolsUserPasswordSecretIdDetails{SecretId: passwordSecretId},
	CompartmentId:  cfg.getVaultCompartmentId(),
	Url:            common.String("jdbc:mysql://localhost:3306"),
	DisplayName:    common.String("dbtools-temp-connection-" + helpers.GetRandomString(10)),
	RuntimeSupport: databasetools.RuntimeSupportUnsupported,
}
connectionId, err := createDatabaseToolsConnection(connectionCreationDetails, cfg)
if err != nil {
	log.Printf("error creating the connection: %v\n", err)
}

// Cleanup
if err := deleteSecret(passwordSecretId, cfg); err != nil {
	log.Printf("error deleting secret: %v\n", err)
}
if err := deleteConnection(connectionId, cfg); err != nil {
	log.Printf("error deleting connection: %v\n", err)
}

fmt.Println("Example_createGenericJdbcConnection complete")
Output:

Example_createGenericJdbcConnection complete
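createSecretInVault and the other helpers used above are defined elsewhere in the example package and are not shown here. For orientation, storing the password as a base64 encoded secret could look roughly like the sketch below; it assumes the vault package's VaultsClient.CreateSecret API, that the Vault OCID and a master encryption key OCID are supplied by the caller, and that the field names match, so treat it as an outline rather than the example's actual helper.

// createPasswordSecret stores a password as a base64 encoded secret in an existing Vault.
// Sketch only: vaultId and keyId are assumed inputs; needs the encoding/base64 and
// github.com/oracle/oci-go-sdk/v65/vault imports.
func createPasswordSecret(ctx context.Context, compartmentId, vaultId, keyId, password string) *string {
	client, err := vault.NewVaultsClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)

	encoded := base64.StdEncoding.EncodeToString([]byte(password))

	req := vault.CreateSecretRequest{
		CreateSecretDetails: vault.CreateSecretDetails{
			CompartmentId: &compartmentId,
			VaultId:       &vaultId,
			KeyId:         &keyId,
			SecretName:    common.String("DatabasePassword" + helpers.GetRandomString(8)),
			SecretContent: vault.Base64SecretContentDetails{
				Content: &encoded,
			},
		},
	}

	resp, err := client.CreateSecret(ctx, req)
	helpers.FatalIfError(err)
	return resp.Id
}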
Example (CreateImageDetails_Polymorphic)

Example_createImageDetails_Polymorphic creates a boot disk image for the specified instance or imports an exported image from the Oracle Cloud Infrastructure Object Storage service.

request := core.CreateImageRequest{}
request.CompartmentId = helpers.CompartmentID()

// You can import an image based on an Object Storage URL ('core.ImageSourceViaObjectStorageUriDetails')
// or based on the namespace, bucket name and object name ('core.ImageSourceViaObjectStorageTupleDetails').
// The following example imports an image from an Object Storage URI; a sketch using the
// tuple variant follows this example.
sourceDetails := core.ImageSourceViaObjectStorageUriDetails{}
sourceDetails.SourceUri = common.String(objectStorageURIWtihImage)

request.ImageSourceDetails = sourceDetails

c, err := core.NewComputeClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)

_, err = c.CreateImage(context.Background(), request)
helpers.FatalIfError(err)
fmt.Println("image created")
Example (CreateLoadbalancer)
// Example code for Load Balancing Service API
// This example creates a new load balancer with SSL cipher suites. It then creates a listener with that SSL configuration, updates the backend set to use it, and finally updates the load balancer's cipher suites.

package main

import (
	"context"
	"fmt"
	"math"
	"time"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/core"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
	"github.com/oracle/oci-go-sdk/v65/identity"
	"github.com/oracle/oci-go-sdk/v65/loadbalancer"
)

const (
	loadbalancerDisplayName = "OCI-GO-Sample-LB"
	nsgDisplayNameOne       = "OCI-GOSDK-Sample-NSG-1"
	nsgDisplayNameTwo       = "OCI-GOSDK-Sample-NSG-2"
	listenerDisplayName     = "GO_SDK_Listener"
	rulesetOneName          = "ruleset1"
	backendSetOneName       = "backendset1"
	cipherName              = "test-cipher"
	certificateName         = "example-certificate"
	publicCert              = `-----BEGIN CERTIFICATE-----
    publicKeyGoesHere
    -----END CERTIFICATE-----`

	privateKey = `-----BEGIN RSA PRIVATE KEY-----
    PrivateKeyGoesHere
    -----END RSA PRIVATE KEY-----`
)

func main() {
	c, clerr := loadbalancer.NewLoadBalancerClientWithConfigurationProvider(common.DefaultConfigProvider())
	ctx := context.Background()
	helpers.FatalIfError(clerr)
	request := loadbalancer.CreateLoadBalancerRequest{}
	request.CompartmentId = helpers.CompartmentID()
	request.DisplayName = common.String(loadbalancerDisplayName)

	subnet1 := CreateOrGetSubnet()
	fmt.Println("create subnet1 complete")

	// create a subnet in different availability domain
	identityClient, err := identity.NewIdentityClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)
	req := identity.ListAvailabilityDomainsRequest{}
	req.CompartmentId = helpers.CompartmentID()
	response, err := identityClient.ListAvailabilityDomains(ctx, req)
	helpers.FatalIfError(err)
	availableDomain := response.Items[1].Name

	subnet2 := CreateOrGetSubnetWithDetails(common.String(subnetDisplayName2), common.String("10.0.1.0/24"), common.String("subnetdns2"), availableDomain)
	fmt.Println("create subnet2 complete")

	request.SubnetIds = []string{*subnet1.Id, *subnet2.Id}

	shapes := listLoadBalancerShapes(ctx, c)
	fmt.Println("list load balancer shapes complete")

	request.ShapeName = shapes[0].Name

	// Create rulesets to modify response / request headers or control access types based on REST request
	ruleSets := map[string]loadbalancer.RuleSetDetails{
		rulesetOneName: {
			Items: []loadbalancer.Rule{
				loadbalancer.AddHttpRequestHeaderRule{
					Header: common.String("some-header-name-to-add"),
					Value:  common.String("some-value-for-header"),
				},
				loadbalancer.RemoveHttpResponseHeaderRule{
					Header: common.String("some-header-name-to-remove"),
				},
				loadbalancer.ExtendHttpRequestHeaderValueRule{
					Header: common.String("some-other-header-name-to-alter"),
					Prefix: common.String("some-prefix-value-for-header"),
					Suffix: common.String("some-suffix-value-for-header"),
				},
				loadbalancer.AllowRule{
					Description: common.String("Allow traffic from internet clients"),
					Conditions: []loadbalancer.RuleCondition{
						loadbalancer.SourceIpAddressCondition{
							AttributeValue: common.String("111.111.111.111/32"),
						},
					},
				},
				loadbalancer.ControlAccessUsingHttpMethodsRule{
					AllowedMethods: []string{
						"PUT",
						"POST",
					},
					StatusCode: common.Int(403),
				},
			}},
	}
	request.RuleSets = ruleSets

	// ssl cipher suites for the load balancer

	sslCiphers := map[string]loadbalancer.SslCipherSuiteDetails{
		cipherName: {
			Name:    common.String(cipherName),
			Ciphers: []string{"AES128-SHA", "AES256-SHA"},
		},
	}

	request.SslCipherSuites = sslCiphers

	cert := map[string]loadbalancer.CertificateDetails{
		certificateName: {
			CertificateName:   common.String(certificateName),
			PrivateKey:        common.String(privateKey),
			PublicCertificate: common.String(publicCert),
			CaCertificate:     common.String(publicCert),
		},
	}

	request.Certificates = cert

	// Backend Sets for our new LB. Includes an LB Cookie session persistence configuration. Note that this is
	//   mutually exclusive with an (application cookie) SessionPersistenceConfiguration.
	backendSets := map[string]loadbalancer.BackendSetDetails{
		backendSetOneName: {
			Policy: common.String("ROUND_ROBIN"),
			HealthChecker: &loadbalancer.HealthCheckerDetails{
				Protocol: common.String("HTTP"),
				UrlPath:  common.String("/health"),
				Port:     common.Int(80),
			},
			Backends: []loadbalancer.BackendDetails{
				{
					IpAddress: common.String("10.11.10.5"),
					Port:      common.Int(80),
				},
				{
					IpAddress: common.String("10.12.20.3"),
					Port:      common.Int(80),
				},
			},
			LbCookieSessionPersistenceConfiguration: &loadbalancer.LbCookieSessionPersistenceConfigurationDetails{
				CookieName:      common.String("X-Oracle-OCI-cookie-1"),
				DisableFallback: common.Bool(true),
				Domain:          common.String("www.example.org"),
				Path:            common.String("/cookiepath1"),
				MaxAgeInSeconds: common.Int(300),
				IsSecure:        common.Bool(false),
				IsHttpOnly:      common.Bool(false),
			},
			SslConfiguration: &loadbalancer.SslConfigurationDetails{
				CertificateName:       common.String("example-certificate"),
				VerifyPeerCertificate: common.Bool(true),
				CipherSuiteName:       common.String(cipherName),
				Protocols:             []string{"TLSv1.1"},
			},
		},
	}
	request.BackendSets = backendSets

	listeners := map[string]loadbalancer.ListenerDetails{
		listenerDisplayName: {
			DefaultBackendSetName: common.String(backendSetOneName),
			Port:                  common.Int(80),
			Protocol:              common.String("HTTP"),
			RuleSetNames:          []string{rulesetOneName},
			SslConfiguration: &loadbalancer.SslConfigurationDetails{
				CertificateName:       common.String("example-certificate"),
				VerifyPeerCertificate: common.Bool(true),
				CipherSuiteName:       common.String(cipherName),
				Protocols:             []string{"TLSv1.1"},
			},
		},
	}

	request.Listeners = listeners

	_, err = c.CreateLoadBalancer(ctx, request)
	helpers.FatalIfError(err)

	fmt.Println("create load balancer complete")

	// get created loadbalancer
	getLoadBalancer := func() *loadbalancer.LoadBalancer {
		loadbalancers := listLoadBalancers(ctx, c, loadbalancer.LoadBalancerLifecycleStateActive)
		for _, element := range loadbalancers {
			if *element.DisplayName == loadbalancerDisplayName {
				// found it, return
				return &element
			}
		}

		return nil
	}

	attempts := uint(10)
	retryIfLBNotReady := func(r common.OCIOperationResponse) bool {
		loadBalancer := getLoadBalancer()
		if loadBalancer != nil {
			fieldLifecycle, err := helpers.FindLifecycleFieldValue(loadBalancer)

			if err != nil {
				common.Logf("Error getting lifecycleState. Error is %v", err)
				return true
			}

			lifecycleState := string(loadbalancer.LoadBalancerLifecycleStateActive)
			isEqual := fieldLifecycle == lifecycleState
			if isEqual {
				return false
			}
			common.Logf("Current lifecycle state is %s, waiting for it to become %s", fieldLifecycle, lifecycleState)
			return true
		}
		common.Logf("LB not available, waiting...")
		return true
	}

	nextDuration := func(r common.OCIOperationResponse) time.Duration {
		// this function will return the duration as:
		// 1s, 2s, 4s, 8s, 16s, 32s, 64s etc...
		return time.Duration(math.Pow(float64(2), float64(r.AttemptNumber-1))) * time.Second
	}

	defaultRetryPolicy := common.NewRetryPolicy(attempts, retryIfLBNotReady, nextDuration)

	request.RequestMetadata = common.RequestMetadata{
		RetryPolicy: &defaultRetryPolicy,
	}

	_, err = c.CreateLoadBalancer(ctx, request)
	helpers.FatalIfError(err)

	newCreatedLoadBalancer := getLoadBalancer()
	fmt.Printf("New loadbalancer LifecycleState is: %s\n\n", newCreatedLoadBalancer.LifecycleState)

	//Update cipher suites
	updateSSLCiphers(ctx, c, newCreatedLoadBalancer.Id)

	loadBalancerRuleSets := listRuleSets(ctx, c, newCreatedLoadBalancer.Id)
	fmt.Printf("Rule Sets from GET: %+v\n\n", loadBalancerRuleSets)

	newRuleSetResponse, err := addRuleSet(ctx, c, newCreatedLoadBalancer.Id)
	fmt.Printf("New rule set response: %+v\n\n", newRuleSetResponse)

	newBackendSetResponse, err := addBackendSet(ctx, c, newCreatedLoadBalancer.Id)
	fmt.Printf("New backend set: %+v\n\n", newBackendSetResponse)

	getListenerRulesResponse := listListenerRules(ctx, c, newCreatedLoadBalancer.Id, common.String(listenerDisplayName))
	fmt.Printf("Listener Rules: %+v\n\n", getListenerRulesResponse)

	vnClient, vclerr := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(vclerr)
	vcnId := subnet1.VcnId

	nsg1 := createNsg(ctx, vnClient, nsgDisplayNameOne, helpers.CompartmentID(), vcnId)
	nsg2 := createNsg(ctx, vnClient, nsgDisplayNameTwo, helpers.CompartmentID(), vcnId)

	var nsgArray = []string{*(nsg1.Id), *(nsg2.Id)}

	// Change Compartment (Requires second compartment to move the LB into)
	secondCompartmentId := helpers.RootCompartmentID()
	changeCompartmentResponse, err := changeLBCompartment(ctx, c, newCreatedLoadBalancer.Id, secondCompartmentId)
	fmt.Printf("Load balancer compartment changed: %+v", changeCompartmentResponse)

	//Update nsg call
	updateNsgWithLbr(ctx, c, newCreatedLoadBalancer.Id, nsgArray)

	// clean up resources
	defer func() {
		deleteLoadbalancer(ctx, c, newCreatedLoadBalancer.Id)

		vcnID := subnet1.VcnId
		deleteSubnet(ctx, vnClient, subnet1.Id)
		deleteSubnet(ctx, vnClient, subnet2.Id)
		deleteNsg(ctx, vnClient, nsg1.Id)
		deleteNsg(ctx, vnClient, nsg2.Id)
		deleteVcn(ctx, vnClient, vcnID)
	}()

}

func listLoadBalancerShapes(ctx context.Context, client loadbalancer.LoadBalancerClient) []loadbalancer.LoadBalancerShape {
	request := loadbalancer.ListShapesRequest{
		CompartmentId: helpers.CompartmentID(),
	}

	r, err := client.ListShapes(ctx, request)
	helpers.FatalIfError(err)
	return r.Items
}

func listLoadBalancers(ctx context.Context, client loadbalancer.LoadBalancerClient, lifecycleState loadbalancer.LoadBalancerLifecycleStateEnum) []loadbalancer.LoadBalancer {
	request := loadbalancer.ListLoadBalancersRequest{
		CompartmentId:  helpers.CompartmentID(),
		DisplayName:    common.String(loadbalancerDisplayName),
		LifecycleState: lifecycleState,
	}

	r, err := client.ListLoadBalancers(ctx, request)
	helpers.FatalIfError(err)
	return r.Items
}

func deleteLoadbalancer(ctx context.Context, client loadbalancer.LoadBalancerClient, id *string) {
	request := loadbalancer.DeleteLoadBalancerRequest{
		LoadBalancerId: id,
	}

	_, err := client.DeleteLoadBalancer(ctx, request)
	helpers.FatalIfError(err)
	fmt.Println("deleting load balancer")

	// get loadbalancer
	getLoadBalancer := func() *loadbalancer.LoadBalancer {
		loadbalancers := listLoadBalancers(ctx, client, loadbalancer.LoadBalancerLifecycleStateDeleting)
		for _, element := range loadbalancers {
			if *element.DisplayName == loadbalancerDisplayName {
				// found it, return
				return &element
			}
		}

		return nil
	}

	// use to check the lifecycle state of load balancer
	loadBalancerLifecycleStateCheck := func() (interface{}, error) {
		loadBalancer := getLoadBalancer()
		if loadBalancer != nil {
			return loadBalancer, nil
		}

		// cannot find load balancer which means it's been deleted
		return loadbalancer.LoadBalancer{LifecycleState: loadbalancer.LoadBalancerLifecycleStateDeleted}, nil
	}

	// wait for load balancer been deleted
	helpers.FatalIfError(
		helpers.RetryUntilTrueOrError(
			loadBalancerLifecycleStateCheck,
			helpers.CheckLifecycleState(string(loadbalancer.LoadBalancerLifecycleStateDeleted)),
			time.Tick(10*time.Second),
			time.After((10 * time.Minute))))

	fmt.Println("load balancer deleted")
}

// Add a new ruleset to an existing LB
func addRuleSet(ctx context.Context, client loadbalancer.LoadBalancerClient, id *string) (loadbalancer.CreateRuleSetResponse, error) {
	request := loadbalancer.CreateRuleSetRequest{}
	request.LoadBalancerId = id
	ruleSetDetails := loadbalancer.CreateRuleSetDetails{
		Name: common.String("ruleset2"),
		Items: []loadbalancer.Rule{
			loadbalancer.AddHttpResponseHeaderRule{
				Header: common.String("some-second-header-name-to-add"),
				Value:  common.String("some-second-value-for-header"),
			},
			loadbalancer.RemoveHttpRequestHeaderRule{
				Header: common.String("some-second-header-name-to-remove"),
			},
		},
	}
	request.CreateRuleSetDetails = ruleSetDetails

	response, err := client.CreateRuleSet(ctx, request)
	helpers.FatalIfError(err)
	println("ruleset added")
	return response, err
}

// Get a list of rulesets from a given LB
func listRuleSets(ctx context.Context, client loadbalancer.LoadBalancerClient, id *string) []loadbalancer.RuleSet {
	request := loadbalancer.ListRuleSetsRequest{
		LoadBalancerId: id,
	}

	r, err := client.ListRuleSets(ctx, request)
	helpers.FatalIfError(err)
	return r.Items
}

// Add a new backend set to an existing LB
func addBackendSet(ctx context.Context, client loadbalancer.LoadBalancerClient, id *string) (loadbalancer.CreateBackendSetResponse, error) {
	request := loadbalancer.CreateBackendSetRequest{}
	request.LoadBalancerId = id
	backendSetDetails := loadbalancer.CreateBackendSetDetails{
		Name:   common.String("backendset2"),
		Policy: common.String("ROUND_ROBIN"),
		HealthChecker: &loadbalancer.HealthCheckerDetails{
			Protocol: common.String("HTTP"),
			UrlPath:  common.String("/health"),
			Port:     common.Int(80),
		},
		Backends: []loadbalancer.BackendDetails{
			{
				IpAddress: common.String("10.11.10.5"),
				Port:      common.Int(80),
			},
			{
				IpAddress: common.String("10.12.20.3"),
				Port:      common.Int(80),
			},
		},
		LbCookieSessionPersistenceConfiguration: &loadbalancer.LbCookieSessionPersistenceConfigurationDetails{
			CookieName:      common.String("X-Oracle-OCI-cookie-2"),
			DisableFallback: common.Bool(true),
			Domain:          common.String("www.example.org"),
			Path:            common.String("/cookiepath2"),
			MaxAgeInSeconds: common.Int(300),
			IsSecure:        common.Bool(false),
			IsHttpOnly:      common.Bool(false),
		},
	}
	request.CreateBackendSetDetails = backendSetDetails

	response, err := client.CreateBackendSet(ctx, request)
	helpers.FatalIfError(err)
	println("backendset added")
	return response, err
}

// Update ssl ciphers
func updateSSLCiphers(ctx context.Context, c loadbalancer.LoadBalancerClient, loadBalancerId *string) {
	request := loadbalancer.UpdateSSLCipherSuiteRequest{
		RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}
	details := loadbalancer.UpdateSslCipherSuiteDetails{
		Ciphers: []string{"ECDHE-ECDSA-AES256-GCM-SHA384"},
	}
	request.LoadBalancerId = loadBalancerId
	request.UpdateSslCipherSuiteDetails = details
	_, err := c.UpdateSSLCipherSuite(ctx, request)
	helpers.FatalIfError(err)
}

// Create network security group
func createNsg(ctx context.Context, c core.VirtualNetworkClient, displayName string, compartmentId, vcnId *string) core.NetworkSecurityGroup {
	// create a new nsg
	createNsgRequest := core.CreateNetworkSecurityGroupRequest{
		RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}
	cnsgDetails := core.CreateNetworkSecurityGroupDetails{}
	cnsgDetails.CompartmentId = compartmentId
	cnsgDetails.DisplayName = common.String(displayName)
	cnsgDetails.VcnId = vcnId

	createNsgRequest.CreateNetworkSecurityGroupDetails = cnsgDetails

	r, err := c.CreateNetworkSecurityGroup(ctx, createNsgRequest)
	helpers.FatalIfError(err)
	return r.NetworkSecurityGroup
}

// Delete network security group
func deleteNsg(ctx context.Context, c core.VirtualNetworkClient, nsgId *string) {
	//delete the nsg
	deleteNsgRequest := core.DeleteNetworkSecurityGroupRequest{
		RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}

	deleteNsgRequest.NetworkSecurityGroupId = nsgId

	_, err := c.DeleteNetworkSecurityGroup(ctx, deleteNsgRequest)
	helpers.FatalIfError(err)
}

// Update nsg list with load balancer
func updateNsgWithLbr(ctx context.Context, c loadbalancer.LoadBalancerClient, loadBalancerId *string, networkSecurityGroupIds []string) {
	request := loadbalancer.UpdateNetworkSecurityGroupsRequest{
		RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}
	details := loadbalancer.UpdateNetworkSecurityGroupsDetails{
		NetworkSecurityGroupIds: networkSecurityGroupIds,
	}
	request.LoadBalancerId = loadBalancerId
	request.UpdateNetworkSecurityGroupsDetails = details

	_, err := c.UpdateNetworkSecurityGroups(ctx, request)
	helpers.FatalIfError(err)
}

// List Listener Rules for a given listener (by load balancer id and listener name)
func listListenerRules(ctx context.Context, client loadbalancer.LoadBalancerClient, id *string, name *string) []loadbalancer.ListenerRuleSummary {
	request := loadbalancer.ListListenerRulesRequest{
		LoadBalancerId: id,
		ListenerName:   name,
	}
	r, err := client.ListListenerRules(ctx, request)
	helpers.FatalIfError(err)
	return r.Items
}

// Move the LB to a new compartment
func changeLBCompartment(ctx context.Context, client loadbalancer.LoadBalancerClient, id *string, compartmentId *string) (loadbalancer.ChangeLoadBalancerCompartmentResponse, error) {
	request := loadbalancer.ChangeLoadBalancerCompartmentRequest{}
	request.LoadBalancerId = id
	changeCompartmentDetails := loadbalancer.ChangeLoadBalancerCompartmentDetails{
		CompartmentId: compartmentId,
	}
	request.ChangeLoadBalancerCompartmentDetails = changeCompartmentDetails
	response, err := client.ChangeLoadBalancerCompartment(ctx, request)
	helpers.FatalIfError(err)

	return response, err
}
Output:

create subnet1 complete
create subnet2 complete
list load balancer shapes complete
create load balancer complete
New loadbalancer LifecycleState is: ACTIVE
Rule Sets from GET: {}
New rule set response: {}
New backend set: {}
deleting load balancer
load balancer deleted
deleteing subnet
subnet deleted
deleteing subnet
subnet deleted
deleteing VCN
VCN deleted
Example (CreateMySqlConnectionWithPublicIp)

Example Use Case: Existing MySQL database with public IP (customer-managed)

This example creates a Database Tools Connection to a MySQL database accessible by public IP. Note, since this connection will be against a public IP address, a Database Tools Private Endpoint Reverse Connection is not required. Exposing a database directly to the Internet is not a recommended practice for security reasons. This example serves as an academic exercise of the SDK and proof of concept only.

Prerequisites:

  • An existing MySQL database on a compute node, for example
  • Firewall or security list entries allowing TCP traffic to MySQL
  • An existing Vault for storage of secrets
  • A previously configured .oci/config file with a [DEFAULT] section
  • The following environment variables set:
  • OCI_VAULT_OCID : The ocid for a vault (to store secrets)
  • OCI_DB_USER : The MySQL database user to connect with
  • OCI_DB_PASS : The MySQL database password to connect with
  • OCI_CONN_STRING : The MySQL connection string, as in mysql://host:port

High-level Steps:

1- Locate the Vault by the provided OCID
2- Store the secret in the Vault (as base64 encoded string)
3- Create a Database Tools Connection
4- Validate the connection

... cleanup when done (delete the temporary secret and connection)

                     Client
                       |
                       |
+----------------------+----------+
|                      V          |
|              +----------------+ |
|              | Database Tools | |
|              |    Service     | |
|              +----------------+ |
|                      |          |
| Database             |          |
| Tools                |          |
| VCN                  |          |
+----------------------+----------+
                       |
                       |
+----------------------+----------+
| Compute              |          |
| Node                 |          |
|                      |          |
|                      |          |
|                      V          |
|                  ---------      |
|                 /  MySQL  \     |
|                 \Public IP/     |
|                  ---------      |
|                                 |
+---------------------------------+
// Parses environment variables, .oci/config, and sets up the SDK clients
cfg := newConfig()

// Ignoring errors for simplicity
passwdSecretId, _ := createSecretInVault(cfg.Password, cfg)
dbConnectionId, _ := createDatabaseToolsConnectionMySqlPublicIp(passwdSecretId, cfg)

if ok := validateDatabaseToolsConnectionMySQL(dbConnectionId, cfg); ok {
	log.Println("connection is valid")
}

// ... cleanup resources when finished, comment out the following delete
// calls to keep the resources created above.
if err := deleteConnection(dbConnectionId, cfg); err != nil {
	log.Printf("error deleting connection: %v\n", err)
}
if err := deleteSecret(passwdSecretId, cfg); err != nil {
	log.Printf("error deleting secret: %v\n", err)
}

fmt.Println("Example_createMySqlConnectionWithPublicIp complete")
Output:

Example_createMySqlConnectionWithPublicIp complete
Example (CreateMySqlDbSystemConnectionWithPrivateEndpoint)

Example Use Case: MySQL DB System with Database Tools Private Endpoint

This example creates a Database Tools Connection to a MySQL DB System accessible by private IP. Note, since this connection will be against a private IP address, a Database Tools Private Endpoint Reverse Connection is required. This example serves as an academic exercise of the SDK.

Prerequisites:

  • An existing MySQL DB System in a VCN and associated subnet
  • Available capacity (limits apply) to create a new Private Endpoint
  • An existing Vault for storage of secrets
  • A previously configured .oci/config file with a [DEFAULT] section
  • The following environment variables set:
  • OCI_DBS_OCID : The ocid for a MySQL DB System
  • OCI_VAULT_OCID : The ocid for a vault (to store secrets)
  • OCI_DB_USER : The MySQL database user to connect with
  • OCI_DB_PASS : The MySQL database password to connect with

High-level Steps:

1- Locate the MySQL DB System by provided OCID
2- Locate the Vault by provided OCID
3- Store the secret in the Vault (as base64 encoded string)
4- Create a Database Tools Private Endpoint Reverse Connection
5- Create a Database Tools connection
6- Validate the connection

... cleanup when done (delete the temporary secret, connection, and PE)

                     Client
                       |
                       |
+----------------------+----------+
|                      V          |
|              +----------------+ |
|              | Database Tools | |
|              |    Service     | |
|              +----------------+ |
|                      |          |
| Database             |          |
| Tools                |          |
| VCN                  |          |
+----------------------+----------+
                       |
                       |
+----------------------+----------+
|                      |          |
|                      V          |
|                +-----------+    |
|                | Database  |    |
|                |  Tools    |    |
|                | Private   |    |
|                | Endpoint  |    |
|                +-----------+    |
|                      |          |
|                      |          |
|                      V          |
|                  ---------      |
|                 /  MDS    \     |
|                | Private  |     |
|                \   IP    /      |
|                 ---------       |
|                                 |
| Customer                        |
| VCN (jump host not required)    |
+---------------------------------+
// Parses environment variables, .oci/config, and sets up the SDK clients
cfg := newConfig()

// Ignoring errors for simplicity
privateEndpointId, _ := createDbToolsPrivateEndpoint(cfg)
passwdSecretId, _ := createSecretInVault(cfg.Password, cfg)
dbConnectionId, _ := createDatabaseToolsConnectionMySql(passwdSecretId, privateEndpointId, cfg)

if ok := validateDatabaseToolsConnectionMySQL(dbConnectionId, cfg); ok {
	log.Println("connection is valid")
}

// ... cleanup resources when finished, comment out the following delete
// calls to keep the resources created above.
if err := deleteConnection(dbConnectionId, cfg); err != nil {
	log.Printf("error deleting connection: %v\n", err)
}
if err := deleteSecret(passwdSecretId, cfg); err != nil {
	log.Printf("error deleting secret: %v\n", err)
}
if err := deletePrivateEndpoint(privateEndpointId, cfg); err != nil {
	log.Printf("error deleting private endpoint: %v\n", err)
}

fmt.Println("Example_createMySqlDbSystemConnectionWithPrivateEndpoint complete")
Output:

Example_createMySqlDbSystemConnectionWithPrivateEndpoint complete
Example (CreateNFSBackupDestination)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)
displayNameBackupDestination := helpers.GetRandomString(32)
compartmentId := os.Getenv("OCI_COMPARTMENT_ID")

createBackupDestinationDetails := database.CreateNfsBackupDestinationDetails{
	CompartmentId:       &compartmentId,
	DisplayName:         &displayNameBackupDestination,
	LocalMountPointPath: common.String("path"),
}

createbackupdestinationReq := database.CreateBackupDestinationRequest{
	CreateBackupDestinationDetails: createBackupDestinationDetails,
}

_, err := c.CreateBackupDestination(context.Background(), createbackupdestinationReq)
helpers.FatalIfError(err)

fmt.Println("create backup destination is successful")
Output:

create backup destination is successful
Example (CreateRecord)
datasetId := "REPLACE_WITH_DATASET_OCID"
compartment := "REPLACE_WITH_COMPARTMENT_OCID"
namespace := "REPLACE_WITH_OBJECT_STORAGE_NAMESPACE"
bucketName := "REPLACE_WITH_OBJECT_STORAGE_BUCKET_NAME"
objectName := "REPLACE_WITH_OBJECT_NAME"

client, err := datalabelingservicedataplane.NewDataLabelingClientWithConfigurationProvider(common.DefaultConfigProvider())
if err != nil {
	panic(err)
}

fmt.Println("Record creation")

objectPath := fmt.Sprintf("/n/%s/b/%s/o/%s", namespace, bucketName, objectName)

sourceDetails := datalabelingservicedataplane.ObjectStorageSourceDetails{
	RelativePath: common.String(objectName),
	Path:         common.String(objectPath),
}
name := common.String(objectName)

createRecordRequest := datalabelingservicedataplane.CreateRecordRequest{
	CreateRecordDetails: datalabelingservicedataplane.CreateRecordDetails{
		Name:          name,
		DatasetId:     common.String(datasetId),
		CompartmentId: common.String(compartment),
		SourceDetails: sourceDetails,
		FreeformTags:  nil,
		DefinedTags:   nil,
	},
	OpcRetryToken:   nil,
	OpcRequestId:    nil,
	RequestMetadata: common.RequestMetadata{},
}

_, recordErr := client.CreateRecord(context.Background(), createRecordRequest)
helpers.FatalIfError(recordErr)

fmt.Println("Record creation succeeded")
Output:

Record creation
Record creation succeeded
Example (CreateServiceGateway)
displayName := "OCI-GOSDK-CreateServiceGateway-Example" // displayName for created VCN and ServiceGateway
compartmentID := os.Getenv("OCI_COMPARTMENT_ID")        // OCI_COMPARTMENT_ID env variable must be defined

// initialize VirtualNetworkClient
client, err := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)
ctx := context.Background()

// create VCN
createVcnRequest := core.CreateVcnRequest{
	RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
}
createVcnRequest.CompartmentId = common.String(compartmentID)
createVcnRequest.DisplayName = common.String(displayName)
createVcnRequest.CidrBlock = common.String("10.0.0.0/16")
createVcnResponse, err := client.CreateVcn(ctx, createVcnRequest)
helpers.FatalIfError(err)

// create ServiceGateway
createServiceGatewayRequest := core.CreateServiceGatewayRequest{
	RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
}
createServiceGatewayRequest.CompartmentId = common.String(compartmentID)
createServiceGatewayRequest.DisplayName = common.String(displayName)
createServiceGatewayRequest.VcnId = createVcnResponse.Id
createServiceGatewayRequest.Services = []core.ServiceIdRequestDetails{}
_, err = client.CreateServiceGateway(ctx, createServiceGatewayRequest)
helpers.FatalIfError(err)

fmt.Println("ServiceGateway created")
Output:

ServiceGateway created
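The example above creates the service gateway with an empty Services list. To enable a service you need its service OCID first, which you can look up through the VirtualNetworkClient before calling CreateServiceGateway. This is a sketch only, assuming the ListServices operation, the fields of core.Service and the ServiceId field of core.ServiceIdRequestDetails; verify them against the SDK.

// Look up the services available to service gateways and enable the first one.
// Sketch only: run this before calling CreateServiceGateway in the example above.
listServicesResponse, err := client.ListServices(ctx, core.ListServicesRequest{})
helpers.FatalIfError(err)

if len(listServicesResponse.Items) > 0 {
	service := listServicesResponse.Items[0]
	fmt.Println("enabling service:", *service.Name)
	createServiceGatewayRequest.Services = []core.ServiceIdRequestDetails{
		{ServiceId: service.Id},
	}
}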
Example (CreateVcn)
displayName := "OCI-GOSDK-CreateVcn-Example"
compartmentID := os.Getenv("OCI_COMPARTMENT_ID") // OCI_COMPARTMENT_ID env variable must be defined

// initialize VirtualNetworkClient
client, err := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)
ctx := context.Background()

// create VCN
createVcnRequest := core.CreateVcnRequest{
	RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
}
createVcnRequest.CompartmentId = common.String(compartmentID)
createVcnRequest.DisplayName = common.String(displayName)
createVcnRequest.CidrBlock = common.String("10.0.0.0/16")
_, err = client.CreateVcn(ctx, createVcnRequest)
helpers.FatalIfError(err)

fmt.Println("VCN created")
Output:

VCN created
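If the VCN is only needed temporarily, it can be removed again with DeleteVcn once it has no attached resources. A minimal cleanup sketch, which keeps the create response that the example above discards:

// Create the VCN, keep its OCID, then delete it again. Sketch only.
createVcnResponse, err := client.CreateVcn(ctx, createVcnRequest)
helpers.FatalIfError(err)

deleteVcnRequest := core.DeleteVcnRequest{
	RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	VcnId:           createVcnResponse.Id,
}
_, err = client.DeleteVcn(ctx, deleteVcnRequest)
helpers.FatalIfError(err)
fmt.Println("VCN deleted")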
Example (CryptoOperations)
vaultClient, clientError := keymanagement.NewKmsVaultClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clientError)

ctx := context.Background()
vaultName := "KmsVault"
keyName := "KmsKey"
testInput := "CryptoOps Test Input"

vault := createVault(ctx, vaultClient, vaultName)
defer cleanupResources(ctx, vaultClient, vault.Id)
// wait for instance lifecycle state becomes active
waitForStateVaultClient(ctx, vault.Id, vaultClient, keymanagement.VaultLifecycleStateActive)

vaultManagementClient, mgmtClientError := keymanagement.
	NewKmsManagementClientWithConfigurationProvider(common.DefaultConfigProvider(), *vault.ManagementEndpoint)
helpers.FatalIfError(mgmtClientError)

// Create Key
key, keyShape := createKey(ctx, vaultManagementClient, &keyName)

// Create crypto client
vaultCryptoClient, cryptoClientError := keymanagement.
	NewKmsCryptoClientWithConfigurationProvider(common.DefaultConfigProvider(), *vault.CryptoEndpoint)
helpers.FatalIfError(cryptoClientError)

// Generate DEK
includePlaintextKeyInResponse := true
generateKeyDetails := keymanagement.GenerateKeyDetails{
	KeyId:               key.Id,
	KeyShape:            &keyShape,
	IncludePlaintextKey: &includePlaintextKeyInResponse,
}
generateDekRequest := keymanagement.GenerateDataEncryptionKeyRequest{
	GenerateKeyDetails: generateKeyDetails,
}

generateDekResponse, err := vaultCryptoClient.GenerateDataEncryptionKey(ctx, generateDekRequest)
helpers.FatalIfError(err)
generatedKey := generateDekResponse.GeneratedKey
fmt.Printf("Plaintext generated DEK: %s\n", *generatedKey.Plaintext)

fmt.Println("generate DEK")

// Encrypt
encryptedDataDetails := keymanagement.EncryptDataDetails{
	KeyId:     key.Id,
	Plaintext: &testInput,
}
encryptRequest := keymanagement.EncryptRequest{
	EncryptDataDetails: encryptedDataDetails,
}

encryptResponse, encryptErr := vaultCryptoClient.Encrypt(ctx, encryptRequest)
helpers.FatalIfError(encryptErr)

cipherText := encryptResponse.Ciphertext

fmt.Print("encrypt data")

// Decrypt
decryptDataDetails := keymanagement.DecryptDataDetails{
	KeyId:      key.Id,
	Ciphertext: cipherText,
}
decryptRequest := keymanagement.DecryptRequest{
	DecryptDataDetails: decryptDataDetails,
}
decryptResponse, decryptErr := vaultCryptoClient.Decrypt(ctx, decryptRequest)
helpers.FatalIfError(decryptErr)

plainText := decryptResponse.Plaintext
fmt.Printf("Decrypted plaintext: %s\n", *plainText)

fmt.Print("decrypt data")
Output:

create vault
create key
Plaintext generated DEK: <generated key>
generate DEK
encrypt data
Decrypted plaintext: CryptoOps Test Input
decrypt data
schedule vault deletion
Example (CustomRetry)

Example_customRetry shows how to use a retry policy when creating and deleting groups; please refer to example_core_test.go->Example_launchInstance for more examples

// create and delete group with retry
client, clerr := identity.NewIdentityClientWithConfigurationProvider(common.DefaultConfigProvider())
ctx := context.Background()
helpers.FatalIfError(clerr)

request := identity.CreateGroupRequest{}
request.CompartmentId = helpers.RootCompartmentID()
request.Name = common.String("GoSDK_Sample_Group")
request.Description = common.String("GoSDK Sample Group Description")

// maximum times of retry
attempts := uint(10)

// retry on any non-2xx status code
retryOnAllNon200ResponseCodes := func(r common.OCIOperationResponse) bool {
	return !(r.Error == nil && 199 < r.Response.HTTPResponse().StatusCode && r.Response.HTTPResponse().StatusCode < 300)
}

customRetryPolicy := common.NewRetryPolicyWithOptions(
	// since this retries on ANY non-2xx response, we don't need special handling for eventual consistency
	common.ReplaceWithValuesFromRetryPolicy(common.DefaultRetryPolicyWithoutEventualConsistency()),
	common.WithMaximumNumberAttempts(attempts),
	common.WithShouldRetryOperation(retryOnAllNon200ResponseCodes),
)

// create request metadata for retry
request.RequestMetadata = common.RequestMetadata{
	RetryPolicy: &customRetryPolicy,
}

resp, err := client.CreateGroup(ctx, request)
helpers.FatalIfError(err)
fmt.Println("Creating Group")

// Get with polling
shouldRetry := func(r common.OCIOperationResponse) bool {
	if _, isServiceError := common.IsServiceError(r.Error); isServiceError {
		// the request reached the service but returned an error (for example, the group may
		// not be visible yet due to eventual consistency), so retry
		return true
	}

	if converted, ok := r.Response.(identity.GetGroupResponse); ok {
		// retry until the lifecycle state becomes Active
		return converted.LifecycleState != identity.GroupLifecycleStateActive
	}

	return true
}

lifecycleStateCheckRetryPolicy := common.NewRetryPolicyWithOptions(
	// since this retries on ANY error response, we don't need special handling for eventual consistency
	common.ReplaceWithValuesFromRetryPolicy(common.DefaultRetryPolicyWithoutEventualConsistency()),
	common.WithMaximumNumberAttempts(attempts),
	common.WithShouldRetryOperation(shouldRetry),
)

getRequest := identity.GetGroupRequest{
	GroupId: resp.Id,
	RequestMetadata: common.RequestMetadata{
		RetryPolicy: &lifecycleStateCheckRetryPolicy,
	},
}

_, errAfterPolling := client.GetGroup(ctx, getRequest)
helpers.FatalIfError(errAfterPolling)
fmt.Println("Group Created")

defer func() {
	// if we've successfully created a group, make sure that we delete it
	rDel := identity.DeleteGroupRequest{
		GroupId: resp.Id,
		RequestMetadata: common.RequestMetadata{
			RetryPolicy: &customRetryPolicy,
		},
	}

	_, err = client.DeleteGroup(ctx, rDel)
	helpers.FatalIfError(err)
	fmt.Println("Group Deleted")
}()
Output:

Creating Group
Group Created
Group Deleted
Example (DeleteBackupDestination)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

deletebackupDestinationReq := database.DeleteBackupDestinationRequest{
	BackupDestinationId: common.String("backup-destination-ocid"),
}

_, err := c.DeleteBackupDestination(context.Background(), deletebackupDestinationReq)
helpers.FatalIfError(err)

fmt.Println("delete backup destination is successful")
Output:

delete backup destination is successful
Example (DeleteDataset)

Example_deleteDataset deletes an existing dataset.

datasetId := "REPLACE_WITH_DATASET_OCID"

client, err := datalabelingservice.NewDataLabelingManagementClientWithConfigurationProvider(common.DefaultConfigProvider())
if err != nil {
	panic(err)
}

fmt.Println("Deleting dataset")

// deleteDatasetRequest creates the request for deleting the dataset
deleteDatasetRequest := datalabelingservice.DeleteDatasetRequest{
	DatasetId: common.String(datasetId),
}

// Send the request using the service client
_, datasetErr := client.DeleteDataset(context.Background(), deleteDatasetRequest)
helpers.FatalIfError(datasetErr)
fmt.Println("Dataset deleted")
Example (DeleteRecord)
recordId := "REPLACE_WITH_RECORD_OCID"

client, err := datalabelingservicedataplane.NewDataLabelingClientWithConfigurationProvider(common.DefaultConfigProvider())
if err != nil {
	panic(err)
}

fmt.Println("Deleting record.")

deleteRecordRequest := datalabelingservicedataplane.DeleteRecordRequest{
	RecordId: common.String(recordId),
}
_, recordErr := client.DeleteRecord(context.Background(), deleteRecordRequest)
helpers.FatalIfError(recordErr)

fmt.Println("Record deletion succeeded.")
fmt.Println("Done")
Output:

Deleting record.
Record deletion succeeded.
Done
Example (DnsSteeringPolicy)

Example_dnsSteeringPolicy creates, gets, lists, and deletes a DNS Steering Policy. If the optional TARGET_COMPARTMENT_ID env var is set, it also moves the DNS Steering Policy to that compartment.

client, err := dns.NewDnsClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)

// Optional parsed value from env var TARGET_COMPARTMENT_ID
targetCompartmentId := os.Getenv("TARGET_COMPARTMENT_ID")
log.Printf("TARGET_COMPARTMENT_ID: %s", targetCompartmentId)

ctx := context.Background()

// Create a new steering policy
displayName := common.String(helpers.GetRandomString(15))
createReq := dns.CreateSteeringPolicyRequest{
	CreateSteeringPolicyDetails: dns.CreateSteeringPolicyDetails{
		CompartmentId: helpers.CompartmentID(),
		DisplayName:   displayName,
		Template:      dns.CreateSteeringPolicyDetailsTemplateLoadBalance,
		Ttl:           common.Int(60),
	},
}
createResp, err := client.CreateSteeringPolicy(ctx, createReq)
helpers.FatalIfError(err)
fmt.Printf("created dns steering policy %s", *displayName)

// the logic below waits until the steering policy reaches the Active state
pollUntilAvailable := func(r common.OCIOperationResponse) bool {
	if converted, ok := r.Response.(dns.GetSteeringPolicyResponse); ok {
		return converted.LifecycleState != dns.SteeringPolicyLifecycleStateActive
	}
	return true
}
getRequest := dns.GetSteeringPolicyRequest{
	SteeringPolicyId: createResp.Id,
	RequestMetadata:  helpers.GetRequestMetadataWithCustomizedRetryPolicy(pollUntilAvailable),
}
getResp, err := client.GetSteeringPolicy(ctx, getRequest)
helpers.FatalIfError(err)
fmt.Printf("get dns steering policy %s", *displayName)

listResp, err := client.ListSteeringPolicies(ctx, dns.ListSteeringPoliciesRequest{
	CompartmentId:       helpers.CompartmentID(),
	DisplayNameContains: displayName,
	Limit:               common.Int64(10),
	SortBy:              dns.ListSteeringPoliciesSortByTimecreated,
	SortOrder:           dns.ListSteeringPoliciesSortOrderAsc,
})
helpers.FatalIfError(err)
fmt.Println("list dns steering policies")
log.Printf("count of dns steering policies in compartment %s: %d", *helpers.CompartmentID(), listResp.OpcTotalItems)

if targetCompartmentId != "" && targetCompartmentId != *helpers.CompartmentID() {
	changeRequest := dns.ChangeSteeringPolicyCompartmentRequest{
		SteeringPolicyId: getResp.Id,
		ChangeSteeringPolicyCompartmentDetails: dns.ChangeSteeringPolicyCompartmentDetails{
			CompartmentId: &targetCompartmentId,
		},
	}
	_, err := client.ChangeSteeringPolicyCompartment(ctx, changeRequest)
	helpers.FatalIfError(err)
	fmt.Printf("change dns steering policy compartment to %s", targetCompartmentId)
}

// Clean up
defer func() {
	_, err = client.DeleteSteeringPolicy(ctx, dns.DeleteSteeringPolicyRequest{
		SteeringPolicyId: getResp.Id,
	})
	helpers.FatalIfError(err)
}()
Output:

created dns steering policy
get dns steering policy
list dns steering policy
deleted dns steering policy
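
The policy created above is never attached to a zone domain. The snippet below is only a sketch of how an attachment could be created with the same client; the attachment request/details type names and fields are assumptions about the dns package, and the zone OCID and domain name are placeholders.

// Sketch: attach the steering policy to a domain in an existing zone.
attachReq := dns.CreateSteeringPolicyAttachmentRequest{
	CreateSteeringPolicyAttachmentDetails: dns.CreateSteeringPolicyAttachmentDetails{
		SteeringPolicyId: createResp.Id,
		ZoneId:           common.String("REPLACE_WITH_ZONE_OCID"),
		DomainName:       common.String("app.example.com"),
	},
}
_, attachErr := client.CreateSteeringPolicyAttachment(ctx, attachReq)
helpers.FatalIfError(attachErr)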
Example (DnsZone)

Example_dnsZone creates, gets, lists, and deletes a DNS Zone. If the optional TARGET_COMPARTMENT_ID env var is set, it also moves the DNS Zone to that compartment.

client, err := dns.NewDnsClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)

// Optional parsed value from env var TARGET_COMPARTMENT_ID
targetCompartmentId := os.Getenv("TARGET_COMPARTMENT_ID")
log.Printf("TARGET_COMPARTMENT_ID: %s", targetCompartmentId)

ctx := context.Background()

// Create a new zone
zoneName := common.String("testdomain." + helpers.GetRandomString(15))
createReq := dns.CreateZoneRequest{
	CreateZoneDetails: dns.CreateZoneDetails{
		CompartmentId: helpers.CompartmentID(),
		Name:          zoneName,
		ZoneType:      dns.CreateZoneDetailsZoneTypePrimary,
	},
}
createResp, err := client.CreateZone(ctx, createReq)
helpers.FatalIfError(err)
fmt.Printf("created dns zone %s", *zoneName)

// the logic below waits until the zone reaches the Active state
pollUntilAvailable := func(r common.OCIOperationResponse) bool {
	if converted, ok := r.Response.(dns.GetZoneResponse); ok {
		return converted.LifecycleState != dns.ZoneLifecycleStateActive
	}
	return true
}
getRequest := dns.GetZoneRequest{
	ZoneNameOrId:    createResp.Id,
	CompartmentId:   helpers.CompartmentID(),
	RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(pollUntilAvailable),
}
getResp, err := client.GetZone(ctx, getRequest)
helpers.FatalIfError(err)
fmt.Printf("get dns zone %s", *zoneName)

listResp, err := client.ListZones(ctx, dns.ListZonesRequest{
	CompartmentId: helpers.CompartmentID(),
	NameContains:  zoneName,
	Limit:         common.Int64(10),
	SortBy:        dns.ListZonesSortByTimecreated,
	SortOrder:     dns.ListZonesSortOrderAsc,
})
helpers.FatalIfError(err)
fmt.Println("list dns zones")
log.Printf("count of dns zones in compartment %s: %d", *helpers.CompartmentID(), listResp.OpcTotalItems)

if targetCompartmentId != "" && targetCompartmentId != *helpers.CompartmentID() {
	changeRequest := dns.ChangeZoneCompartmentRequest{
		ZoneId: getResp.Id,
		ChangeZoneCompartmentDetails: dns.ChangeZoneCompartmentDetails{
			CompartmentId: &targetCompartmentId,
		},
	}
	_, err := client.ChangeZoneCompartment(ctx, changeRequest)
	helpers.FatalIfError(err)
	fmt.Printf("change dns zone compartment to %s", targetCompartmentId)
}

// Clean up
defer func() {
	_, err = client.DeleteZone(ctx, dns.DeleteZoneRequest{
		ZoneNameOrId: getResp.Id,
	})
	helpers.FatalIfError(err)
}()
Output:

created dns zone
get dns zone
list dns zone
deleted dns zone
Example (EmailSender)
// Example code for Email Delivery Service API

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/email"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
)

const (
	// The address of the email sender
	senderEmailAddress = "sample@sample.com"
)

func main() {
	client, err := email.NewEmailClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)

	ctx := context.Background()

	createReq := email.CreateSenderRequest{
		CreateSenderDetails: email.CreateSenderDetails{
			CompartmentId: helpers.CompartmentID(),
			EmailAddress:  common.String(senderEmailAddress),
		},
	}

	createResp, err := client.CreateSender(ctx, createReq)
	helpers.FatalIfError(err)
	fmt.Println("email sender created")

	getReq := email.GetSenderRequest{
		SenderId: createResp.Id,
	}

	getResp, err := client.GetSender(ctx, getReq)
	helpers.FatalIfError(err)
	fmt.Println("get email sender")
	log.Printf("get email sender with email address %s\n", *getResp.EmailAddress)

	// you can provide additional filters and sorts, here lists all senders
	// sorted by email address and filter by email address
	listReq := email.ListSendersRequest{
		CompartmentId: helpers.CompartmentID(),
		SortBy:        email.ListSendersSortByEmailaddress,
		SortOrder:     email.ListSendersSortOrderAsc,
	}

	listResp, err := client.ListSenders(ctx, listReq)
	helpers.FatalIfError(err)
	log.Printf("list email senders return %v results\n", len(listResp.Items))
	fmt.Println("list email senders")

	defer func() {
		deleteReq := email.DeleteSenderRequest{
			SenderId: getReq.SenderId,
		}

		_, err = client.DeleteSender(ctx, deleteReq)
		helpers.FatalIfError(err)
		fmt.Println("email sender deleted")
	}()

}
Output:

email sender created
get email sender
list email senders
email sender deleted
Example (EventuallyConsistentRetryBehavior_Default)

This example simulates the behavior of retry strategies with respect to eventual consistency. The eventually consistent operation it calls is CreateGroup in the Identity service. It then makes a number of GetInstance requests in the Compute service that are guaranteed to fail with 404-NotAuthorizedOrNotFound, because the OCID is not a real instance OCID; this simulates the retries you would see if there were a replication delay due to the eventual consistency of the group.

Note: This is a long-running example; it takes over 4 minutes. That's why the "Output:" line has been changed to prevent the example from running automatically as a test.

// setup
ctx := context.Background()

coreClient, clerr := core.NewComputeClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)
compartmentID, _ := common.DefaultConfigProvider().TenancyOCID()

// this will set the eventually consistent timestamp, because the CreateGroup is eventually consistent and sets the timestamp
groupId := createGroup(ctx, compartmentID)
deleteGroup(ctx, groupId)

// test

defaultRetryPolicy := common.DefaultRetryPolicy()
nonEcRetryPolicy := common.DefaultRetryPolicyWithoutEventualConsistency()

fmt.Printf("EC retry policy:     %v\n", defaultRetryPolicy)
fmt.Printf("Non-EC retry policy: %v\n", nonEcRetryPolicy)

// Without retry policy, we do not see retries
fmt.Printf("\nNo retry policy (expect immediate error):\n")
var elapsed = getInstance(ctx, coreClient, missingInstanceOcid, nil)
fmt.Printf("No retry policy (expect immediate error), elapsed less than three seconds? %v\n",
	getComparisonMessage(elapsed.String(), elapsed < time.Duration(3)*time.Second))

// With the non-EC retry policy, we do not see a retry, because it doesn't consider eventual consistency.
// Without eventual consistency, 404-NotAuthorizedOrNotFound are not retried.
fmt.Printf("\nNon-EC retry policy (expect immediate error):\n")
elapsed = getInstance(ctx, coreClient, missingInstanceOcid, &nonEcRetryPolicy)
fmt.Printf("Non-EC retry policy (expect immediate error), elapsed less than three seconds? %v\n",
	getComparisonMessage(elapsed.String(), elapsed < time.Duration(3)*time.Second))

// With the default retry policy, we do see retries, and this part takes a long time (about 4 minutes).
// These retries on 404-NotAuthorizedOrNotFound only happen because there was an eventually consistent
// operation in the recent past (CreateGroup).
fmt.Printf("\nDefault retry policy (expect long wait, then error):\n")
elapsed = getInstance(ctx, coreClient, missingInstanceOcid, &defaultRetryPolicy)
fmt.Printf("Default retry policy (expect long wait, then error), elapsed about 4 minutes? %v\n",
	getComparisonMessage(elapsed.String(), (time.Duration(239)*time.Second < elapsed) && (elapsed < time.Duration(250)*time.Second)))

// We use the EC retry policy again, but by now we're outside the eventually consistent window, so we don't see retries anymore.
fmt.Printf("\nDefault retry policy, but no more EC (end of window in the past? %v) (expect immediate error):\n",
	getComparisonMessage(fmt.Sprintf("now=%v, eow=%v", time.Now(), common.EcContext.GetEndOfWindow()),
		time.Now().After(*common.EcContext.GetEndOfWindow())))
elapsed = getInstance(ctx, coreClient, missingInstanceOcid, &defaultRetryPolicy)
fmt.Printf("Default retry policy, but no more EC (expect immediate error), elapsed less than three seconds? %v\n",
	getComparisonMessage(elapsed.String(), elapsed < time.Duration(3)*time.Second))

// Output -- to enable this example as a test, change this line to "// Output:"
// EC retry policy:     {MaximumNumberAttempts=9, MinSleepBetween=0, MaxSleepBetween=45, ExponentialBackoffBase=3.52, NonEventuallyConsistentPolicy={MaximumNumberAttempts=8, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}}
// Non-EC retry policy: {MaximumNumberAttempts=8, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}
//
// No retry policy (expect immediate error):
// Service error: NotAuthorizedOrNotFound. Authorization failed or requested resource not found. http status code: 404.
// No retry policy (expect immediate error), elapsed less than three seconds? true
//
// Non-EC retry policy (expect immediate error):
// Setting retry policy: {MaximumNumberAttempts=8, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}
// Service error: NotAuthorizedOrNotFound. Authorization failed or requested resource not found. http status code: 404.
// Non-EC retry policy (expect immediate error), elapsed less than three seconds? true
//
// Default retry policy (expect long wait, then error):
// Setting retry policy: {MaximumNumberAttempts=9, MinSleepBetween=0, MaxSleepBetween=45, ExponentialBackoffBase=3.52, NonEventuallyConsistentPolicy={MaximumNumberAttempts=8, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}}
// Service error: NotAuthorizedOrNotFound. Authorization failed or requested resource not found. http status code: 404.
// Default retry policy (expect long wait, then error), elapsed about 4 minutes? true
//
// Default retry policy, but no more EC (end of window in the past? true) (expect immediate error):
// Setting retry policy: {MaximumNumberAttempts=9, MinSleepBetween=0, MaxSleepBetween=45, ExponentialBackoffBase=3.52, NonEventuallyConsistentPolicy={MaximumNumberAttempts=8, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}}
// Service error: NotAuthorizedOrNotFound. Authorization failed or requested resource not found. http status code: 404.
// Default retry policy, but no more EC (expect immediate error), elapsed less than three seconds? true
Example (EventuallyConsistentRetryBehavior_MakeEventuallyConsistentChange)

This example lets you test the behavior of eventual consistency across processes. It makes an eventually consistent change, after which Example_eventuallyConsistentRetryBehavior_RetryIfEventuallyConsistentChangeMade should retry, because an eventually consistent change has been made. Note that this only works if the EC communication mode is set to 'file' by setting the OCI_GO_SDK_EC_CONFIG environment variable to "file,<timestampFile>".

// setup
ctx := context.Background()

compartmentID, _ := common.DefaultConfigProvider().TenancyOCID()

// this will set the eventually consistent timestamp, because the CreateGroup is eventually consistent and sets the timestamp
groupId := createGroup(ctx, compartmentID)
deleteGroup(ctx, groupId)

fmt.Printf("Eventually consistent change made\n")
Output:

Eventually consistent change made
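
Both this example and its companion below depend on the 'file' EC communication mode mentioned above. A minimal sketch of selecting it in-process, assuming the OCI_GO_SDK_EC_CONFIG variable is read before the first retry decision; the timestamp file path is an arbitrary placeholder that both processes must share.

// Sketch: select the file-based eventual-consistency communication mode.
os.Setenv("OCI_GO_SDK_EC_CONFIG", "file,/tmp/oci-go-sdk-ec-timestamp")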
Example (EventuallyConsistentRetryBehavior_RetryIfEventuallyConsistentChangeMade)

This example lets you test the behavior of eventual consistency across processes. It performs an operation that should be retried only if an eventually consistent change was made beforehand, which can be done using Example_eventuallyConsistentRetryBehavior_MakeEventuallyConsistentChange. Note that this only works if the EC communication mode is set to 'file' by setting the OCI_GO_SDK_EC_CONFIG environment variable to "file,<timestampFile>". You should start this example within 30 seconds of running Example_eventuallyConsistentRetryBehavior_MakeEventuallyConsistentChange.

Since this test ONLY works if the EC communication mode is set to 'file', it has been commented out. It is also long-running, typically taking about 4 minutes, and it needs to be run in coordination with Example_eventuallyConsistentRetryBehavior_MakeEventuallyConsistentChange. That's why the "Output:" line has been changed to prevent the example from running automatically as a test.

// setup
ctx := context.Background()

coreClient, clerr := core.NewComputeClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

// test

defaultRetryPolicy := common.DefaultRetryPolicy()
nonEcRetryPolicy := common.DefaultRetryPolicyWithoutEventualConsistency()

fmt.Printf("EC retry policy:     %v\n", defaultRetryPolicy)
fmt.Printf("Non-EC retry policy: %v\n", nonEcRetryPolicy)

// With the default retry policy, we do see retries, and this part takes a long time (about 4 minutes).
// These retries on 404-NotAuthorizedOrNotFound only happen because there was an eventually consistent
// operation in the recent past (CreateGroup).
fmt.Printf("\nDefault retry policy (expect long wait, then error):\n")
elapsed := getInstance(ctx, coreClient, missingInstanceOcid, &defaultRetryPolicy)
fmt.Printf("Default retry policy (expect long wait, then error), elapsed about 4 minutes? %v\n",
	getComparisonMessage(elapsed.String(), (time.Duration(209)*time.Second < elapsed) && (elapsed < time.Duration(250)*time.Second)))

// Output -- to enable this example as a test, change this line to "// Output:"
// EC retry policy:     {MaximumNumberAttempts=9, MinSleepBetween=0, MaxSleepBetween=45, ExponentialBackoffBase=3.52, NonEventuallyConsistentPolicy={MaximumNumberAttempts=8, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}}
// Non-EC retry policy: {MaximumNumberAttempts=8, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}
//
// Default retry policy (expect long wait, then error):
// Setting retry policy: {MaximumNumberAttempts=9, MinSleepBetween=0, MaxSleepBetween=45, ExponentialBackoffBase=3.52, NonEventuallyConsistentPolicy={MaximumNumberAttempts=8, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}}
// Service error: NotAuthorizedOrNotFound. Authorization failed or requested resource not found. http status code: 404.
// Default retry policy (expect long wait, then error), elapsed about 4 minutes? true
Example (EventuallyConsistentRetryBehavior_UnlimitedAttempts)

This example simulates the behavior of retry strategies with respect to eventual consistency. The eventually consistent operation it calls is CreateGroup in the Identity service. It then makes a number of GetInstance requests in the Compute service that are guaranteed to fail with 404-NotAuthorizedOrNotFound, because the OCID is not a real instance OCID; this simulates the retries you would see if there were a replication delay due to the eventual consistency of the group. Instead of the default retry strategy, which uses exponential backoff and a limited number of attempts, the retry strategy here uses unlimited attempts but a limited amount of time.

Note: This is a long-running example; it takes over 4 minutes. That's why the "Output:" line has been changed to prevent the example from running automatically as a test.

// setup
ctx := context.Background()

coreClient, clerr := core.NewComputeClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)
compartmentID, _ := common.DefaultConfigProvider().TenancyOCID()

// this will set the eventually consistent timestamp, because the CreateGroup is eventually consistent and sets the timestamp
groupId := createGroup(ctx, compartmentID)
deleteGroup(ctx, groupId)

// test

maximumCumulativeBackoff := time.Duration(2) * time.Minute

// retry unlimited number of times, up to two minutes
nonEcRetryPolicy := common.NewRetryPolicyWithOptions(
	common.ReplaceWithValuesFromRetryPolicy(common.DefaultRetryPolicyWithoutEventualConsistency()),
	common.WithUnlimitedAttempts(maximumCumulativeBackoff),
	common.WithShouldRetryOperation(func(r common.OCIOperationResponse) bool {
		durationSinceInitialAttempt := time.Since(r.InitialAttemptTime)
		tooLong := durationSinceInitialAttempt > maximumCumulativeBackoff
		return common.DefaultShouldRetryOperation(r) && !tooLong
	}),
	common.WithNextDuration(func(r common.OCIOperationResponse) time.Duration {
		return time.Duration(math.Pow(float64(2), float64(r.AttemptNumber-1))) * time.Second
	}),
)

ecRetryPolicy := common.EventuallyConsistentRetryPolicy(nonEcRetryPolicy)

fmt.Printf("EC retry policy    : %v\n", ecRetryPolicy)
fmt.Printf("Non-EC retry policy: %v\n", nonEcRetryPolicy)

// Without retry policy, we do not see retries
fmt.Printf("\nNo retry policy (expect immediate error):\n")
var elapsed = getInstance(ctx, coreClient, missingInstanceOcid, nil)
fmt.Printf("No retry policy (expect immediate error), elapsed less than three seconds? %v\n",
	getComparisonMessage(elapsed.String(), elapsed < time.Duration(3)*time.Second))

// With the non-EC retry policy, we do not see a retry, because it doesn't consider eventual consistency.
// Without eventual consistency, 404-NotAuthorizedOrNotFound are not retried.
fmt.Printf("\nNon-EC retry policy (expect immediate error):\n")
elapsed = getInstance(ctx, coreClient, missingInstanceOcid, &nonEcRetryPolicy)
fmt.Printf("Non-EC retry policy (expect immediate error), elapsed less than three seconds? %v\n",
	getComparisonMessage(elapsed.String(), elapsed < time.Duration(3)*time.Second))

// With the EC retry policy, we do see retries, and this part takes a long time (about 4 minutes).
// These retries on 404-NotAuthorizedOrNotFound only happen because there was an eventually consistent
// operation in the recent past (CreateGroup).
fmt.Printf("\nEC retry policy (expect long wait, then error):\n")
elapsed = getInstance(ctx, coreClient, missingInstanceOcid, &ecRetryPolicy)
fmt.Printf("EC retry policy (expect long wait, then error), elapsed about 4 minutes? %v\n",
	getComparisonMessage(elapsed.String(), (time.Duration(239)*time.Second < elapsed) && (elapsed < time.Duration(250)*time.Second)))

// We use the EC retry policy again, but by now we're outside the eventually consistent window, so we don't see retries anymore.
fmt.Printf("\nEC retry policy, but no more EC (end of window in the past? %v) (expect immediate error):\n",
	getComparisonMessage(fmt.Sprintf("now=%v, eow=%v", time.Now(), common.EcContext.GetEndOfWindow()),
		time.Now().After(*common.EcContext.GetEndOfWindow())))
elapsed = getInstance(ctx, coreClient, missingInstanceOcid, &ecRetryPolicy)
fmt.Printf("EC retry policy, but no more EC (expect immediate error), elapsed less than three seconds? %v\n",
	getComparisonMessage(elapsed.String(), elapsed < time.Duration(3)*time.Second))

// Output -- to enable this example as a test, change this line to "// Output:"
// EC retry policy    : {MaximumNumberAttempts=9, MinSleepBetween=0, MaxSleepBetween=45, ExponentialBackoffBase=3.52, NonEventuallyConsistentPolicy={MaximumNumberAttempts=0, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}}
// Non-EC retry policy: {MaximumNumberAttempts=0, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}
//
// No retry policy (expect immediate error):
// Service error: NotAuthorizedOrNotFound. Authorization failed or requested resource not found. http status code: 404.
// No retry policy (expect immediate error), elapsed less than three seconds? true
//
// Non-EC retry policy (expect immediate error):
// Setting retry policy: {MaximumNumberAttempts=0, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}
// Service error: NotAuthorizedOrNotFound. Authorization failed or requested resource not found. http status code: 404.
// Non-EC retry policy (expect immediate error), elapsed less than three seconds? true
//
// EC retry policy (expect long wait, then error):
// Setting retry policy: {MaximumNumberAttempts=9, MinSleepBetween=0, MaxSleepBetween=45, ExponentialBackoffBase=3.52, NonEventuallyConsistentPolicy={MaximumNumberAttempts=0, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}}
// Service error: NotAuthorizedOrNotFound. Authorization failed or requested resource not found. http status code: 404.
// EC retry policy (expect long wait, then error), elapsed about 4 minutes? true
//
// EC retry policy, but no more EC (end of window in the past? true) (expect immediate error):
// Setting retry policy: {MaximumNumberAttempts=9, MinSleepBetween=0, MaxSleepBetween=45, ExponentialBackoffBase=3.52, NonEventuallyConsistentPolicy={MaximumNumberAttempts=0, MinSleepBetween=0, MaxSleepBetween=30, ExponentialBackoffBase=2, NonEventuallyConsistentPolicy=<nil>}}
// Service error: NotAuthorizedOrNotFound. Authorization failed or requested resource not found. http status code: 404.
// EC retry policy, but no more EC (expect immediate error), elapsed less than three seconds? true
Example (FreeformAndDefinedTag)

Example_freeformAndDefinedTag shows how to use freeform and defined tags

// create a tag namespace and two tags
identityClient, err := identity.NewIdentityClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)

ctx := context.Background()

tagNamespaceName := "GOSDKSampleTagNamespaceName_1"
tagNamespaceID := createTagNamespace(ctx, identityClient, common.String(tagNamespaceName))
fmt.Println("tag namespace created")

tagName := "GOSDKSampleTagName_1"
createTag(ctx, identityClient, tagNamespaceID, common.String(tagName))
fmt.Println("tag1 created")

tagName2 := "GOSDKSampleTagName_2"
createTag(ctx, identityClient, tagNamespaceID, common.String(tagName2))
fmt.Println("tag2 created")

// We can assign freeform and defined tags at resource creation time. Freeform tags are a dictionary of
// string-to-string, where the key is the tag name and the value is the tag value.
//
// Defined tags are a dictionary where the key is the tag namespace (string) and the value is another dictionary. In
// this second dictionary, the key is the tag name (string) and the value is the tag value. The tag names have to
// correspond to the name of a tag within the specified namespace (and the namespace must exist).
freeformTags := map[string]string{"free": "form", "another": "item"}
definedTags := map[string]map[string]interface{}{
	tagNamespaceName: {
		tagName:  "hello",
		tagName2: "world",
	},
}

coreClient, clerr := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

// create a new VCN with tags
createVCNReq := core.CreateVcnRequest{
	CreateVcnDetails: core.CreateVcnDetails{
		CidrBlock:     common.String("10.0.0.0/16"),
		CompartmentId: helpers.CompartmentID(),
		DisplayName:   common.String("GOSDKSampleVCNName"),
		DnsLabel:      common.String("vcndns"),
		FreeformTags:  freeformTags,
		DefinedTags:   definedTags,
	},
}

resp, err := coreClient.CreateVcn(ctx, createVCNReq)

if err != nil && resp.RawResponse.StatusCode == 404 {
	// You may get a 404 if you create/reactivate a tag and try to use it straight away. Adding a delay/sleep between
	// creating the tag and using it (or alternatively retrying the 404, as sketched after this example's output) should resolve the issue.
	time.Sleep(time.Second * 10)
	resp, err = coreClient.CreateVcn(ctx, createVCNReq)
}

helpers.FatalIfError(err)
fmt.Println("VCN created with tags")

// replace the tag
freeformTags = map[string]string{"total": "replaced"}

// update the tag value
definedTags[tagNamespaceName][tagName2] = "replaced"

// update the VCN with different tag values
updateVCNReq := core.UpdateVcnRequest{
	VcnId: resp.Id,
	UpdateVcnDetails: core.UpdateVcnDetails{
		FreeformTags: freeformTags,
		DefinedTags:  definedTags,
	},
}
_, err = coreClient.UpdateVcn(ctx, updateVCNReq)
helpers.FatalIfError(err)
fmt.Println("VCN tag updated")

// remove the tag from VCN
updateVCNReq.FreeformTags = nil
updateVCNReq.DefinedTags = nil
_, err = coreClient.UpdateVcn(ctx, updateVCNReq)
helpers.FatalIfError(err)
fmt.Println("VCN tag removed")

defer func() {
	request := core.DeleteVcnRequest{
		VcnId: resp.Id,
	}

	_, err = coreClient.DeleteVcn(ctx, request)
	helpers.FatalIfError(err)
	fmt.Println("VCN deleted")
}()
Output:

tag namespace created
tag1 created
tag2 created
VCN created with tags
VCN tag updated
VCN tag removed
VCN deleted
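
As the comment in the example above notes, the sleep-and-retry on 404 can instead be expressed as a request-level retry policy that absorbs the 404 while the newly created tag propagates. The snippet below is only a sketch (the attempt count is arbitrary), reusing the retry options shown in the custom-retry example.

// Sketch: retry CreateVcn on 404 for a few attempts instead of sleeping.
retryOn404 := func(r common.OCIOperationResponse) bool {
	if serviceErr, ok := common.IsServiceError(r.Error); ok {
		return serviceErr.GetHTTPStatusCode() == 404
	}
	return false
}
tagPropagationPolicy := common.NewRetryPolicyWithOptions(
	common.WithMaximumNumberAttempts(5),
	common.WithShouldRetryOperation(retryOn404),
)
createVCNReq.RequestMetadata = common.RequestMetadata{RetryPolicy: &tagPropagationPolicy}
resp, err = coreClient.CreateVcn(ctx, createVCNReq)
helpers.FatalIfError(err)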
Example (FunctionInvoke)
	SETUP:

	This test requires that you have a [DEFAULT] OCI user profile setup e.g. in ~/.oci/config
	the DEFAULT user will be used in these tests, so any variables supplied must be compatible with that user

	This test requires 4 environment variables to be set:
	for these environment variables, see example/example_test.go {
		OCI_COMPARTMENT_ID
		OCI_AVAILABILITY_DOMAIN
		OCI_ROOT_COMPARTMENT_ID
    }
	OCI_FN_IMAGE : The URI of a publicly available image in the Oracle Cloud Infrastructure Registry (OCIR) e.g. phx.ocir.io/<tenancy-name>/<directory>/<image-name>:<image-tag>

	RUN:
	To run this test/example run:
	go test github.com/oracle/oci-go-sdk/example -run Example_functionInvoke
// Example code for functions API

package main

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"reflect"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/core"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
	"github.com/oracle/oci-go-sdk/v65/functions"
)

var (
	fnImage       string
	gwDisplayName = "OCI-GOSDK-Sample-Gateway"
	rtDisplyName  = "Default Route Table for OCI-GOSDK-Sample-VCN"
)

/*
		SETUP:

		This test requires that you have a [DEFAULT] OCI user profile setup e.g. in ~/.oci/config
		the DEFAULT user will be used in these tests, so any variables supplied must be compatible with that user

		This test requires 4 environment variables to be set:
		for these environment variables, see example/example_test.go {
			OCI_COMPARTMENT_ID
			OCI_AVAILABILITY_DOMAIN
			OCI_ROOT_COMPARTMENT_ID
	    }
		OCI_FN_IMAGE : The URI of a publicly available image in the Oracle Cloud Infrastructure Registry (OCIR) e.g. phx.ocir.io/<tenancy-name>/<directory>/<image-name>:<image-tag>

		RUN:
		To run this test/example run:
		go test github.com/oracle/oci-go-sdk/example -run Example_functionInvoke
*/
func main() {
	managementClient, err := functions.NewFunctionsManagementClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)
	fnImage = os.Getenv("OCI_FN_IMAGE")

	ctx := context.Background()
	subnetID := createOrGetNetworkInfrastructure()
	// A subnet is required to expose functions and to be able to invoke them.
	// In multiple AD regions, subnets can be created in multiple ADs to provide redundancy.
	fmt.Println("Network Layer Created")
	// An application's name must be unique per-compartment
	appName := "Example-Go-SDK-App"
	// A function's name must be unique per-application
	fnName := "Example-Go-SDK-Fn"
	// We must specify which compartment we want to create our Application in
	compartmentID := helpers.CompartmentID()

	createdApp := createApplication(ctx, managementClient, appName, compartmentID, []string{*subnetID})
	fmt.Println("Application Created:", *createdApp.DisplayName)

	gotApp := getReadyApplication(ctx, managementClient, createdApp.Id)
	fmt.Println("Application Got:", *gotApp.DisplayName)

	listedApps := listApplications(ctx, managementClient, compartmentID)
	fmt.Println("Applications Listed:", *listedApps[0].DisplayName)

	createdFn := createFunction(ctx, managementClient, fnName, createdApp.Id)
	fmt.Println("Function Created:", *createdFn.DisplayName)

	gotFn := getReadyFunction(ctx, managementClient, createdFn.Id)
	fmt.Println("Function Got:", *gotFn.DisplayName)

	listedFns := listFunctions(ctx, managementClient, createdApp.Id)
	fmt.Println("Functions Listed:", *listedFns[0].DisplayName)

	invokeClient, err := functions.NewFunctionsInvokeClientWithConfigurationProvider(common.DefaultConfigProvider(), *createdFn.InvokeEndpoint)
	helpers.FatalIfError(err)

	invokeFunction(ctx, invokeClient, createdFn.Id)

	fmt.Println("Function invoked")

	deleteFunction(ctx, managementClient, createdFn.Id)
	fmt.Println("Function Deleted:", *createdFn.DisplayName)

	deleteApplication(ctx, managementClient, createdApp.Id)
	fmt.Println("Application Deleted:", *createdApp.DisplayName)

}

func createApplication(ctx context.Context, client functions.FunctionsManagementClient, appName string, compartmentID *string, subnetIDs []string) functions.Application {
	details := functions.CreateApplicationDetails{
		CompartmentId: compartmentID,
		DisplayName:   &appName,
		SubnetIds:     subnetIDs,
	}

	request := functions.CreateApplicationRequest{CreateApplicationDetails: details}

	response, err := client.CreateApplication(ctx, request)
	helpers.FatalIfError(err)

	return response.Application
}

// Gets an application; if the application is not ready, polls until it becomes ready
func getReadyApplication(ctx context.Context, client functions.FunctionsManagementClient, appID *string) (app functions.Application) {
	metaWithRetry := helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryGetApplication)

	request := functions.GetApplicationRequest{
		ApplicationId:   appID,
		RequestMetadata: metaWithRetry,
	}

	response, err := client.GetApplication(ctx, request)
	helpers.FatalIfError(err)

	return response.Application
}

func listApplications(ctx context.Context, client functions.FunctionsManagementClient, compartmentID *string) []functions.ApplicationSummary {
	request := functions.ListApplicationsRequest{CompartmentId: compartmentID}
	response, err := client.ListApplications(ctx, request)
	helpers.FatalIfError(err)
	return response.Items
}

func deleteApplication(ctx context.Context, client functions.FunctionsManagementClient, appID *string) {
	request := functions.DeleteApplicationRequest{ApplicationId: appID}

	_, err := client.DeleteApplication(ctx, request)
	helpers.FatalIfError(err)
	return
}

func createFunction(ctx context.Context, client functions.FunctionsManagementClient, fnName string, appID *string) functions.Function {
	memory := int64(128)
	details := functions.CreateFunctionDetails{
		DisplayName:   &fnName,
		ApplicationId: appID,
		Image:         &fnImage,
		MemoryInMBs:   &memory,
	}

	request := functions.CreateFunctionRequest{CreateFunctionDetails: details}

	response, err := client.CreateFunction(ctx, request)
	helpers.FatalIfError(err)

	return response.Function
}

func getReadyFunction(ctx context.Context, client functions.FunctionsManagementClient, fnID *string) functions.Function {
	metaWithRetry := helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryGetFunction)

	request := functions.GetFunctionRequest{
		FunctionId:      fnID,
		RequestMetadata: metaWithRetry,
	}

	response, err := client.GetFunction(ctx, request)
	helpers.FatalIfError(err)

	return response.Function
}

func listFunctions(ctx context.Context, client functions.FunctionsManagementClient, appID *string) []functions.FunctionSummary {
	request := functions.ListFunctionsRequest{ApplicationId: appID}

	response, err := client.ListFunctions(ctx, request)
	helpers.FatalIfError(err)

	return response.Items
}

func invokeFunction(ctx context.Context, client functions.FunctionsInvokeClient, fnID *string) *string {
	// Retry function invocation with a standard back-off if we get a 404 in response.
	// This is in case the function creation has not yet completed by the time invocation is attempted
	metaWithRetry := helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryInvokeFunction)
	requestBody := ioutil.NopCloser(bytes.NewReader([]byte("")))
	request := functions.InvokeFunctionRequest{
		FunctionId:         fnID,
		InvokeFunctionBody: requestBody,
		RequestMetadata:    metaWithRetry,
	}

	response, err := client.InvokeFunction(ctx, request)
	if err != nil {
		fmt.Println("Invoke Error:", err)
		return nil
	}
	resp := response.RawResponse
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		fmt.Println("Invoke Failed:", resp.StatusCode)
		return nil
	}

	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("Could not read invoke body:", err)
	}
	responseBody := string(bodyBytes)
	return &responseBody
}

func deleteFunction(ctx context.Context, client functions.FunctionsManagementClient, fnID *string) {
	request := functions.DeleteFunctionRequest{FunctionId: fnID}

	_, err := client.DeleteFunction(ctx, request)
	helpers.FatalIfError(err)
	return
}

func createOrGetNetworkInfrastructure() *string {
	c, err := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
	if err != nil {
		fmt.Println("Network client request error:", err)
	}
	sn := CreateOrGetSubnet()
	gw := createOrGetInternetGateway(c, sn.VcnId)
	createOrGetRouteTable(c, gw.Id, sn.VcnId)

	return sn.Id
}

func createOrGetInternetGateway(c core.VirtualNetworkClient, vcnID *string) (gateway core.InternetGateway) {
	ctx := context.Background()

	//List Gateways
	listGWRequest := core.ListInternetGatewaysRequest{
		CompartmentId: helpers.CompartmentID(),
		VcnId:         vcnID,
		DisplayName:   &gwDisplayName,
	}

	listGWResponse, err := c.ListInternetGateways(ctx, listGWRequest)
	if err != nil {
		fmt.Println("Internet gateway list error:", err)
	}

	if len(listGWResponse.Items) >= 1 {
		//Gateway with name already exists
		gateway = listGWResponse.Items[0]
	} else {
		//Create new Gateway
		enabled := true
		createGWDetails := core.CreateInternetGatewayDetails{
			CompartmentId: helpers.CompartmentID(),
			IsEnabled:     &enabled,
			VcnId:         vcnID,
			DisplayName:   &gwDisplayName,
		}

		createGWRequest := core.CreateInternetGatewayRequest{CreateInternetGatewayDetails: createGWDetails}

		createGWResponse, err := c.CreateInternetGateway(ctx, createGWRequest)
		if err != nil {
			fmt.Println("Internet gateway create error:", err)
		}
		gateway = createGWResponse.InternetGateway
	}
	return
}

func createOrGetRouteTable(c core.VirtualNetworkClient, gatewayID, VcnID *string) (routeTable core.RouteTable) {
	ctx := context.Background()

	//List Route Table
	listRTRequest := core.ListRouteTablesRequest{
		CompartmentId: helpers.CompartmentID(),
		VcnId:         VcnID,
		DisplayName:   &rtDisplyName,
	}

	listRTResponse, err := c.ListRouteTables(ctx, listRTRequest)
	if err != nil {
		fmt.Println("Route table list error", err)
	}

	cidrRange := "0.0.0.0/0"
	rr := core.RouteRule{
		NetworkEntityId: gatewayID,
		Destination:     &cidrRange,
		DestinationType: core.RouteRuleDestinationTypeCidrBlock,
	}

	if len(listRTResponse.Items) >= 1 {

		//Default Route Table found and has at least 1 route rule
		if len(listRTResponse.Items[0].RouteRules) >= 1 {
			routeTable = listRTResponse.Items[0]
			//Default Route table needs route rule adding
		} else {

			updateRTDetails := core.UpdateRouteTableDetails{
				RouteRules: []core.RouteRule{rr},
			}

			updateRTRequest := core.UpdateRouteTableRequest{
				RtId:                    listRTResponse.Items[0].Id,
				UpdateRouteTableDetails: updateRTDetails,
			}

			updateRTResponse, err := c.UpdateRouteTable(ctx, updateRTRequest)
			if err != nil {
				fmt.Println("Error updating route table:", err)
			}
			routeTable = updateRTResponse.RouteTable
		}

	} else {
		//No default route table found
		fmt.Println("Error could not find VCN default route table, VCN OCID:", *VcnID, "Could not find route table:", rtDisplyName)
	}
	return
}

func shouldRetryGetApplication(response common.OCIOperationResponse) bool {
	createResponse, correctType := response.Response.(functions.GetApplicationResponse)
	if !correctType {
		fmt.Println("Retry attempt used incompatible response type, expected GetApplicationResponse, found:", reflect.TypeOf(response.Response))
	}
	if createResponse.LifecycleState != functions.ApplicationLifecycleStateActive {
		return true
	}
	return false
}

func shouldRetryGetFunction(response common.OCIOperationResponse) bool {
	createResponse, correctType := response.Response.(functions.GetFunctionResponse)
	if !correctType {
		fmt.Println("Retry attempt used incompatible response type, expected GetFunctionResponse, found:", reflect.TypeOf(response.Response))
	}
	if createResponse.LifecycleState != functions.FunctionLifecycleStateActive {
		return true
	}
	return false
}

func shouldRetryInvokeFunction(response common.OCIOperationResponse) bool {
	invokeResponse, correctType := response.Response.(functions.InvokeFunctionResponse)
	if !correctType {
		fmt.Println("Retry attempt used incompatible response type, expected InvokeFunctionResponse, found:", reflect.TypeOf(response.Response))
		return false
	}
	if invokeResponse.RawResponse.StatusCode == 404 {
		return true
	}
	return false
}
Output:

Network Layer Created
Application Created: Example-Go-SDK-App
Application Got: Example-Go-SDK-App
Applications Listed: Example-Go-SDK-App
Function Created: Example-Go-SDK-Fn
Function Got: Example-Go-SDK-Fn
Functions Listed: Example-Go-SDK-Fn
Function invoked
Function Deleted: Example-Go-SDK-Fn
Application Deleted: Example-Go-SDK-App
Example (GetBackupDestination)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

getbackupDestinationReq := database.GetBackupDestinationRequest{
	BackupDestinationId: common.String("backup-destination-ocid"),
}

_, err := c.GetBackupDestination(context.Background(), getbackupDestinationReq)
helpers.FatalIfError(err)

fmt.Println("get backup destination is successful")
Output:

get backup destination is successful
Example (GetDataset)

Example_getDataset gets dataset details for a given dataset Id.

datasetId := "REPLACE_WITH_DATASET_OCID"

client, err := datalabelingservice.NewDataLabelingManagementClientWithConfigurationProvider(common.DefaultConfigProvider())
if err != nil {
	panic(err)
}

fmt.Println("Getting dataset")
getDatasetRequest := datalabelingservice.GetDatasetRequest{
	DatasetId: common.String(datasetId),
}

// Send the request using the service client
_, datasetErr := client.GetDataset(context.Background(), getDatasetRequest)
helpers.FatalIfError(datasetErr)

fmt.Println("Done.")
Example (GetRecords)
recordId := "REPLACE_WITH_RECORD_OCID"

client, err := datalabelingservicedataplane.NewDataLabelingClientWithConfigurationProvider(common.DefaultConfigProvider())
if err != nil {
	panic(err)
}
fmt.Println("Getting record.")

// getRecordRequest creates the request body
getRecordRequest := datalabelingservicedataplane.GetRecordRequest{
	RecordId:        common.String(recordId),
	OpcRequestId:    nil,
	RequestMetadata: common.RequestMetadata{},
}

// Send the request using the service client
_, recordErr := client.GetRecord(context.Background(), getRecordRequest)
helpers.FatalIfError(recordErr)

fmt.Println("Get record succeeded.")
fmt.Println("Done")
Output:

Getting record.
Get record succeeded.
Done
Example (HealthChecksHttpSamples)
ctx := context.Background()

// Initialize default config provider
configProvider := common.DefaultConfigProvider()
if ok, err := common.IsConfigurationProviderValid(configProvider); !ok {
	panic(err)
}

client, clerr := healthchecks.NewHealthChecksClientWithConfigurationProvider(configProvider)
helpers.FatalIfError(clerr)

compartmentId, _ := configProvider.TenancyOCID()

_ = exampleListVantagePoints(ctx, client)
//fmt.Println(vantagePoints)
fmt.Println("List Vantage Points")

httpMonitor := exampleCreateHttpMonitor(ctx, client, &compartmentId)
fmt.Println("Create Monitor")
httpMonitor = exampleUpdateHttpMonitor(ctx, client, httpMonitor.Id)
fmt.Println("Update Monitor")
//fmt.Println(httpMonitor)

_ = exampleListHttpMonitorResults(ctx, client, httpMonitor.Id)
fmt.Println("Retrieved Results")

// We need a different compartment to run this.
// exampleMoveCompartmentHttpMonitor(ctx, client, httpMonitor.Id, &compartmentId)

exampleDeleteHttpMonitor(ctx, client, httpMonitor.Id)
fmt.Println("Deleted Monitor")
Output:

List Vantage Points
Create Monitor
Update Monitor
Retrieved Results
Deleted Monitor
Example (HealthChecksPingSamples)
ctx := context.Background()

// Initialize default config provider
configProvider := common.DefaultConfigProvider()
if ok, err := common.IsConfigurationProviderValid(configProvider); !ok {
	panic(err)
}

client, clerr := healthchecks.NewHealthChecksClientWithConfigurationProvider(configProvider)
helpers.FatalIfError(clerr)

compartmentId, _ := configProvider.TenancyOCID()

_ = exampleListVantagePoints(ctx, client)
//fmt.Println(vantagePoints)
fmt.Println("List Vantage Points")

pingMonitor := exampleCreatePingMonitor(ctx, client, &compartmentId)
fmt.Println("Create Monitor")
pingMonitor = exampleUpdatePingMonitor(ctx, client, pingMonitor.Id)
fmt.Println("Update Monitor")
//fmt.Println(pingMonitor)

_ = exampleListPingMonitorResults(ctx, client, pingMonitor.Id)
fmt.Println("Retrieved Results")

// We need a different compartment to run this.
// exampleMoveCompartmentPingMonitor(ctx, client, pingMonitor.Id, &compartmentId)

exampleDeletePingMonitor(ctx, client, pingMonitor.Id)
fmt.Println("Deleted Monitor")
Output:

List Vantage Points
Create Monitor
Update Monitor
Retrieved Results
Deleted Monitor
Example (InstancePrincipals)
provider, err := auth.InstancePrincipalConfigurationProvider()
helpers.FatalIfError(err)

tenancyID := helpers.RootCompartmentID()
request := identity.ListAvailabilityDomainsRequest{
	CompartmentId: tenancyID,
}

client, err := identity.NewIdentityClientWithConfigurationProvider(provider)
helpers.FatalIfError(err)
// Override the region; this is an optional step.
// The InstancePrincipalsConfigurationProvider defaults to the region
// in which the compute instance is currently running.
client.SetRegion(string(common.RegionLHR))

r, err := client.ListAvailabilityDomains(context.Background(), request)
helpers.FatalIfError(err)

log.Printf("list of available domains: %v", r.Items)
fmt.Println("Done")
Output:

Done
Example (InstancePrincipalsWithCustomClient)

Example_instancePrincipalsWithCustomClient lists the availability domains in your tenancy. Similar to the example above, this example shows how to customize the client.

// Just load the system cert pool for demonstration purposes.
rootCaPool, err := x509.SystemCertPool()

helpers.FatalIfError(err)

provider, err := auth.InstancePrincipalConfigurationProviderWithCustomClient(func(dispatcher common.HTTPRequestDispatcher) (common.HTTPRequestDispatcher, error) {
	client := dispatcher.(*http.Client)
	client.Transport = &http.Transport{
		TLSClientConfig: &tls.Config{
			RootCAs: rootCaPool,
		},
	}
	return client, nil
})
helpers.FatalIfError(err)

tenancyID := helpers.RootCompartmentID()
request := identity.ListAvailabilityDomainsRequest{
	CompartmentId: tenancyID,
}

client, err := identity.NewIdentityClientWithConfigurationProvider(provider)
helpers.FatalIfError(err)

r, err := client.ListAvailabilityDomains(context.Background(), request)
helpers.FatalIfError(err)

log.Printf("list of available domains: %v", r.Items)
fmt.Println("Done")
Example (KeyOperations)

ExampleKeyManagement_KeyOperations shows how to create, enable and disable a KMS key

vaultClient, clientError := keymanagement.NewKmsVaultClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clientError)

ctx := context.Background()
vaultName := "KmsVault"
keyName := "KmsKey"
updatedKeyName := "UpdatedKmsKey"

vault := createVault(ctx, vaultClient, vaultName)
defer cleanupResources(ctx, vaultClient, vault.Id)
// wait for instance lifecycle state becomes active
waitForStateVaultClient(ctx, vault.Id, vaultClient, keymanagement.VaultLifecycleStateActive)

vaultManagementClient, mgmtClientError := keymanagement.
	NewKmsManagementClientWithConfigurationProvider(common.DefaultConfigProvider(), *vault.ManagementEndpoint)
helpers.FatalIfError(mgmtClientError)

// Create Key
key, _ := createKey(ctx, vaultManagementClient, &keyName)

// Disable Key
disableRequest := keymanagement.DisableKeyRequest{
	KeyId: key.Id,
}

disableResponse, disableErr := vaultManagementClient.DisableKey(ctx, disableRequest)
helpers.FatalIfError(disableErr)
key = disableResponse.Key
// Wait for key to be in Disabled state
waitForStateVaultManagementClient(ctx, key.Id, vaultManagementClient, keymanagement.KeyLifecycleStateDisabled)

fmt.Println("disable key")

// Enable Key
enableRequest := keymanagement.EnableKeyRequest{
	KeyId: key.Id,
}

enableResponse, enableErr := vaultManagementClient.EnableKey(ctx, enableRequest)
helpers.FatalIfError(enableErr)
key = enableResponse.Key
// Wait for key to be in Enabled state
waitForStateVaultManagementClient(ctx, key.Id, vaultManagementClient, keymanagement.KeyLifecycleStateEnabled)

fmt.Println("enable key")

// Schedule Key Deletion
scheduleKeyDeletionRequest := keymanagement.ScheduleKeyDeletionRequest{
	KeyId: key.Id,
}

scheduleKeyDeletionResponse, scheduleKeyDeletionErr := vaultManagementClient.ScheduleKeyDeletion(ctx, scheduleKeyDeletionRequest)
helpers.FatalIfError(scheduleKeyDeletionErr)
key = scheduleKeyDeletionResponse.Key
// Wait for key to be in PendingDeletion state
waitForStateVaultManagementClient(ctx, key.Id, vaultManagementClient, keymanagement.KeyLifecycleStatePendingDeletion)

fmt.Println("schedule key deletion")

// Cancel Key Deletion
cancelKeyDeletionRequest := keymanagement.CancelKeyDeletionRequest{
	KeyId: key.Id,
}

cancelKeyDeletionResponse, cancelKeyDeletionErr := vaultManagementClient.CancelKeyDeletion(ctx, cancelKeyDeletionRequest)
helpers.FatalIfError(cancelKeyDeletionErr)
key = cancelKeyDeletionResponse.Key
// Wait for key to be in Enabled state
waitForStateVaultManagementClient(ctx, key.Id, vaultManagementClient, keymanagement.KeyLifecycleStateEnabled)

fmt.Println("cancel scheduled key deletion")

// Update Key
updateKeyDetails := keymanagement.UpdateKeyDetails{
	DisplayName: &updatedKeyName,
}
updateKeyRequest := keymanagement.UpdateKeyRequest{
	KeyId:            key.Id,
	UpdateKeyDetails: updateKeyDetails,
}

updateResponse, updateErr := vaultManagementClient.UpdateKey(ctx, updateKeyRequest)
helpers.FatalIfError(updateErr)
key = updateResponse.Key

fmt.Println("update key")

// Move to root compartment
changeKeyCompartment(ctx, vaultManagementClient, helpers.RootCompartmentID(), key.Id)
waitForStateVaultManagementClient(ctx, key.Id, vaultManagementClient, keymanagement.KeyLifecycleStateEnabled)
Output:

create vault
create key
disable key
enable key
schedule key deletion
cancel scheduled key deletion
update key
change key compartment
schedule vault deletion
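
Rotation is a common follow-up to the key operations shown above. The snippet below is a sketch of creating a new key version with the same management client; CreateKeyVersion is assumed to be available on KmsManagementClient.

// Sketch: rotate the key by creating a new key version.
createKeyVersionRequest := keymanagement.CreateKeyVersionRequest{
	KeyId: key.Id,
}
_, keyVersionErr := vaultManagementClient.CreateKeyVersion(ctx, createKeyVersionRequest)
helpers.FatalIfError(keyVersionErr)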
Example (KubeConfig)
ctx := context.Background()
c, clerr := containerengine.NewContainerEngineClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

clusterID := common.String("[YOUR CLUSTER ID]")
req := containerengine.CreateKubeconfigRequest{
	ClusterId: clusterID,
}

_, err := c.CreateKubeconfig(ctx, req)
helpers.FatalIfError(err)
fmt.Println("create kubeconfig")
Output:

create kubeconfig
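
The response returned by CreateKubeconfig carries the kubeconfig content itself. The snippet below is a sketch of persisting it to disk; the Content reader field is assumed from the containerengine response struct, the output path is arbitrary, and the io/ioutil import is required.

// Sketch: write the generated kubeconfig to a local file.
resp, err := c.CreateKubeconfig(ctx, req)
helpers.FatalIfError(err)
defer resp.Content.Close()
kubeconfigBytes, err := ioutil.ReadAll(resp.Content)
helpers.FatalIfError(err)
err = ioutil.WriteFile("kubeconfig.yaml", kubeconfigBytes, 0600)
helpers.FatalIfError(err)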
Example (LaunchInstance)

Example_launchInstance creates an instance. NOTE: launching an instance will create a new instance and VCN; please make sure to delete the instance after executing this sample code, otherwise you will be charged for the running instance.

c, err := core.NewComputeClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)
ctx := context.Background()

// create the launch instance request
request := core.LaunchInstanceRequest{}
request.CompartmentId = helpers.CompartmentID()
request.DisplayName = common.String("OCI-Sample-Instance")
request.AvailabilityDomain = helpers.AvailabilityDomain()

// create a subnet or get the one already created
subnet := CreateOrGetSubnet()
fmt.Println("subnet created")
request.CreateVnicDetails = &core.CreateVnicDetails{SubnetId: subnet.Id}

// get an image
image := listImages(ctx, c)[0]
fmt.Println("list images")
request.SourceDetails = core.InstanceSourceViaImageDetails{ImageId: image.Id}

// use VM.Standard2.1 to create instance
request.Shape = common.String(instanceShape)

// default retry policy will retry on non-200 response
request.RequestMetadata = helpers.GetRequestMetadataWithDefaultRetryPolicy()

createResp, err := c.LaunchInstance(ctx, request)
helpers.FatalIfError(err)

fmt.Println("launching instance")

// shouldRetryFunc returns a bool indicating whether to retry;
// in this case it checks whether the lifecycle state equals Running
shouldRetryFunc := func(r common.OCIOperationResponse) bool {
	if converted, ok := r.Response.(core.GetInstanceResponse); ok {
		return converted.LifecycleState != core.InstanceLifecycleStateRunning
	}
	return true
}

// create get instance request with a retry policy which takes a function
// to determine shouldRetry or not
pollingGetRequest := core.GetInstanceRequest{
	InstanceId:      createResp.Instance.Id,
	RequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(shouldRetryFunc),
}

instance, pollError := c.GetInstance(ctx, pollingGetRequest)
helpers.FatalIfError(pollError)

fmt.Println("instance launched")

attachVnicResponse, err := c.AttachVnic(context.Background(), core.AttachVnicRequest{
	AttachVnicDetails: core.AttachVnicDetails{
		CreateVnicDetails: &core.CreateVnicDetails{
			SubnetId:       subnet.Id,
			AssignPublicIp: common.Bool(true),
		},
		InstanceId: instance.Id,
	},
})

helpers.FatalIfError(err)
fmt.Println("vnic attached")

vnicState := attachVnicResponse.VnicAttachment.LifecycleState
for vnicState != core.VnicAttachmentLifecycleStateAttached {
	time.Sleep(15 * time.Second)
	getVnicAttachmentRequest, err := c.GetVnicAttachment(context.Background(), core.GetVnicAttachmentRequest{
		VnicAttachmentId: attachVnicResponse.Id,
	})
	helpers.FatalIfError(err)
	vnicState = getVnicAttachmentRequest.VnicAttachment.LifecycleState
}

_, err = c.DetachVnic(context.Background(), core.DetachVnicRequest{
	VnicAttachmentId: attachVnicResponse.Id,
})

helpers.FatalIfError(err)
fmt.Println("vnic dettached")

defer func() {
	terminateInstance(ctx, c, createResp.Id)

	client, clerr := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(clerr)

	vcnID := subnet.VcnId
	deleteSubnet(ctx, client, subnet.Id)
	deleteVcn(ctx, client, vcnID)
}()
Output:

subnet created
list images
list shapes
launching instance
instance launched
vnic attached
vnic detached
terminating instance
instance terminated
deleteing subnet
subnet deleted
deleteing VCN
VCN deleted
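
After the VNIC is attached, the instance's public IP can be looked up through the attachment. The snippet below is not part of the original example; it is a sketch of the usual ListVnicAttachments/GetVnic flow between the compute and virtual network clients.

// Sketch: resolve the attached VNIC and read its public IP.
attachments, err := c.ListVnicAttachments(ctx, core.ListVnicAttachmentsRequest{
	CompartmentId: helpers.CompartmentID(),
	InstanceId:    instance.Id,
})
helpers.FatalIfError(err)
if len(attachments.Items) > 0 {
	vnetClient, err := core.NewVirtualNetworkClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)
	vnicResp, err := vnetClient.GetVnic(ctx, core.GetVnicRequest{VnicId: attachments.Items[0].VnicId})
	helpers.FatalIfError(err)
	if vnicResp.PublicIp != nil {
		fmt.Println("instance public IP:", *vnicResp.PublicIp)
	}
}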
Example (ListAvailabilityDomains)

Example_listAvailabilityDomains lists the Availability Domains in your tenancy. Specify the OCID of either the tenancy or another of your compartments as the value for the compartment ID (remember that the tenancy is simply the root compartment).

c, err := identity.NewIdentityClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)

// The OCID of the tenancy containing the compartment.
tenancyID, err := common.DefaultConfigProvider().TenancyOCID()
helpers.FatalIfError(err)

request := identity.ListAvailabilityDomainsRequest{
	CompartmentId: &tenancyID,
}

r, err := c.ListAvailabilityDomains(context.Background(), request)
helpers.FatalIfError(err)

log.Printf("list of available domains: %v", r.Items)
fmt.Println("list available domains completed")
Output:

list available domains completed
Example (ListDataset)

Example_listDataset lists all datasets in a given compartment.

compartment := "REPLACE_WITH_COMPARTMENT_OCID"

client, err := datalabelingservice.NewDataLabelingManagementClientWithConfigurationProvider(common.DefaultConfigProvider())
if err != nil {
	panic(err)
}

fmt.Println("Listing all datasets")

req := datalabelingservice.ListDatasetsRequest{
	CompartmentId:  common.String(compartment),
	Limit:          common.Int(500),
	LifecycleState: datalabelingservice.DatasetLifecycleStateActive,
}

// Send the request using the service client
_, datasetErr := client.ListDatasets(context.Background(), req)
helpers.FatalIfError(datasetErr)
fmt.Println("Listing datasets completed")
Example (ListEvents)
c, clerr := audit.NewAuditClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

// list events for the last 5 hours
req := audit.ListEventsRequest{
	CompartmentId: helpers.CompartmentID(),
	StartTime:     &common.SDKTime{time.Now().Add(time.Hour * -5)},
	EndTime:       &common.SDKTime{time.Now()},
}

_, err := c.ListEvents(context.Background(), req)
helpers.FatalIfError(err)

//log.Printf("events returned back: %v", resp.Items)
fmt.Println("list events completed")
Output:

list events completed
Example (ListGroupsWithCustomSignedHeader)

Example_listGroupsWithCustomSignedHeader Lists groups by passing a custom signed header in the request

provider := common.DefaultConfigProvider()
c, err := identity.NewIdentityClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)

// Bear in mind that services expect well-known headers to be signed. Signing arbitrary headers
// might lead to authentication errors
customHeader := "opc-my-token"
allHeaders := append(common.DefaultGenericHeaders(), customHeader)
c.Signer = common.RequestSigner(provider, allHeaders, common.DefaultBodyHeaders())
c.Interceptor = func(request *http.Request) error {
	request.Header.Add(customHeader, "customvalue")
	return nil
}

// The OCID of the tenancy containing the compartment.
tenancyID, _ := provider.TenancyOCID()
request := identity.ListGroupsRequest{
	CompartmentId: common.String(tenancyID),
}
r, err := c.ListGroups(context.Background(), request)
helpers.FatalIfError(err)

log.Printf("list groups completed: %v", r.Items)
fmt.Println("list groups completed")
Output:

list groups completed
Example (ListRecords)
datasetId := "REPLACE_WITH_DATASET_OCID"
compartment := "REPLACE_WITH_COMPARTMENT_OCID"

client, err := datalabelingservicedataplane.NewDataLabelingClientWithConfigurationProvider(common.DefaultConfigProvider())
if err != nil {
	panic(err)
}
fmt.Println("Listing records.")

listRecordsRequest := datalabelingservicedataplane.ListRecordsRequest{
	CompartmentId: common.String(compartment),
	DatasetId:     common.String(datasetId),
	Limit:         common.Int(500),
	SortBy:        datalabelingservicedataplane.ListRecordsSortByTimecreated,
	SortOrder:     datalabelingservicedataplane.ListRecordsSortOrderDesc,
}

// Send the request using the service client
_, recordErr := client.ListRecords(context.Background(), listRecordsRequest)
helpers.FatalIfError(recordErr)

fmt.Println("Record listing succeeded.")
fmt.Println("Done")
Output:

Listing records.
Record listing succeeded.
Done
Example (ListShapes_Pagination)

Example_listShapes_Pagination demonstrates how to use the page parameter.

c, err := core.NewComputeClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)

request := core.ListShapesRequest{
	CompartmentId: helpers.CompartmentID(),
}

// to show how pagination works, reduce number of items to return in a paginated "List" call
request.Limit = common.Int(2)

listShapesFunc := func(request core.ListShapesRequest) (core.ListShapesResponse, error) {
	return c.ListShapes(context.Background(), request)
}

for r, err := listShapesFunc(request); ; r, err = listShapesFunc(request) {
	helpers.FatalIfError(err)

	log.Printf("list shapes returns: %v", r.Items)

	if r.OpcNextPage != nil {
		// if there are more items in next page, fetch items from next page
		request.Page = r.OpcNextPage
	} else {
		// no more results, break the loop
		break
	}
}

fmt.Println("list shapes completed")
Output:

list shapes completed
Example (ListUsers_RawRequest)

ExampleRawRequest composes a request, signs it, and sends it to the server.

// build the url
url := "https://identity.us-phoenix-1.oraclecloud.com/20160918/users/?compartmentId=" + *helpers.RootCompartmentID()

// create request
request, err := http.NewRequest("GET", url, nil)
helpers.FatalIfError(err)

// Set the Date header
request.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))

// And a provider of cryptographic keys
provider := common.DefaultConfigProvider()

// Build the signer
signer := common.DefaultRequestSigner(provider)

// Sign the request
signer.Sign(request)

client := http.Client{}

fmt.Println("send request")

// Execute the request
resp, err := client.Do(request)
helpers.FatalIfError(err)

defer resp.Body.Close()

log.Println("response Status:", resp.Status)
log.Println("response Headers:", resp.Header)

body, _ := ioutil.ReadAll(resp.Body)
log.Println("response Body:", string(body))

fmt.Println("receive response")
Output:

send request
receive response
Example (MoveCompartment)

Example_moveCompartment Moves an active compartment under a different parent

// Example code for Compartments Service API
// This script provides an example on how to move a compartment to a different compartment
// This script will
//
//   * create cp_source_GOSDK under tenancy
//   * create cp_target_GOSDK under tenancy
//   * move cp_source_GOSDK under cp_target_GOSDK

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
	"github.com/oracle/oci-go-sdk/v65/identity"
)

// Example_moveCompartment Moves an active compartment under a different parent
func main() {
	c, err := identity.NewIdentityClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)

	// The OCID of the tenancy containing the compartment.
	tenancyID, err := common.DefaultConfigProvider().TenancyOCID()
	helpers.FatalIfError(err)

	ctx := context.Background()
	cpSource := createCompartment(ctx, c, common.String(tenancyID), common.String("cp_source_GOSDK"))
	cpTarget := createCompartment(ctx, c, common.String(tenancyID), common.String("cp_target_GOSDK"))

	moveDetail := identity.MoveCompartmentDetails{
		TargetCompartmentId: cpTarget,
	}

	moveRequest := identity.MoveCompartmentRequest{
		CompartmentId:          cpSource,
		MoveCompartmentDetails: moveDetail,
		RequestMetadata:        helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}

	resp, err := c.MoveCompartment(ctx, moveRequest)
	helpers.FatalIfError(err)
	log.Printf("move compartment with workrequest id: %s", *resp.OpcWorkRequestId)
	fmt.Println("move compartment request is accepted")

	// get cpSource new parent
	cpSourceNewParent := getCompartment(ctx, c, cpSource).CompartmentId
	cpSourceNewParentName := getCompartment(ctx, c, cpSourceNewParent).Name

	log.Printf("cp_source_GOSDK new parent is: %v", *cpSourceNewParentName)
	fmt.Println("move compartment completed")

}

func createCompartment(ctx context.Context, client identity.IdentityClient, tenantId *string, compartmentName *string) *string {
	detail := identity.CreateCompartmentDetails{
		CompartmentId: tenantId,
		Name:          compartmentName,
		Description:   compartmentName,
	}
	request := identity.CreateCompartmentRequest{
		CreateCompartmentDetails: detail,
		RequestMetadata:          helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}

	resp, err := client.CreateCompartment(ctx, request)
	helpers.FatalIfError(err)

	return resp.Id
}

func getCompartment(ctx context.Context, client identity.IdentityClient, compartmentId *string) identity.Compartment {
	request := identity.GetCompartmentRequest{
		CompartmentId:   compartmentId,
		RequestMetadata: helpers.GetRequestMetadataWithDefaultRetryPolicy(),
	}

	resp, err := client.GetCompartment(ctx, request)
	helpers.FatalIfError(err)

	return resp.Compartment
}
Output:

move compartment request is accepted
move compartment completed
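
The move itself is asynchronous; the example above reads the new parent immediately after the request is accepted, so in practice you may want to wait for the returned work request first. A rough sketch using the workrequests client shown later in this package (the WorkRequestStatusSucceeded constant and the time import are assumptions of this sketch):

// Sketch: poll the move work request until it reaches a terminal state
wrClient, err := workrequests.NewWorkRequestClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)

for {
	wr, err := wrClient.GetWorkRequest(ctx, workrequests.GetWorkRequestRequest{WorkRequestId: resp.OpcWorkRequestId})
	helpers.FatalIfError(err)
	if wr.Status == workrequests.WorkRequestStatusSucceeded { // assumed terminal success status
		break
	}
	time.Sleep(10 * time.Second)
}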
Example (MultipartDownload)

Example_multipartDownload shows how to use get object API to perform multi-part download operation

// Example code for Object Storage multipart download

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"math"
	"strconv"
	"sync"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
	"github.com/oracle/oci-go-sdk/v65/objectstorage"
)

// Example_multipartDownload shows how to use get object API to perform multi-part download operation
func main() {
	c, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)

	ctx := context.Background()
	// Change bname and objectName to the bucket and object you want to download from
	bname := "test_bucket_name"
	objectName := "test_download_file"
	// This value controls the size per part
	partSize := 500
	namespace := getNamespace(ctx, c)
	downloadThread := 5

	// Get the object size info from object storage
	listResponse, err := c.ListObjects(ctx, objectstorage.ListObjectsRequest{
		NamespaceName: common.String(namespace),
		BucketName:    common.String(bname),
		Prefix:        common.String(objectName),
		Fields:        common.String("name,size"),
	})
	helpers.FatalIfError(err)
	// The result is a list of objects matching the prefix; this example simply selects the first one
	size := int(*listResponse.Objects[0].Size)
	totalParts := size / partSize
	if size%partSize != 0 {
		totalParts++
	}

	done := make(chan struct{})
	prepareDownloadParts := splitToParts(done, totalParts, partSize, size, namespace, bname, objectName)

	downloadedParts := multipartDownload(ctx, c, downloadThread, done, prepareDownloadParts)

	// In this example the downloaded content is held in memory, so be mindful of out-of-memory issues for large objects
	result := make([]byte, size)

	for part := range downloadedParts {
		if part.err != nil {
			// A real application should handle the failure here, either by raising a fatal error or by retrying the failed part.
			// For this example, the error is simply skipped.
			continue
		}
		for i := int64(0); i < part.size; i++ {
			result[i+part.offset] = part.partBody[i]
		}
	}
	fmt.Println(result)
}

// downloadPart contains the data downloaded from object storage and the body part info
type downloadPart struct {
	size     int64
	partBody []byte
	offset   int64
	partNum  int
	err      error
}

// prepareDownloadPart wraps a GetObjectRequest together with the info for one split part
type prepareDownloadPart struct {
	request objectstorage.GetObjectRequest
	offset  int64
	partNum int
	size    int64
}

// splitToParts splits the object into parts of partSize bytes and builds a prepareDownloadPart for each; it returns a channel of prepareDownloadPart values
func splitToParts(done <-chan struct{}, totalParts int, partSize int, fileSize int, namespace string, bname string, objectName string) chan prepareDownloadPart {
	prepareDownloadParts := make(chan prepareDownloadPart)
	go func() {
		defer func() {
			fmt.Println("Split to parts completed, closing channel")
			close(prepareDownloadParts)
		}()

		for part := 0; part < totalParts; part++ {
			start := int64(part * partSize)
			end := int64(math.Min(float64((part+1)*partSize), float64(fileSize)) - 1)
			bytesRange := strconv.FormatInt(start, 10) + "-" + strconv.FormatInt(end, 10)
			part := prepareDownloadPart{
				request: objectstorage.GetObjectRequest{
					NamespaceName: common.String(namespace),
					BucketName:    common.String(bname),
					ObjectName:    common.String(objectName),
					// This is the parameter where you control the download size/request
					Range: common.String("bytes=" + bytesRange),
				},
				offset:  start,
				partNum: part,
				size:    end - start + 1, // the byte range is inclusive of both ends
			}

			select {
			case prepareDownloadParts <- part:
			case <-done:
				return
			}
		}
	}()
	return prepareDownloadParts
}

// multipartDownload consumes prepareDownloadPart values from the channel across multiple goroutines, downloads each part, and sends the results to another channel
func multipartDownload(ctx context.Context, c objectstorage.ObjectStorageClient, downloadThreads int, done <-chan struct{}, prepareDownloadParts chan prepareDownloadPart) chan downloadPart {
	result := make(chan downloadPart)
	var wg sync.WaitGroup
	wg.Add(downloadThreads)

	for i := 0; i < downloadThreads; i++ {
		go func() {
			downloadFilePart(ctx, c, done, prepareDownloadParts, result)
			wg.Done()
		}()
	}

	go func() {
		wg.Wait()
		close(result)
	}()

	return result
}

// downloadFilePart wraps objectStorage GetObject API call
func downloadFilePart(ctx context.Context, c objectstorage.ObjectStorageClient, done <-chan struct{}, prepareDownloadParts chan prepareDownloadPart, result chan downloadPart) {
	for part := range prepareDownloadParts {
		resp, err := c.GetObject(ctx, part.request)
		downloadedPart := downloadPart{}
		if err != nil {
			fmt.Println("Error in downloading: ", err)
			downloadedPart.err = err
		} else {
			content, _ := ioutil.ReadAll(resp.Content)
			downloadedPart = downloadPart{
				size:     int64(len(content)),
				partBody: content,
				offset:   part.offset,
				partNum:  part.partNum,
			}
		}
		select {
		case result <- downloadedPart:
		case <-done:
			fmt.Println("downloadParts received Done")
			return
		}
	}
}
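
To make the part arithmetic above concrete, here is a small standalone sketch (the sizes are made up) that prints the Range header each part would use; note that an HTTP byte range is inclusive on both ends:

package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	// hypothetical 1234-byte object downloaded in 500-byte parts
	fileSize, partSize := 1234, 500
	totalParts := fileSize / partSize
	if fileSize%partSize != 0 {
		totalParts++
	}
	for part := 0; part < totalParts; part++ {
		start := int64(part * partSize)
		end := int64(math.Min(float64((part+1)*partSize), float64(fileSize)) - 1)
		// prints bytes=0-499, bytes=500-999, bytes=1000-1233
		fmt.Println("bytes=" + strconv.FormatInt(start, 10) + "-" + strconv.FormatInt(end, 10))
	}
}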
Example (NodePoolCRUD)

Example for NodePool

ctx := context.Background()
c, clerr := containerengine.NewContainerEngineClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

compute, err := core.NewComputeClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)

identityClient, err := identity.NewIdentityClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)
req := identity.ListAvailabilityDomainsRequest{}
req.CompartmentId = helpers.CompartmentID()
ads, err := identityClient.ListAvailabilityDomains(ctx, req)
helpers.FatalIfError(err)

// create network resources for cluster
vcnID, subnet1ID, subnet2ID := createVCNWithSubnets(ctx)

// create cluster
kubeVersion := getDefaultKubernetesVersion(c)
createClusterResp := createCluster(ctx, c, vcnID, kubeVersion, subnet1ID)

// wait until work request complete
workReqResp := waitUntilWorkRequestComplete(c, createClusterResp.OpcWorkRequestId)
fmt.Println("cluster created")
clusterID := getResourceID(workReqResp.Resources, containerengine.WorkRequestResourceActionTypeCreated, "CLUSTER")

// get Image Id
image := getImageID(ctx, compute)

// create NodePool
createNodePoolReq := containerengine.CreateNodePoolRequest{}
createNodePoolReq.CompartmentId = helpers.CompartmentID()
createNodePoolReq.Name = common.String("GOSDK_SAMPLE_NP")
createNodePoolReq.ClusterId = clusterID
createNodePoolReq.KubernetesVersion = common.String(kubeVersion)
createNodePoolReq.NodeSourceDetails = containerengine.NodeSourceViaImageDetails{ImageId: image.Id}
createNodePoolReq.NodeShape = common.String("VM.Standard1.1")
createNodePoolReq.InitialNodeLabels = []containerengine.KeyValue{{Key: common.String("foo"), Value: common.String("bar")}}
createNodePoolReq.NodeConfigDetails = &containerengine.CreateNodePoolNodeConfigDetails{
	PlacementConfigs: make([]containerengine.NodePoolPlacementConfigDetails, 0, len(ads.Items)),
	Size:             common.Int(len(ads.Items)),
}

for i := 0; i < len(ads.Items); i++ {
	createNodePoolReq.NodeConfigDetails.PlacementConfigs = append(createNodePoolReq.NodeConfigDetails.PlacementConfigs,
		containerengine.NodePoolPlacementConfigDetails{
			AvailabilityDomain: ads.Items[i].Name,
			SubnetId:           &subnet2ID,
		})
}

createNodePoolResp, err := c.CreateNodePool(ctx, createNodePoolReq)
helpers.FatalIfError(err)
fmt.Println("creating nodepool")

workReqResp = waitUntilWorkRequestComplete(c, createNodePoolResp.OpcWorkRequestId)
fmt.Println("nodepool created")

nodePoolID := getResourceID(workReqResp.Resources, containerengine.WorkRequestResourceActionTypeCreated, "NODEPOOL")

defer func() {
	deleteNodePool(ctx, c, nodePoolID)
	deleteCluster(ctx, c, clusterID)
}()

// update NodePool
updateNodePoolReq := containerengine.UpdateNodePoolRequest{
	NodePoolId: nodePoolID,
}

updateNodePoolReq.Name = common.String("GOSDK_SAMPLE_NP_NEW")
updateNodePoolResp, err := c.UpdateNodePool(ctx, updateNodePoolReq)
helpers.FatalIfError(err)
fmt.Println("updating nodepool")

workReqResp = waitUntilWorkRequestComplete(c, updateNodePoolResp.OpcWorkRequestId)
fmt.Println("nodepool updated")
Output:

create VCN complete
create subnet1 complete
create subnet2 complete
creating cluster
cluster created
creating nodepool
nodepool created
updating nodepool
nodepool updated
deleting nodepool
deleting cluster
Example (ObjectStorage_GetNamespace)

Example for getting the Object Storage namespace of a tenancy other than your own. This is useful for cross-tenant Object Storage operations. The Object Storage namespace can be retrieved using the compartment id of the target tenancy if the user has the necessary permissions to access that tenancy.

For example, if Tenant A wants to access Tenant B's Object Storage namespace, then Tenant A has to define a policy similar to the following:

DEFINE TENANCY TenantB AS <TenantB OCID> ENDORSE GROUP <TenantA user group name> TO {OBJECTSTORAGE_NAMESPACE_READ} IN TENANCY TenantB

and Tenant B should add a policy similar to the following:

DEFINE TENANCY TenantA AS <TenantA OCID> DEFINE GROUP TenantAGroup AS <TenantA user group OCID> ADMIT GROUP TenantAGroup OF TENANCY TenantA TO {OBJECTSTORAGE_NAMESPACE_READ} IN TENANCY

This example covers only the GetNamespace operation across tenants. Additional permissions are required for other Object Storage operations.

Example_objectStorage_GetNamespace shows how to get the namespace by providing a compartmentId.

c, clerr := objectstorage.NewObjectStorageClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

ctx := context.Background()

request := objectstorage.GetNamespaceRequest{}
request.CompartmentId = helpers.CompartmentID()

r, err := c.GetNamespace(ctx, request)
helpers.FatalIfError(err)

log.Printf("Namespace for compartment %s is: %s", *request.CompartmentId, *r.Value)

fmt.Println("Namespace retrieved")
Output:

Namespace retrieved
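
When this is run cross-tenant, the compartment id would typically be the other tenancy's OCID rather than the value returned by helpers.CompartmentID(). A minimal variation of the request above (the OCID is a placeholder):

request := objectstorage.GetNamespaceRequest{
	CompartmentId: common.String("<TenantB OCID>"), // placeholder for the target tenancy's OCID
}

r, err := c.GetNamespace(ctx, request)
helpers.FatalIfError(err)
log.Printf("Namespace for the target tenancy is: %s", *r.Value)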
Example (ObjectStorage_GetObjectUsingRealmSpecificEndpoint)
// This example shows how to use a realm-specific endpoint to get an object.
// You can use either this environment variable or the custom client configuration below to enable realm-specific endpoints.
os.Setenv("OCI_REALM_SPECIFIC_SERVICE_ENDPOINT_TEMPLATE_ENABLED", "true")

c, clerr := objectstorage.NewObjectStorageClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

// An alternative way to enable realm specific endpoint is to use the following code.
c.SetCustomClientConfiguration(common.CustomClientConfiguration{
	RealmSpecificServiceEndpointTemplateEnabled: common.Bool(true),
})
ctx := context.Background()
bname := helpers.GetRandomString(8)
namespace := getNamespace(ctx, c)
getRequest := objectstorage.GetObjectRequest{
	NamespaceName: common.String(namespace),
	BucketName:    common.String(bname),
	ObjectName:    common.String("Example_objectStorage_GetObjectUsingRealmSpecificEndpoint"),
}

response, err := c.GetObject(context.Background(), getRequest)
if err != nil {
	fmt.Println("404")
	return
}
fmt.Println(response)
Output:

404
Example (ObjectStorage_UploadFile)

Example_objectStorage_UploadFile shows how to create a bucket and upload a file

c, clerr := objectstorage.NewObjectStorageClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

ctx := context.Background()
bname := helpers.GetRandomString(8)
namespace := getNamespace(ctx, c)

createBucket(ctx, c, namespace, bname)
defer deleteBucket(ctx, c, namespace, bname)

contentlen := 1024 * 1000
fpath, filesize := helpers.WriteTempFileOfSize(int64(contentlen))
filename := filepath.Base(fpath)
defer func() {
	os.Remove(filename)
}()

file, e := os.Open(fpath)

if e != nil {
	file.Close()
	helpers.FatalIfError(e)
} else {
	defer file.Close()
}

e = putObject(ctx, c, namespace, bname, filename, filesize, file, nil)
helpers.FatalIfError(e)
defer deleteObject(ctx, c, namespace, bname, filename)
Output:

get namespace
create bucket
put object
delete object
delete bucket
Example (ObjectStorage_UploadManager_Stream)
c, clerr := objectstorage.NewObjectStorageClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

ctx := context.Background()
bname := "bname"
namespace := getNamespace(ctx, c)

createBucket(ctx, c, namespace, bname)
defer deleteBucket(ctx, c, namespace, bname)

contentlen := 1024 * 1000 * 130 // 130MB
fpath, _ := helpers.WriteTempFileOfSize(int64(contentlen))
filename := filepath.Base(fpath)
defer func() {
	os.Remove(filename)
}()

uploadManager := transfer.NewUploadManager()
objectName := "sampleStreamUploadObj"

file, _ := os.Open(fpath)
defer file.Close()

req := transfer.UploadStreamRequest{
	UploadRequest: transfer.UploadRequest{
		NamespaceName:                       common.String(namespace),
		BucketName:                          common.String(bname),
		ObjectName:                          common.String(objectName),
		EnableMultipartChecksumVerification: common.Bool(true),
	},
	StreamReader: file, // any value that implements the io.Reader interface
}

// if you want to override the default values, you can do so
// as: transfer.UploadRequest.AllowMultipartUploads = common.Bool(false) // default is true
// or: transfer.UploadRequest.AllowParallelUploads = common.Bool(false) // default is true
_, err := uploadManager.UploadStream(context.Background(), req)

if err != nil {
	fmt.Println(err)
}

defer deleteObject(ctx, c, namespace, bname, objectName)
fmt.Println("stream uploaded")
Output:

get namespace
create bucket
stream uploaded
delete object
delete bucket
Example (ObjectStorage_UploadManager_UploadFile)
c, clerr := objectstorage.NewObjectStorageClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)
// Disable the HTTP client timeout to support big file uploads (when doing this, also pass the Object Storage client to the Upload Manager via ObjectStorageClient below)
c.HTTPClient = &http.Client{}

ctx := context.Background()
bname := "bname"
namespace := getNamespace(ctx, c)

createBucket(ctx, c, namespace, bname)
defer deleteBucket(ctx, c, namespace, bname)

contentlen := 1024 * 1024 * 300 // 300MB
fpath, _ := helpers.WriteTempFileOfSize(int64(contentlen))
filename := filepath.Base(fpath)
defer os.Remove(filename)

uploadManager := transfer.NewUploadManager()
objectName := "sampleFileUploadObj"

req := transfer.UploadFileRequest{
	UploadRequest: transfer.UploadRequest{
		NamespaceName:                       common.String(namespace),
		BucketName:                          common.String(bname),
		ObjectName:                          common.String(objectName),
		PartSize:                            common.Int64(128 * 1024 * 1024),
		CallBack:                            callBack,
		ObjectStorageClient:                 &c,
		EnableMultipartChecksumVerification: common.Bool(true),
	},
	FilePath: fpath,
}

// if you want to override the default values, you can do so
// as: transfer.UploadRequest.AllowMultipartUploads = common.Bool(false) // default is true
// or: transfer.UploadRequest.AllowParallelUploads = common.Bool(false) // default is true
resp, err := uploadManager.UploadFile(ctx, req)

if err != nil && resp.IsResumable() {
	resp, err = uploadManager.ResumeUploadFile(ctx, *resp.MultipartUploadResponse.UploadID)
	if err != nil {
		fmt.Println(resp)
	}
}

defer deleteObject(ctx, c, namespace, bname, objectName)
fmt.Println("file uploaded")
Output:

get namespace
create bucket
One example of progress bar could be the above comment content.
One example of progress bar could be the above comment content.
One example of progress bar could be the above comment content.
file uploaded
delete object
delete bucket
Example (Quotas)

Example_quotas runs an example demonstrating the use of OCI Golang SDK for managing Quotas

// Example code for demonstrating how Quotas can be managed using the OCI Golang SDK
// This example will perform the following operations sequentially-
// - Create a Quota
// - Get the created Quota
// - List all Quotas
// - Update the previously created Quota
// - Delete this Quota

// Description of common parameters
// compartmentId	: The OCID of the compartment where Quotas will reside (this has to be the root compartment)
// name			    : Name of the Quota
// description		: Description for the Quota
// statements		: An array of Quota statements written in the declarative language

package main

// Import necessary packages
import (
	"context" // To supply to the Quotas client while making requests
	"fmt"     // To print to the console

	"github.com/oracle/oci-go-sdk/v65/common" // For common OCI types
	"github.com/oracle/oci-go-sdk/v65/limits" // For types and methods corresponding to Limits
)

// Creates a new Quota with the details given in createQuotaDetails
func createQuota(client limits.QuotasClient, createQuotaDetails limits.CreateQuotaDetails) limits.CreateQuotaResponse {
	var response limits.CreateQuotaResponse
	response, err := client.CreateQuota(context.Background(), limits.CreateQuotaRequest{CreateQuotaDetails: createQuotaDetails})
	if err != nil {
		panic(err)
	}
	return response
}

// Gets the Quota corresponding to given quotaId
func getQuota(client limits.QuotasClient, quotaId string) limits.GetQuotaResponse {
	var response limits.GetQuotaResponse
	quotaIdStr := common.String(quotaId)
	response, err := client.GetQuota(context.Background(), limits.GetQuotaRequest{QuotaId: quotaIdStr})
	if err != nil {
		panic(err)
	}
	return response
}

// Lists Quotas under the Compartment corresponding to given compartmentId
func listQuotas(client limits.QuotasClient, compartmentId string) limits.ListQuotasResponse {
	var response limits.ListQuotasResponse
	compartmentIdStr := common.String(compartmentId)
	response, err := client.ListQuotas(context.Background(), limits.ListQuotasRequest{CompartmentId: compartmentIdStr})
	if err != nil {
		panic(err)
	}
	return response
}

// Updates the Quota corresponding to given quotaId with values given in updateQuotaDetails
func updateQuota(client limits.QuotasClient, quotaId string, updateQuotaDetails limits.UpdateQuotaDetails) limits.UpdateQuotaResponse {
	var response limits.UpdateQuotaResponse
	quotaIdStr := common.String(quotaId)
	response, err := client.UpdateQuota(context.Background(), limits.UpdateQuotaRequest{QuotaId: quotaIdStr, UpdateQuotaDetails: updateQuotaDetails})
	if err != nil {
		panic(err)
	}
	return response
}

// Deletes the Quota corresponding to given quotaId
func deleteQuota(client limits.QuotasClient, quotaId string) limits.DeleteQuotaResponse {
	var response limits.DeleteQuotaResponse
	quotaIdStr := common.String(quotaId)
	response, err := client.DeleteQuota(context.Background(), limits.DeleteQuotaRequest{QuotaId: quotaIdStr})
	if err != nil {
		panic(err)
	}
	return response
}

// Example_quotas runs an example demonstrating the use of OCI Golang SDK for managing Quotas
func main() {

	// Initialize default config provider
	configProvider := common.DefaultConfigProvider()
	if ok, err := common.IsConfigurationProviderValid(configProvider); !ok {
		panic(err)
	}

	// Initialize sample inputs
	compartmentId, _ := configProvider.TenancyOCID()
	name := "MyQuota"
	description := "This is a sample Quota"
	newDescription := "This is an updated Quota"
	statements := []string{"Zero test-family quota 'test-quota-1' in tenancy"}

	// Initialize Quotas client
	client, err := limits.NewQuotasClientWithConfigurationProvider(configProvider)
	if err != nil {
		panic(err)
	}

	// Create Quota
	fmt.Println("Creating Quota")
	createQuotaDetails := limits.CreateQuotaDetails{CompartmentId: &compartmentId, Name: &name, Description: &description, Statements: statements}
	createResponse := createQuota(client, createQuotaDetails)

	// Get Quota
	fmt.Println("Getting Quota")
	getQuota(client, *createResponse.Quota.Id)

	// List Quotas
	fmt.Println("Listing Quotas")
	listResponse := listQuotas(client, compartmentId)

	// Update Quota
	fmt.Println("Updating Quota")
	quotaId := *listResponse.Items[0].Id
	updateQuotaDetails := limits.UpdateQuotaDetails{Description: &newDescription}
	updateQuota(client, quotaId, updateQuotaDetails)

	// Delete Quota
	fmt.Println("Deleting Quota")
	deleteQuota(client, quotaId)

	fmt.Println("Example_quotas completed")

}
Output:

Creating Quota
Getting Quota
Listing Quotas
Updating Quota
Deleting Quota
Example_quotas completed
Example (ResourceManager)

Example_resourceManager shows how to do CRUD operations for a Resource Manager stack. The compartment id is read from the environment variable OCI_COMPARTMENT_ID.

// Example code for resourcemanager API

package main

import (
	"context"
	"fmt"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
	"github.com/oracle/oci-go-sdk/v65/resourcemanager"
)

// Example_resourceManager shows how to do CRUD operations for a Resource Manager stack
// The compartment id is read from the environment variable OCI_COMPARTMENT_ID
func main() {
	provider := common.DefaultConfigProvider()
	client, err := resourcemanager.NewResourceManagerClientWithConfigurationProvider(provider)
	helpers.FatalIfError(err)

	ctx := context.Background()

	stackID := createStack(ctx, provider, client)
	defer deleteStack(ctx, stackID, client)
	listStacks(ctx, client)
	updateStack(ctx, stackID, client)
	getStack(ctx, stackID, client)

}

func createStack(ctx context.Context, provider common.ConfigurationProvider, client resourcemanager.ResourceManagerClient) string {
	stackName := fmt.Sprintf("test-%s", helpers.GetRandomString(8))
	region, _ := provider.Region()
	tenancyOcid, _ := provider.TenancyOCID()

	// create a resource manager stack of type ZIP_UPLOAD by passing a base64-encoded Terraform zip string
	// there are multiple ways to create a stack; for details see https://docs.oracle.com/iaas/iaas/api/#/en/resourcemanager/20180917/datatypes/CreateConfigSourceDetails
	req := resourcemanager.CreateStackRequest{
		CreateStackDetails: resourcemanager.CreateStackDetails{
			CompartmentId: helpers.CompartmentID(),
			ConfigSource: resourcemanager.CreateZipUploadConfigSourceDetails{
				WorkingDirectory:     common.String("vcn"),
				ZipFileBase64Encoded: common.String("[pls use your base64 encoded TF template]"),
			},
			DisplayName: common.String(stackName),
			Description: common.String(fmt.Sprintf("%s-description", stackName)),
			Variables: map[string]string{
				"compartment_ocid": *helpers.CompartmentID(),
				"region":           region,
				"tenancy_ocid":     tenancyOcid,
			},
		},
	}

	stackResp, err := client.CreateStack(ctx, req)
	helpers.FatalIfError(err)

	fmt.Println("create stack completed")
	return *stackResp.Stack.Id
}

func updateStack(ctx context.Context, stackID string, client resourcemanager.ResourceManagerClient) {
	stackName := fmt.Sprintf("test-v1-%s", helpers.GetRandomString(8))

	// update displayName and description of resource manager stack
	req := resourcemanager.UpdateStackRequest{
		StackId: common.String(stackID),
		UpdateStackDetails: resourcemanager.UpdateStackDetails{
			DisplayName: common.String(stackName),
			Description: common.String(fmt.Sprintf("%s-description", stackName)),
		},
	}

	_, err := client.UpdateStack(ctx, req)
	helpers.FatalIfError(err)

	fmt.Println("update stack completed")
}

func listStacks(ctx context.Context, client resourcemanager.ResourceManagerClient) {
	req := resourcemanager.ListStacksRequest{
		CompartmentId: helpers.CompartmentID(),
	}

	// list resource manager stacks
	_, err := client.ListStacks(ctx, req)
	helpers.FatalIfError(err)

	fmt.Println("list stacks completed")
}

func getStack(ctx context.Context, stackID string, client resourcemanager.ResourceManagerClient) {
	req := resourcemanager.GetStackRequest{
		StackId: common.String(stackID),
	}

	// get details of a particular resource manager stack
	_, err := client.GetStack(ctx, req)
	helpers.FatalIfError(err)

	fmt.Println("get stack completed")
}

func deleteStack(ctx context.Context, stackID string, client resourcemanager.ResourceManagerClient) {
	req := resourcemanager.DeleteStackRequest{
		StackId: common.String(stackID),
	}

	// delete a resource manager stack
	_, err := client.DeleteStack(ctx, req)
	helpers.FatalIfError(err)

	fmt.Println("delete stack completed")
}
Output:

create stack completed
list stacks completed
update stack completed
get stack completed
delete stack completed
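
The ZipFileBase64Encoded value above is a placeholder; one way to produce it is to base64-encode a local Terraform bundle with the standard library (a standalone sketch, the file path is hypothetical):

package main

import (
	"encoding/base64"
	"fmt"
	"os"
)

func main() {
	// read a local Terraform zip (hypothetical path) and base64-encode its contents
	zipBytes, err := os.ReadFile("vcn-stack.zip")
	if err != nil {
		panic(err)
	}
	encoded := base64.StdEncoding.EncodeToString(zipBytes)
	fmt.Println(len(encoded), "base64 characters ready for ZipFileBase64Encoded")
}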
Example (ResourceSearch)
client, err := resourcesearch.NewResourceSearchClientWithConfigurationProvider(common.DefaultConfigProvider())
ctx := context.Background()
helpers.FatalIfError(err)

// list resource types
listReq := resourcesearch.ListResourceTypesRequest{}
listResp, err := client.ListResourceTypes(ctx, listReq)
fmt.Println("list resource types")

for _, element := range listResp.Items {
	log.Printf("Resource: %s", *element.Name)
}

// get group type details
getReq := resourcesearch.GetResourceTypeRequest{
	Name: common.String("Group"),
}
getResp, err := client.GetResourceType(context.Background(), getReq)
helpers.FatalIfError(err)
fmt.Println("get group type details")
log.Printf("Resource type: %s", getResp.ResourceType)

// search resource by freetext
searchReq := resourcesearch.SearchResourcesRequest{
	SearchDetails: resourcesearch.FreeTextSearchDetails{
		Text: common.String("displayname"),
	},
}

freeSearchResp, err := client.SearchResources(context.Background(), searchReq)
helpers.FatalIfError(err)
fmt.Println("search resource by freetext")

for _, element := range freeSearchResp.Items {
	log.Printf("Resource: %s", element)
}

searchReq.SearchDetails = resourcesearch.StructuredSearchDetails{
	MatchingContextType: resourcesearch.SearchDetailsMatchingContextTypeHighlights,
	Query:               common.String("query all resources"),
}

structureSearchResp, err := client.SearchResources(context.Background(), searchReq)
helpers.FatalIfError(err)

// search resource by structured query
fmt.Println("search resource by structured query")

for _, element := range structureSearchResp.Items {
	log.Printf("Resource: %s", element)
}
Output:

list resource types
get group type details
search resource by freetext
search resource by structured query
Example (Tagging)

Example_tagging shows sample tag and tag namespace operations: create, update, get, list, etc.

c, err := identity.NewIdentityClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(err)

ctx := context.Background()
tagNamespaceID := createTagNamespace(ctx, c, common.String("GOSDKSampleTagNamespaceName"))
fmt.Println("tag namespace created")

tagName := common.String("GOSDKSampleTagName")
createTag(ctx, c, tagNamespaceID, tagName)
fmt.Println("tag created")

// get tag
getTagReq := identity.GetTagRequest{
	TagNamespaceId: tagNamespaceID,
	TagName:        tagName,
}
_, err = c.GetTag(ctx, getTagReq)
helpers.FatalIfError(err)
fmt.Println("get tag")

// list tags; list operations are paginated and take a "page" parameter
// that lets you fetch the next batch of items from the server.
// For a pagination sample, please refer to 'example_core_pagination_test.go'
listTagReq := identity.ListTagsRequest{
	TagNamespaceId: tagNamespaceID,
}
_, err = c.ListTags(ctx, listTagReq)
helpers.FatalIfError(err)
fmt.Println("list tag")

// get tag namespace
getTagNamespaceReq := identity.GetTagNamespaceRequest{
	TagNamespaceId: tagNamespaceID,
}
_, err = c.GetTagNamespace(ctx, getTagNamespaceReq)
helpers.FatalIfError(err)
fmt.Println("get tag namespace")

// list tag namespaces
listTagNamespaceReq := identity.ListTagNamespacesRequest{
	CompartmentId: helpers.CompartmentID(),
}
_, err = c.ListTagNamespaces(ctx, listTagNamespaceReq)
helpers.FatalIfError(err)
fmt.Println("list tag namespace")

// retire a tag namespace by using the update tag namespace operation
updateTagNamespaceReq := identity.UpdateTagNamespaceRequest{
	TagNamespaceId: tagNamespaceID,
	UpdateTagNamespaceDetails: identity.UpdateTagNamespaceDetails{
		IsRetired: common.Bool(true),
	},
}

_, err = c.UpdateTagNamespace(ctx, updateTagNamespaceReq)
helpers.FatalIfError(err)
fmt.Println("tag namespace retired")

// retire a tag by using the update tag operation
updateTagReq := identity.UpdateTagRequest{
	TagNamespaceId: tagNamespaceID,
	TagName:        tagName,
	UpdateTagDetails: identity.UpdateTagDetails{
		IsRetired: common.Bool(true),
	},
}
_, err = c.UpdateTag(ctx, updateTagReq)
helpers.FatalIfError(err)
fmt.Println("tag retired")

// reactivate a tag namespace
updateTagNamespaceReq = identity.UpdateTagNamespaceRequest{
	TagNamespaceId: tagNamespaceID,
	UpdateTagNamespaceDetails: identity.UpdateTagNamespaceDetails{
		// reactivate a tag namespace by using the update tag namespace operation
		IsRetired: common.Bool(false),
	},
}

_, err = c.UpdateTagNamespace(ctx, updateTagNamespaceReq)
helpers.FatalIfError(err)
fmt.Println("tag namespace reactivated")
Output:

tag namespace created
tag created
get tag
list tag
get tag namespace
list tag namespace
tag namespace retired
tag retired
tag namespace reactivated
Example (UnlimitedAttemptsRetry)

Example_unlimitedAttemptsRetry shows how to use retries with an unlimited number of attempts, limited only by time, for creating and deleting groups. Please refer to example_core_test.go->Example_launchInstance for more examples.

// create and delete group with retry
client, clerr := identity.NewIdentityClientWithConfigurationProvider(common.DefaultConfigProvider())
ctx := context.Background()
helpers.FatalIfError(clerr)

request := identity.CreateGroupRequest{}
request.CompartmentId = helpers.RootCompartmentID()
request.Name = common.String("GoSDK_Sample_Group")
request.Description = common.String("GoSDK Sample Group Description")

maximumCumulativeBackoff := time.Duration(2) * time.Minute

// retry unlimited number of times, up to two minutes
customRetryPolicy := common.NewRetryPolicyWithOptions(
	common.WithUnlimitedAttempts(maximumCumulativeBackoff),
	common.WithShouldRetryOperation(func(r common.OCIOperationResponse) bool {
		durationSinceInitialAttempt := time.Since(r.InitialAttemptTime)
		tooLong := durationSinceInitialAttempt > maximumCumulativeBackoff
		return common.DefaultShouldRetryOperation(r) && !tooLong
	}),
	common.WithNextDuration(func(r common.OCIOperationResponse) time.Duration {
		return time.Duration(math.Pow(float64(2), float64(r.AttemptNumber-1))) * time.Second
	}),
)

// create request metadata for retry
request.RequestMetadata = common.RequestMetadata{
	RetryPolicy: &customRetryPolicy,
}

resp, err := client.CreateGroup(ctx, request)
helpers.FatalIfError(err)
fmt.Println("Creating Group")

// Get with polling
shouldRetry := func(r common.OCIOperationResponse) bool {
	if _, isServiceError := common.IsServiceError(r.Error); isServiceError {
		// a service error was returned; keep retrying (non-service errors, such as network
		// errors, fall through to the default below and are retried as well)
		return true
	}

	if converted, ok := r.Response.(identity.GetGroupResponse); ok {
		// keep retrying until the lifecycle state becomes Active
		return converted.LifecycleState != identity.GroupLifecycleStateActive
	}

	return true
}

// retry unlimited number of times, up to two minutes, until lifecycle state is active
lifecycleStateCheckRetryPolicy := common.NewRetryPolicyWithOptions(
	// since this retries on ANY error response, we don't need special handling for eventual consistency
	common.ReplaceWithValuesFromRetryPolicy(common.DefaultRetryPolicyWithoutEventualConsistency()),
	common.WithUnlimitedAttempts(maximumCumulativeBackoff),
	common.WithShouldRetryOperation(func(r common.OCIOperationResponse) bool {
		durationSinceInitialAttempt := time.Since(r.InitialAttemptTime)
		tooLong := durationSinceInitialAttempt > maximumCumulativeBackoff
		return shouldRetry(r) && !tooLong
	}),
	common.WithNextDuration(func(r common.OCIOperationResponse) time.Duration {
		return time.Duration(math.Pow(float64(2), float64(r.AttemptNumber-1))) * time.Second
	}),
)

getRequest := identity.GetGroupRequest{
	GroupId: resp.Id,
	RequestMetadata: common.RequestMetadata{
		RetryPolicy: &lifecycleStateCheckRetryPolicy,
	},
}

_, errAfterPolling := client.GetGroup(ctx, getRequest)
helpers.FatalIfError(errAfterPolling)
fmt.Println("Group Created")

defer func() {
	// if we've successfully created a group, make sure that we delete it
	rDel := identity.DeleteGroupRequest{
		GroupId: resp.Id,
		RequestMetadata: common.RequestMetadata{
			RetryPolicy: &customRetryPolicy,
		},
	}

	_, err = client.DeleteGroup(ctx, rDel)
	helpers.FatalIfError(err)
	fmt.Println("Group Deleted")
}()
Output:

Creating Group
Group Created
Group Deleted
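
For reference, the WithNextDuration functions above yield an exponential backoff between attempts. A tiny standalone sketch of the schedule they generate:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// same formula as the retry policies above: wait 2^(attempt-1) seconds before the next attempt
	for attempt := 1; attempt <= 7; attempt++ {
		wait := time.Duration(math.Pow(2, float64(attempt-1))) * time.Second
		fmt.Println("attempt", attempt, "-> next wait", wait)
	}
	// waits are 1s, 2s, 4s, ... 64s; the two-minute cumulative cap ends retries shortly after
}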
Example (UpdateAdb)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

updateDbDetails := database.UpdateAutonomousDatabaseDetails{
	CpuCoreCount:         common.Int(2),
	DataStorageSizeInTBs: common.Int(2),
	IsAutoScalingEnabled: common.Bool(false),
}

updateReq := database.UpdateAutonomousDatabaseRequest{
	AutonomousDatabaseId:            common.String("replacewithvalidocid"),
	UpdateAutonomousDatabaseDetails: updateDbDetails,
}
_, err := c.UpdateAutonomousDatabase(context.Background(), updateReq)
helpers.FatalIfError(err)

fmt.Println("update adb successful")
Output:

update adb successful
Example (UpdateAdbAcl)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

updateDbDetails := database.UpdateAutonomousDatabaseDetails{
	WhitelistedIps: []string{"1.1.1.1/28", "3.3.3.3"},
}

updateReq := database.UpdateAutonomousDatabaseRequest{
	AutonomousDatabaseId:            common.String("replacewithvalidocid"),
	UpdateAutonomousDatabaseDetails: updateDbDetails,
}
_, err := c.UpdateAutonomousDatabase(context.Background(), updateReq)
helpers.FatalIfError(err)

fmt.Println("update adb acl successful")
Output:

update adb acl successful
Example (UpdateAdbLisenceType)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

updateDbDetails := database.UpdateAutonomousDatabaseDetails{
	LicenseModel: database.UpdateAutonomousDatabaseDetailsLicenseModelLicenseIncluded,
}

updateReq := database.UpdateAutonomousDatabaseRequest{
	AutonomousDatabaseId:            common.String("replacewithvalidocid"),
	UpdateAutonomousDatabaseDetails: updateDbDetails,
}
_, err := c.UpdateAutonomousDatabase(context.Background(), updateReq)
helpers.FatalIfError(err)

fmt.Println("update adb license type successful")
Output:

update adb license type successful
Example (UpdateBackupDestination)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

updateBackupDestinationDetails := database.UpdateBackupDestinationDetails{
	LocalMountPointPath: &localMountPath,
}

updatebackupdestinationReq := database.UpdateBackupDestinationRequest{
	UpdateBackupDestinationDetails: updateBackupDestinationDetails,
	BackupDestinationId:            common.String("backup-destination-ocid"),
}

_, err := c.UpdateBackupDestination(context.Background(), updatebackupdestinationReq)
helpers.FatalIfError(err)

fmt.Println("update backup destination is successful")
Output:

update backup destination is successful
Example (UpdateDbBackupBackupDestination)
c, clerr := database.NewDatabaseClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

backupDestination := database.BackupDestinationDetails{
	Type: database.BackupDestinationDetailsTypeEnum("NFS"),
	Id:   common.String("backup-destination-ocid"),
}

dbBackupConfig := database.DbBackupConfig{
	BackupDestinationDetails: []database.BackupDestinationDetails{backupDestination},
}

updatedatabaseDetails := database.UpdateDatabaseDetails{
	DbBackupConfig: &dbBackupConfig,
}

updateDatabaseReq := database.UpdateDatabaseRequest{
	UpdateDatabaseDetails: updatedatabaseDetails,
	DatabaseId:            common.String("database-ocid"),
}

_, err := c.UpdateDatabase(context.Background(), updateDatabaseReq)
helpers.FatalIfError(err)

fmt.Println("update backup destination is successful")
Output:

update backup destination is successful
Example (VaultOperations)

ExampleKeyManagement_VaultOperations shows how to create a KMS vault, schedule its deletion, and cancel a scheduled deletion.

vaultClient, clientError := keymanagement.NewKmsVaultClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clientError)

ctx := context.Background()
vaultName := "KmsVault"
updatedVaultName := "UpdatedKmsVault"

vault := createVault(ctx, vaultClient, vaultName)
defer cleanupResources(ctx, vaultClient, vault.Id)
// wait until the vault lifecycle state becomes active
waitForStateVaultClient(ctx, vault.Id, vaultClient, keymanagement.VaultLifecycleStateActive)

updatedVault := updateVault(ctx, vaultClient, &updatedVaultName, vault.Id)
fmt.Printf("Updated vault display name %s\n", *updatedVault.DisplayName)

svdErr := scheduleVaultDeletion(ctx, vaultClient, vault.Id)
helpers.FatalIfError(svdErr)
waitForStateVaultClient(ctx, vault.Id, vaultClient, keymanagement.VaultLifecycleStatePendingDeletion)

cvdErr := cancelVaultDeletion(ctx, vaultClient, vault.Id)
helpers.FatalIfError(cvdErr)
waitForStateVaultClient(ctx, vault.Id, vaultClient, keymanagement.VaultLifecycleStateActive)

// Move to root compartment
changeVaultCompartment(ctx, vaultClient, helpers.RootCompartmentID(), vault.Id)
waitForStateVaultClient(ctx, vault.Id, vaultClient, keymanagement.VaultLifecycleStateActive)
Output:

create vault
update vault
schedule vault deletion
cancel vault deletion
change vault compartment
schedule vault deletion
Example (WorkRequestQuery)

Example for work request query

ctx := context.Background()
c, clerr := containerengine.NewContainerEngineClientWithConfigurationProvider(common.DefaultConfigProvider())
helpers.FatalIfError(clerr)

workRequestID := common.String("[YOUR WORK REQUEST ID]")
listErrorReq := containerengine.ListWorkRequestErrorsRequest{
	CompartmentId: helpers.CompartmentID(),
	WorkRequestId: workRequestID,
}

_, err := c.ListWorkRequestErrors(ctx, listErrorReq)
helpers.FatalIfError(err)
fmt.Println("list work request errors")

listLogReq := containerengine.ListWorkRequestLogsRequest{
	CompartmentId: helpers.CompartmentID(),
	WorkRequestId: workRequestID,
}

_, err = c.ListWorkRequestLogs(ctx, listLogReq)
helpers.FatalIfError(err)
fmt.Println("list work request logs")
Output:

list work request errors
list work request logs
Example (WorkRequests)
// Example code for workrequests API
// This script provides a basic example of how to use work requests using Go SDK.
// This script will:
//
//   * Read in user OCI config
//   * Retrieve a list of all Work Requests for the compartment
//   * Get Work Request details
//   * List errors related to a Work Request
//   * List logs related to a Work Request
//
// This script takes no arguments
//
// Usage:
// 		go test -v example/example_work_request_test.go
//

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/example/helpers"
	"github.com/oracle/oci-go-sdk/v65/workrequests"
)

func main() {
	client, err := workrequests.NewWorkRequestClientWithConfigurationProvider(common.DefaultConfigProvider())
	helpers.FatalIfError(err)

	compartmentID, err := common.DefaultConfigProvider().TenancyOCID()
	if err != nil {
		log.Println("Couldn't read tenancy from OCI config", err)
	}

	log.Println("Compartment ID: ", compartmentID)

	ctx := context.Background()

	workRequests := listWorkRequests(ctx, client, compartmentID)

	log.Println(len(workRequests), " Work Requests found.")

	for _, workRequest := range workRequests {
		getPrintSummary(ctx, client, workRequest.Id)

		getPrintErrors(ctx, client, workRequest.Id)

		getPrintLogs(ctx, client, workRequest.Id)
	}

	fmt.Println("Work Request example Completed")
}

func listWorkRequests(ctx context.Context, client workrequests.WorkRequestClient, compartmentID string) []workrequests.WorkRequestSummary {
	request := workrequests.ListWorkRequestsRequest{
		CompartmentId: &compartmentID,
		Limit:         common.Int(5),
	}

	resp, err := client.ListWorkRequests(ctx, request)
	helpers.FatalIfError(err)

	return resp.Items
}

func getPrintSummary(ctx context.Context, client workrequests.WorkRequestClient, workRequestId *string) {
	request := workrequests.GetWorkRequestRequest{
		WorkRequestId: workRequestId,
	}

	resp, err := client.GetWorkRequest(ctx, request)
	helpers.FatalIfError(err)

	printSummary(resp.WorkRequest)
}

func printSummary(w workrequests.WorkRequest) {
	log.Println("")
	log.Println("")
	log.Println("==========================================================")
	log.Printf("Work Request Details: %s\n", *w.Id)
	log.Println("==========================================================")
	log.Println("OperationType: ", *w.OperationType)
	log.Println("Status: ", w.Status)
	log.Println("ID: ", *w.Id)
	log.Println("CompartmentId: ", *w.CompartmentId)
	log.Println("PercentComplete: ", *w.PercentComplete)
	log.Println("TimeAccepted: ", *w.TimeAccepted)
	log.Println("TimeStarted: ", *w.TimeStarted)
	log.Println("TimeFinished: ", *w.TimeFinished)
	log.Println("")
}

func getPrintErrors(ctx context.Context, client workrequests.WorkRequestClient, workRequestId *string) {
	request := workrequests.ListWorkRequestErrorsRequest{
		WorkRequestId: workRequestId,
	}

	resp, err := client.ListWorkRequestErrors(ctx, request)
	helpers.FatalIfError(err)

	log.Println("==========================================================")
	log.Println("Work Request Errors")
	log.Println("==========================================================")

	for _, wrErr := range resp.Items {
		printErrors(wrErr)
	}
	log.Println("")
}

func printErrors(wrErr workrequests.WorkRequestError) {
	log.Println("{")
	log.Println(" Code: ", *wrErr.Code)
	log.Println(" Message: ", *wrErr.Message)
	log.Println(" Timestamp: ", *wrErr.Timestamp)
	log.Println("}")
}

func getPrintLogs(ctx context.Context, client workrequests.WorkRequestClient, workRequestId *string) {
	request := workrequests.ListWorkRequestLogsRequest{
		WorkRequestId: workRequestId,
		Limit:         common.Int(10),
	}

	// example showing how to use the pagination feature.
	// Other work request calls can also be paginated but aren't for simplicity.
	listLogsFunc := func(request workrequests.ListWorkRequestLogsRequest) (workrequests.ListWorkRequestLogsResponse, error) {
		return client.ListWorkRequestLogs(ctx, request)
	}

	log.Println("==========================================================")
	log.Println("Work Request Logs")
	log.Println("==========================================================")

	for resp, err := listLogsFunc(request); ; resp, err = listLogsFunc(request) {
		helpers.FatalIfError(err)

		for _, wrLog := range resp.Items {
			printLogs(wrLog)
		}

		if resp.OpcNextPage != nil {
			// if there are more items in next page, fetch items from next page
			request.Page = resp.OpcNextPage
		} else {
			// no more results, break the loop
			break
		}
	}

	log.Println("")
}

func printLogs(wrLog workrequests.WorkRequestLogEntry) {
	log.Println("{")
	log.Println(" Message: ", *wrLog.Message)
	log.Println(" Timestamp: ", *wrLog.Timestamp)
	log.Println("}")
}
Output:

Work Request example Completed

Index

Examples

Constants

This section is empty.

Variables

This section is empty.

Functions

func ExampleGenerateText

func ExampleGenerateText()

Types

This section is empty.
