input

package
v1.0.0 Latest
This package is not in the latest version of its module.

Published: Sep 1, 2025 License: Apache-2.0 Imports: 21 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

View Source
var BaseCephDeployment = cephlcmv1alpha1.CephDeployment{
	ObjectMeta: LcmObjectMeta,
	Spec: cephlcmv1alpha1.CephDeploymentSpec{
		DashboardEnabled: false,
		Network: cephlcmv1alpha1.CephNetworkSpec{
			HostNetwork: true,
			ClusterNet:  "127.0.0.0/16",
			PublicNet:   "192.168.0.0/16",
		},
		Nodes: CephNodesOk,
	},
}
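Several of the fixtures below derive from this base through DeepCopy inside an immediately invoked function literal. A test that needs its own one-off variant can follow the same pattern; a minimal sketch (the field flipped here is purely illustrative):

var exampleDashboardDeployment = func() cephlcmv1alpha1.CephDeployment {
	// Copy the shared base so the package-level fixture stays untouched,
	// then change only the field under test.
	cd := BaseCephDeployment.DeepCopy()
	cd.Spec.DashboardEnabled = true
	return *cd
}()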
View Source
var BaseCephDeploymentDelete = func() cephlcmv1alpha1.CephDeployment {
	cd := BaseCephDeployment.DeepCopy()
	cd.Finalizers = []string{"cephdeployment.lcm.mirantis.com/finalizer"}
	cd.DeletionTimestamp = &metav1.Time{Time: time.Date(2021, 8, 15, 14, 30, 45, 0, time.Local)}
	return *cd
}()
View Source
var BaseCephDeploymentDeleting = func() cephlcmv1alpha1.CephDeployment {
	cd := BaseCephDeploymentDelete.DeepCopy()
	cd.Status = cephlcmv1alpha1.CephDeploymentStatus{
		Phase:   cephlcmv1alpha1.PhaseDeleting,
		Message: "Ceph cluster deletion is in progress",
	}
	return *cd
}()
View Source
var BaseCephDeploymentMultus = func() cephlcmv1alpha1.CephDeployment {
	cd := BaseCephDeployment.DeepCopy()
	cd.Spec.Network = cephlcmv1alpha1.CephNetworkSpec{
		Provider: "multus",
		Selector: map[cephv1.CephNetworkType]string{
			cephv1.CephNetworkPublic:  "192.168.0.0/16",
			cephv1.CephNetworkCluster: "127.0.0.0/16",
		},
	}
	return *cd
}()
View Source
var BaseRookConfigOverride = corev1.ConfigMap{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "rook-config-override",
		Annotations: map[string]string{
			"cephdeployment.lcm.mirantis.com/config-global-hash": "95b401f9fc7db148cf2cc3bbcbbe09f7722b2060acf714c142fdf07ee249f0bb",
			"cephdeployment.lcm.mirantis.com/config-mon-hash":    "52235ccf3c9f953de0fc2b8e2928f8119e1be19c14a4cf300c55e8498ec81fa2",
		},
	},
	Data: map[string]string{
		"config": `[global]
cluster_network = 127.0.0.0/16
public_network = 192.168.0.0/16
mon_max_pg_per_osd = 300
mon_target_pg_per_osd = 100

[mon]
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false

[osd]
osd_class_dir = /usr/lib64/rados-classes
`,
		"runtime": "osd|bdev_async_discard_threads = 1\nosd|bdev_enable_discard = true\n",
	},
}
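The "config" value above is a plain Ceph INI blob, while "runtime" packs one option per line in the form "<section>|<key> = <value>". A rough sketch of splitting that runtime string (the helper name is hypothetical, not part of this package; it only needs the standard strings package):

func splitRuntimeOptions(runtime string) map[string]map[string]string {
	// "osd|bdev_enable_discard = true" -> opts["osd"]["bdev_enable_discard"] = "true"
	opts := map[string]map[string]string{}
	for _, line := range strings.Split(strings.TrimSpace(runtime), "\n") {
		sectionOpt := strings.SplitN(line, "|", 2)
		if len(sectionOpt) != 2 {
			continue
		}
		keyValue := strings.SplitN(sectionOpt[1], "=", 2)
		if len(keyValue) != 2 {
			continue
		}
		if opts[sectionOpt[0]] == nil {
			opts[sectionOpt[0]] = map[string]string{}
		}
		opts[sectionOpt[0]][strings.TrimSpace(keyValue[0])] = strings.TrimSpace(keyValue[1])
	}
	return opts
}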
View Source
var BaseStorageClassDefault = storagev1.StorageClass{
	ObjectMeta: metav1.ObjectMeta{
		Name: "pool1-hdd",
		Labels: map[string]string{
			"rook-ceph-storage-class": "true",
		},
		Annotations: map[string]string{"storageclass.kubernetes.io/is-default-class": "true"},
	},
	Provisioner: "rook-ceph.rbd.csi.ceph.com",
	Parameters: map[string]string{
		"clusterID":     "rook-ceph",
		"pool":          "pool1-hdd",
		"imageFormat":   "2",
		"imageFeatures": "layering",
		"csi.storage.k8s.io/provisioner-secret-name":      "rook-csi-rbd-provisioner",
		"csi.storage.k8s.io/provisioner-secret-namespace": "rook-ceph",
		"csi.storage.k8s.io/node-stage-secret-name":       "rook-csi-rbd-node",
		"csi.storage.k8s.io/node-stage-secret-namespace":  "rook-ceph",
	},
}
View Source
var BuiltinMgrPool = &cephv1.CephBlockPool{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "builtin-mgr",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.NamedBlockPoolSpec{
		Name: ".mgr",
		PoolSpec: cephv1.PoolSpec{
			EnableCrushUpdates: true,
			DeviceClass:        "hdd",
			Replicated: cephv1.ReplicatedSpec{
				Size: 3,
			},
			FailureDomain: "host",
			CrushRoot:     "default",
		},
	},
}
View Source
var BuiltinRgwRootPool = &cephv1.CephBlockPool{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "builtin-rgw-root",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.NamedBlockPoolSpec{
		Name: ".rgw.root",
		PoolSpec: cephv1.PoolSpec{
			EnableCrushUpdates: true,
			DeviceClass:        "hdd",
			Replicated: cephv1.ReplicatedSpec{
				Size: 3,
			},
		},
	},
}
View Source
var CSICephFSNodeSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rook-csi-cephfs-node",
		Namespace: "rook-ceph",
	},
	Data: map[string][]byte{
		"adminID":  []byte("csi-cephfs-node"),
		"adminKey": []byte("AQDh+HRjCGpLDxAA1DqwfBPBGkW7+XM65JVChg=="),
	},
}
View Source
var CSICephFSProvisionerSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rook-csi-cephfs-provisioner",
		Namespace: "rook-ceph",
	},
	Data: map[string][]byte{
		"adminID":  []byte("csi-cephfs-provisioner"),
		"adminKey": []byte("AQDg+HRjKB9bLBAArfLLNtGN+KZRq4eaJf6Ptg=="),
	},
}
View Source
var CSIRBDNodeSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rook-csi-rbd-node",
		Namespace: "rook-ceph",
	},
	Data: map[string][]byte{
		"userID":  []byte("csi-rbd-node"),
		"userKey": []byte("AQDd+HRjKiMBOhAATVfdzSNdlOAG3vaPSeTBzw=="),
	},
}
View Source
var CSIRBDProvisionerSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rook-csi-rbd-provisioner",
		Namespace: "rook-ceph",
	},
	Data: map[string][]byte{
		"userID":  []byte("csi-rbd-provisioner"),
		"userKey": []byte("AQDd+HRjFAcRIBAA102qzSI0WO1JfBnfPf/R2w=="),
	},
}
View Source
var CephAdminKeyringSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "rook-ceph-admin-keyring",
	},
	Data: map[string][]byte{
		"keyring": []byte("AQAcpuJiITYXMhAAXaOoAqOKJ4mhNOAqxFb1Hw=="),
	},
}
View Source
var CephBaseClusterReportNotOk = &lcmv1alpha1.CephDeploymentHealthReport{
	RookOperator: RookOperatorStatusOk,
	RookCephObjects: func() *lcmv1alpha1.RookCephObjectsStatus {
		status := RookCephObjectsReportOnlyCephCluster.DeepCopy()
		status.CephCluster = &ReefCephClusterHasHealthIssues.Status
		return status
	}(),
	CephDaemons:    CephDaemonsStatusUnhealthy,
	ClusterDetails: CephDetailsStatusNoIssues,
	OsdAnalysis: &lcmv1alpha1.OsdSpecAnalysisState{
		DiskDaemon: lcmv1alpha1.DaemonStatus{
			Status:   lcmv1alpha1.DaemonStateFailed,
			Messages: []string{"0/2 ready"},
			Issues:   []string{"daemonset 'lcm-namespace/pelagia-disk-daemon' is not ready"},
		},
	},
}
View Source
var CephBaseClusterReportOk = &lcmv1alpha1.CephDeploymentHealthReport{
	RookOperator:    RookOperatorStatusOk,
	RookCephObjects: RookCephObjectsReportOnlyCephCluster,
	CephDaemons:     CephDaemonsStatusHealthy,
	ClusterDetails:  CephDetailsStatusNoIssues,
	OsdAnalysis:     OsdSpecAnalysisOk,
}
View Source
var CephBaseUsageDetails = &lcmv1alpha1.UsageDetails{
	PoolsDetail: map[string]lcmv1alpha1.PoolUsageStats{
		"pool-hdd": {UsedBytes: "12288", UsedBytesPercentage: "0.000", TotalBytes: "104807096320", AvailableBytes: "104807084032"},
		".mgr":     {UsedBytes: "1388544", UsedBytesPercentage: "0.000", TotalBytes: "104807096320", AvailableBytes: "104805707776"},
	},
	ClassesDetail: map[string]lcmv1alpha1.ClassUsageStats{
		"hdd": {UsedBytes: "81630961664", TotalBytes: "509981204480", AvailableBytes: "428350242816"},
	},
}
View Source
var CephBlockPoolErasureCoded = cephv1.CephBlockPool{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "pool1-hdd",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.NamedBlockPoolSpec{
		PoolSpec: cephv1.PoolSpec{
			DeviceClass:   "hdd",
			CrushRoot:     "default",
			FailureDomain: "host",
			ErasureCoded: cephv1.ErasureCodedSpec{
				CodingChunks: 1,
				DataChunks:   2,
				Algorithm:    "fake",
			},
		},
	},
}
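For reference on the sizing implied here: with DataChunks: 2 and CodingChunks: 1, an erasure-coded pool keeps k/(k+m) = 2/3 of raw capacity as usable data and tolerates the loss of one chunk, whereas the Size: 3 replicated pools used elsewhere in these fixtures keep 1/3 of raw capacity but tolerate two lost copies.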
View Source
var CephBlockPoolListEmpty = cephv1.CephBlockPoolList{Items: []cephv1.CephBlockPool{}}
View Source
var CephBlockPoolListNotReady = cephv1.CephBlockPoolList{
	Items: []cephv1.CephBlockPool{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "pool1", Namespace: RookNamespace},
			Status:     &cephv1.CephBlockPoolStatus{Phase: cephv1.ConditionFailure},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "pool2", Namespace: RookNamespace},
		},
	},
}
View Source
var CephBlockPoolListReady = cephv1.CephBlockPoolList{
	Items: []cephv1.CephBlockPool{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "pool1", Namespace: RookNamespace},
			Status:     &cephv1.CephBlockPoolStatus{Phase: cephv1.ConditionReady},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "pool2", Namespace: RookNamespace},
			Status:     &cephv1.CephBlockPoolStatus{Phase: cephv1.ConditionReady},
		},
	},
}
View Source
var CephBlockPoolReplicated = cephv1.CephBlockPool{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "pool1-hdd",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.NamedBlockPoolSpec{
		PoolSpec: cephv1.PoolSpec{
			DeviceClass:   "hdd",
			CrushRoot:     "default",
			FailureDomain: "host",
			Replicated: cephv1.ReplicatedSpec{
				Size: 3,
			},
		},
	},
}
View Source
var CephBlockPoolReplicatedMirroring = cephv1.CephBlockPool{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "pool1-hdd",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.NamedBlockPoolSpec{
		PoolSpec: cephv1.PoolSpec{
			DeviceClass:   "hdd",
			CrushRoot:     "default",
			FailureDomain: "host",
			Replicated: cephv1.ReplicatedSpec{
				Size: 3,
			},
			Mirroring: cephv1.MirroringSpec{
				Enabled: true,
				Mode:    "pool",
			},
		},
	},
}
View Source
var CephCSIDaemonsNotReady = map[string]lcmv1alpha1.DaemonStatus{
	"csi-rbdplugin": {
		Status:   lcmv1alpha1.DaemonStateFailed,
		Messages: []string{"1/3 ready"},
		Issues:   []string{"daemonset 'rook-ceph/csi-rbdplugin' is not ready"},
	},
	"csi-cephfsplugin": {
		Status:   lcmv1alpha1.DaemonStateFailed,
		Messages: []string{"1/3 ready"},
		Issues:   []string{"daemonset 'rook-ceph/csi-cephfsplugin' is not ready"},
	},
}
View Source
var CephCSIDaemonsReady = map[string]lcmv1alpha1.DaemonStatus{
	"csi-rbdplugin": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"3/3 ready"},
	},
	"csi-cephfsplugin": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"3/3 ready"},
	},
}
View Source
var CephClientCinder = cephv1.CephClient{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "cinder",
	},
	Spec: cephv1.ClientSpec{
		Name: "cinder",
		Caps: map[string]string{
			"mon": "allow profile rbd",
			"osd": "profile rbd pool=volumes-hdd, profile rbd-read-only pool=images-hdd, profile rbd pool=backup-hdd",
		},
	},
}
View Source
var CephClientGlance = cephv1.CephClient{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "glance",
	},
	Spec: cephv1.ClientSpec{
		Name: "glance",
		Caps: map[string]string{
			"mon": "allow profile rbd",
			"osd": "profile rbd pool=images-hdd",
		},
	},
}
View Source
var CephClientListEmpty = cephv1.CephClientList{Items: []cephv1.CephClient{}}
View Source
var CephClientListNotReady = cephv1.CephClientList{
	Items: []cephv1.CephClient{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "client1", Namespace: RookNamespace},
			Status:     &cephv1.CephClientStatus{Phase: cephv1.ConditionFailure},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "client2", Namespace: RookNamespace},
		},
	},
}
View Source
var CephClientListReady = cephv1.CephClientList{
	Items: []cephv1.CephClient{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "client1", Namespace: RookNamespace},
			Status:     &cephv1.CephClientStatus{Phase: cephv1.ConditionReady},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "client2", Namespace: RookNamespace},
			Status:     &cephv1.CephClientStatus{Phase: cephv1.ConditionReady},
		},
	},
}
View Source
var CephClientManila = cephv1.CephClient{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "manila",
	},
	Spec: cephv1.ClientSpec{
		Name: "manila",
		Caps: map[string]string{
			"mds": "allow rw",
			"mgr": "allow rw",
			"osd": "allow rw tag cephfs *=*",
			"mon": `allow r, allow command "auth del", allow command "auth caps", allow command "auth get", allow command "auth get-or-create"`,
		},
	},
}
View Source
var CephClientNova = cephv1.CephClient{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "nova",
	},
	Spec: cephv1.ClientSpec{
		Name: "nova",
		Caps: map[string]string{
			"mon": "allow profile rbd",
			"osd": "profile rbd pool=vms-hdd, profile rbd pool=images-hdd, profile rbd pool=volumes-hdd",
		},
	},
}
View Source
var CephClientTest = cephv1.CephClient{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "test",
	},
	Spec: cephv1.ClientSpec{
		Name: "test",
		Caps: map[string]string{
			"osd": "custom-caps",
		},
	},
}
View Source
var CephClusterExternal = cephv1.CephCluster{
	ObjectMeta: metav1.ObjectMeta{
		Name:      LcmObjectMeta.Name,
		Namespace: RookNamespace,
	},
	Spec: cephv1.ClusterSpec{
		CephVersion: cephv1.CephVersionSpec{Image: PelagiaConfig.Data["DEPLOYMENT_CEPH_IMAGE"]},
		ContinueUpgradeAfterChecksEvenIfNotHealthy: true,
		DataDirHostPath:   "/var/lib/rook",
		SkipUpgradeChecks: true,
		External:          cephv1.ExternalSpec{Enable: true},
	},
	Status: cephv1.ClusterStatus{
		Phase: cephv1.ConditionConnected,
		CephStatus: &cephv1.CephStatus{
			Health:      "HEALTH_OK",
			FSID:        "8668f062-3faa-358a-85f3-f80fe6c1e306",
			LastChecked: time.Now().Format(time.RFC3339),
		},
		CephVersion: &cephv1.ClusterVersion{
			Image:   "some-registry.com/ceph:v18.2.4",
			Version: "18.2.4-0",
		},
	},
}
View Source
var CephClusterGenerated = cephv1.CephCluster{
	ObjectMeta: metav1.ObjectMeta{
		Name:      LcmObjectMeta.Name,
		Namespace: RookNamespace,
	},
	Spec: cephv1.ClusterSpec{
		CephVersion: cephv1.CephVersionSpec{Image: PelagiaConfig.Data["DEPLOYMENT_CEPH_IMAGE"]},
		Annotations: map[cephv1.KeyType]cephv1.Annotations{
			cephv1.KeyMon: map[string]string{
				"cephdeployment.lcm.mirantis.com/config-global-updated": "some-time",
				"cephdeployment.lcm.mirantis.com/config-mon-updated":    "some-time",
			},
			cephv1.KeyMgr: map[string]string{
				"cephdeployment.lcm.mirantis.com/config-global-updated": "some-time",
				"cephdeployment.lcm.mirantis.com/config-mgr-updated":    "some-time",
			},
		},
		ContinueUpgradeAfterChecksEvenIfNotHealthy: true,
		DataDirHostPath: "/var/lib/rook",
		Mon:             cephv1.MonSpec{Count: 3},
		Mgr: cephv1.MgrSpec{
			Count: 1,
			Modules: []cephv1.Module{
				{
					Name:    "balancer",
					Enabled: true,
				},
				{
					Name:    "pg_autoscaler",
					Enabled: true,
				},
			},
		},
		Network: cephv1.NetworkSpec{
			Provider: "host",
		},
		Placement: cephv1.PlacementSpec{
			cephv1.KeyMon: cephv1.Placement{
				NodeAffinity: &corev1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
						NodeSelectorTerms: []corev1.NodeSelectorTerm{
							{
								MatchExpressions: []corev1.NodeSelectorRequirement{
									{
										Key:      "ceph_role_mon",
										Operator: "In",
										Values: []string{
											"true",
										},
									},
								},
							},
						},
					},
				},
				PodAffinity:     &corev1.PodAffinity{},
				PodAntiAffinity: &corev1.PodAntiAffinity{},
				Tolerations: []corev1.Toleration{
					{
						Key:      "ceph_role_mon",
						Operator: "Exists",
					},
				},
			},
			cephv1.KeyMgr: cephv1.Placement{
				NodeAffinity: &corev1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
						NodeSelectorTerms: []corev1.NodeSelectorTerm{
							{
								MatchExpressions: []corev1.NodeSelectorRequirement{
									{
										Key:      "ceph_role_mgr",
										Operator: "In",
										Values: []string{
											"true",
										},
									},
								},
							},
						},
					},
				},
				PodAffinity:     &corev1.PodAffinity{},
				PodAntiAffinity: &corev1.PodAntiAffinity{},
				Tolerations: []corev1.Toleration{
					{
						Key:      "ceph_role_mgr",
						Operator: "Exists",
					},
				},
			},
		},
		Storage: cephv1.StorageScopeSpec{
			UseAllNodes: false,
			Selection:   cephv1.Selection{UseAllDevices: &[]bool{false}[0]},
			Nodes: []cephv1.Node{
				{
					Name: "node-1",
					Selection: cephv1.Selection{
						UseAllDevices: nil,
						DeviceFilter:  "",
						Devices: []cephv1.Device{
							{
								Name:   "sda",
								Config: map[string]string{"deviceClass": "hdd"},
							},
						},
						DevicePathFilter:     "",
						VolumeClaimTemplates: nil,
					},
					Config: nil,
				},
				{
					Name: "node-2",
					Selection: cephv1.Selection{
						UseAllDevices: nil,
						DeviceFilter:  "",
						Devices: []cephv1.Device{
							{
								Name:   "sda",
								Config: map[string]string{"osdsPerDevice": "1", "deviceClass": "hdd"},
							},
						},
						DevicePathFilter:     "",
						VolumeClaimTemplates: nil,
					},
					Config: nil,
				},
				{
					Name: "node-3",
					Selection: cephv1.Selection{
						UseAllDevices: nil,
						DeviceFilter:  "",
						Devices: []cephv1.Device{
							{
								Name:   "sda",
								Config: map[string]string{"deviceClass": "hdd"},
							},
						},
						DevicePathFilter:     "",
						VolumeClaimTemplates: nil,
					},
					Config: map[string]string{"osdsPerDevice": "2"},
				},
			},
		},
		SkipUpgradeChecks: true,
		HealthCheck: cephv1.CephClusterHealthCheckSpec{
			LivenessProbe: map[cephv1.KeyType]*cephv1.ProbeSpec{
				"osd": {
					Probe: &corev1.Probe{
						TimeoutSeconds:   5,
						FailureThreshold: 5,
					},
				},
				"mon": {
					Probe: &corev1.Probe{
						TimeoutSeconds:   5,
						FailureThreshold: 5,
					},
				},
				"mgr": {
					Probe: &corev1.Probe{
						TimeoutSeconds:   5,
						FailureThreshold: 5,
					},
				},
			},
		},
	},
}
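The Selection above obtains a *bool with the &[]bool{false}[0] trick, i.e. taking the address of a literal inline. An equivalent, more readable form is a small helper (illustrative only, not part of this package):

func boolPtr(b bool) *bool { return &b }

// Selection: cephv1.Selection{UseAllDevices: boolPtr(false)}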
View Source
var CephClusterListEmpty = cephv1.CephClusterList{Items: []cephv1.CephCluster{}}
View Source
var CephClusterListExternal = cephv1.CephClusterList{Items: []cephv1.CephCluster{CephClusterExternal}}
View Source
var CephClusterListNotReady = cephv1.CephClusterList{Items: []cephv1.CephCluster{ReefCephClusterNotReady}}
View Source
var CephClusterListNotSupported = cephv1.CephClusterList{Items: []cephv1.CephCluster{OctopusCephCluster}}
View Source
var CephConfigDumpDefaults = fmt.Sprintf("[%s]", cephConfigDumpDefaultsStr)
View Source
var CephConfigDumpOverride = fmt.Sprintf("[%s, %s, %s]",
	cephConfigDumpDefaultsStr,
	BuildCliOutput(CephConfigSectionTmpl, "", map[string]string{"section": "global", "name": "osd_max_backfills", "value": "32", "runtime_update": "true"}),
	BuildCliOutput(CephConfigSectionTmpl, "", map[string]string{"section": "global", "name": "osd_recovery_max_active", "value": "16", "runtime_update": "true"}))
View Source
var CephConfigDumpOverrideWithRgw = fmt.Sprintf("[%s, %s, %s, %s, %s]",
	cephConfigDumpDefaultsStr,
	BuildCliOutput(CephConfigSectionTmpl, "", map[string]string{"section": "global", "name": "osd_max_backfills", "value": "64", "runtime_update": "true"}),
	BuildCliOutput(CephConfigSectionTmpl, "", map[string]string{"section": "global", "name": "osd_recovery_max_active", "value": "16", "runtime_update": "true"}),
	BuildCliOutput(CephConfigSectionTmpl, "", map[string]string{"section": "client.rgw.rgw.store.a", "name": "rgw_keystone_admin_password", "value": "AMTqaDveAp8sWlLtf0fcg6RVjFRXs7FR", "runtime_update": "false"}),
	BuildCliOutput(CephConfigSectionTmpl, "", map[string]string{"section": "client.rgw.rgw.store.a", "name": "rgw_keystone_barbican_password", "value": "AMTqaDveAp8sWlLtf0fcg6RVjFRXs7FR", "runtime_update": "false"}))
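The CephConfigDump* strings appear to imitate 'ceph config dump -f json' output: a JSON array whose entries carry at least section, name and value fields (the full CephConfigSectionTmpl is not displayed above). A sketch of decoding such a payload with encoding/json, assuming only those three fields:

type cephConfigDumpEntry struct {
	Section string `json:"section"`
	Name    string `json:"name"`
	Value   string `json:"value"`
}

func decodeConfigDump(raw string) ([]cephConfigDumpEntry, error) {
	// CephConfigDumpDefaults and the override variants are JSON arrays of such entries.
	var entries []cephConfigDumpEntry
	err := json.Unmarshal([]byte(raw), &entries)
	return entries, err
}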
View Source
var CephConfigSectionTmpl = `` /* 140-byte string literal not displayed */
View Source
var CephCrushRuleDumpTmpl = `` /* 2014-byte string literal not displayed */
View Source
var CephDaemonsBaseHealthy = map[string]lcmv1alpha1.DaemonStatus{
	"mon": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"3 mons, quorum [a b c]"},
	},
	"mgr": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"a is active mgr"},
	},
	"osd": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"3 osds, 3 up, 3 in"},
	},
}
View Source
var CephDaemonsBaseUnhealthy = map[string]lcmv1alpha1.DaemonStatus{
	"mon": {
		Status:   lcmv1alpha1.DaemonStateFailed,
		Messages: []string{"2 mons, quorum [a b]"},
		Issues:   []string{"not all (2/3) mons are running"},
	},
	"mgr": {
		Status: lcmv1alpha1.DaemonStateFailed,
		Issues: []string{"no active mgr"},
	},
	"osd": {
		Status:   lcmv1alpha1.DaemonStateFailed,
		Messages: []string{"3 osds, 2 up, 2 in"},
		Issues:   []string{"not all osds are in", "not all osds are up"},
	},
}
View Source
var CephDaemonsCephFsRgwHealthy = map[string]lcmv1alpha1.DaemonStatus{
	"mon": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"3 mons, quorum [a b c]"},
	},
	"mgr": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"a is active mgr"},
	},
	"osd": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"3 osds, 3 up, 3 in"},
	},
	"mds": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"mds active: 1/1 (cephfs 'cephfs-1')"},
	},
	"rgw": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"2 rgws running, daemons: [11556688 12065099]"},
	},
}
View Source
var CephDaemonsCephFsRgwUnhealthy = map[string]lcmv1alpha1.DaemonStatus{
	"mon": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"3 mons, quorum [a b c]"},
	},
	"mgr": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"a is active mgr"},
	},
	"osd": {
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"3 osds, 3 up, 3 in"},
	},
	"mds": {
		Status: lcmv1alpha1.DaemonStateFailed,
		Issues: []string{
			"unexpected number (0/1) of mds active are running for CephFS 'cephfs-1'", "unexpected number (0/1) of mds standby are running",
		},
		Messages: []string{"mds active: 0/1 (cephfs 'cephfs-1')"},
	},
	"rgw": {
		Status:   lcmv1alpha1.DaemonStateFailed,
		Issues:   []string{"not all (0/2) rgws are running"},
		Messages: []string{"0 rgws running, daemons: []"},
	},
}
View Source
var CephDaemonsStatusHealthy = &lcmv1alpha1.CephDaemonsStatus{
	CephDaemons:          CephDaemonsBaseHealthy,
	CephCSIPluginDaemons: CephCSIDaemonsReady,
}
View Source
var CephDaemonsStatusUnhealthy = &lcmv1alpha1.CephDaemonsStatus{
	CephDaemons:          CephDaemonsBaseUnhealthy,
	CephCSIPluginDaemons: CephCSIDaemonsNotReady,
}
View Source
var CephDeployClientCinder = cephlcmv1alpha1.CephClient{
	ClientSpec: cephlcmv1alpha1.ClientSpec{
		Name: "cinder",
		Caps: map[string]string{
			"mon": "allow profile rbd",
			"osd": "profile rbd pool=volumes-hdd, profile rbd-read-only pool=images-hdd, profile rbd pool=backup-hdd",
		},
	},
}
View Source
var CephDeployClientGlance = cephlcmv1alpha1.CephClient{
	ClientSpec: cephlcmv1alpha1.ClientSpec{
		Name: "glance",
		Caps: map[string]string{
			"mon": "allow profile rbd",
			"osd": "profile rbd pool=images-hdd",
		},
	},
}
View Source
var CephDeployClientManila = cephlcmv1alpha1.CephClient{
	ClientSpec: cephlcmv1alpha1.ClientSpec{
		Name: "manila",
		Caps: map[string]string{
			"mds": "allow rw",
			"mgr": "allow rw",
			"osd": "allow rw tag cephfs *=*",
			"mon": `allow r, allow command "auth del", allow command "auth caps", allow command "auth get", allow command "auth get-or-create"`,
		},
	},
}
View Source
var CephDeployClientNova = cephlcmv1alpha1.CephClient{
	ClientSpec: cephlcmv1alpha1.ClientSpec{
		Name: "nova",
		Caps: map[string]string{
			"mon": "allow profile rbd",
			"osd": "profile rbd pool=vms-hdd, profile rbd pool=images-hdd, profile rbd pool=volumes-hdd",
		},
	},
}
View Source
var CephDeployClientTest = cephlcmv1alpha1.CephClient{
	ClientSpec: cephlcmv1alpha1.ClientSpec{
		Name: "test",
		Caps: map[string]string{
			"osd": "custom-caps",
		},
	},
}
View Source
var CephDeployEnsureMonitorIP = cephlcmv1alpha1.CephDeployment{
	ObjectMeta: LcmObjectMeta,
	Spec: cephlcmv1alpha1.CephDeploymentSpec{
		Nodes: []cephlcmv1alpha1.CephDeploymentNode{
			{
				Node: cephv1.Node{
					Name: "node1",
				},
				Roles:     []string{"mon", "mgr", "rgw", "osd", "mds"},
				MonitorIP: "127.0.0.1",
			},
		},
	},
}
View Source
var CephDeployEnsureRbdMirror = func() cephlcmv1alpha1.CephDeployment {
	cd := BaseCephDeployment.DeepCopy()
	cd.Finalizers = []string{"cephdeployment.lcm.mirantis.com/finalizer"}
	cd.Spec.RBDMirror = &cephlcmv1alpha1.CephRBDMirrorSpec{
		Count: 1,
		Peers: []cephlcmv1alpha1.CephRBDMirrorSecret{
			{
				Site:  "mirror1",
				Token: "fake-token",
				Pools: []string{"pool-1", "pool-2"},
			},
		},
	}
	cd.Spec.Pools = []cephlcmv1alpha1.CephPool{CephDeployPoolReplicated}
	return *cd
}()
View Source
var CephDeployEnsureRolesCrush = cephlcmv1alpha1.CephDeployment{
	ObjectMeta: LcmObjectMeta,
	Spec: cephlcmv1alpha1.CephDeploymentSpec{
		Nodes: []cephlcmv1alpha1.CephDeploymentNode{
			{
				Node: cephv1.Node{
					Name: "node1",
					Selection: cephv1.Selection{
						DevicePathFilter: "/dev/vd*",
					},
				},
				Roles: []string{"mon", "mgr", "rgw", "osd", "mds"},
				Crush: map[string]string{
					"region": "region1",
					"zone":   "zone1",
					"rack":   "rack1",
				},
			},
		},
	},
}
View Source
var CephDeployExternal = cephlcmv1alpha1.CephDeployment{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: LcmObjectMeta.Namespace,
		Name:      LcmObjectMeta.Name,
		Finalizers: []string{
			"cephdeployment.lcm.mirantis.com/finalizer",
		},
	},
	Spec: cephlcmv1alpha1.CephDeploymentSpec{
		Network: cephlcmv1alpha1.CephNetworkSpec{
			ClusterNet: "127.0.0.0/32",
			PublicNet:  "127.0.0.0/32",
		},
		External: true,
		Pools:    []cephlcmv1alpha1.CephPool{CephDeployPoolReplicated},
	},
	Status: cephlcmv1alpha1.CephDeploymentStatus{
		Validation: cephlcmv1alpha1.CephDeploymentValidation{
			Result:                  cephlcmv1alpha1.ValidationSucceed,
			LastValidatedGeneration: int64(0),
		},
	},
}
View Source
var CephDeployExternalCephFS = func() cephlcmv1alpha1.CephDeployment {
	cd := CephDeployExternal.DeepCopy()
	cd.Spec.SharedFilesystem = CephSharedFileSystemOk
	return *cd
}()
View Source
var CephDeployExternalRgw = func() cephlcmv1alpha1.CephDeployment {
	cd := CephDeployExternal.DeepCopy()
	cd.Spec.ObjectStorage = &cephlcmv1alpha1.CephObjectStorage{
		Rgw: RgwExternalSslEnabled,
	}
	return *cd
}()
View Source
var CephDeployMosk = cephlcmv1alpha1.CephDeployment{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: LcmObjectMeta.Namespace,
		Name:      LcmObjectMeta.Name,
		Finalizers: []string{
			"cephdeployment.lcm.mirantis.com/finalizer",
		},
	},
	Spec: cephlcmv1alpha1.CephDeploymentSpec{
		Network: BaseCephDeployment.Spec.Network,
		Pools: []cephlcmv1alpha1.CephPool{
			CephDeployPoolReplicated,
			GetCephDeployPool("vms", "vms"),
			GetCephDeployPool("volumes", "volumes"),
			GetCephDeployPool("images", "images"),
			GetCephDeployPool("backup", "backup"),
		},
		Nodes: CephNodesExtendedOk,
		ObjectStorage: &cephlcmv1alpha1.CephObjectStorage{
			Rgw: CephRgwBaseSpec,
		},
		IngressConfig: &CephIngressConfig,
	},
	Status: cephlcmv1alpha1.CephDeploymentStatus{
		Validation: cephlcmv1alpha1.CephDeploymentValidation{
			Result:                  cephlcmv1alpha1.ValidationSucceed,
			LastValidatedGeneration: int64(0),
		},
		ObjectsRefs: CephDeploymentObjectsRefs,
	},
}
View Source
var CephDeployMoskWithCephFS = func() cephlcmv1alpha1.CephDeployment {
	cd := CephDeployMosk.DeepCopy()
	cd.Spec.SharedFilesystem = CephSharedFileSystemOk
	return *cd
}()
View Source
var CephDeployMoskWithoutIngress = func() cephlcmv1alpha1.CephDeployment {
	cd := CephDeployMosk.DeepCopy()
	cd.Spec.IngressConfig = nil
	return *cd
}()
View Source
var CephDeployMoskWithoutRgw = func() cephlcmv1alpha1.CephDeployment {
	cd := CephDeployMoskWithoutIngress.DeepCopy()
	cd.Spec.ObjectStorage = nil
	return *cd
}()
View Source
var CephDeployMultisiteMasterRgw = func() cephlcmv1alpha1.CephDeployment {
	cd := BaseCephDeployment.DeepCopy()
	cd.Spec.ObjectStorage = &cephlcmv1alpha1.CephObjectStorage{
		MultiSite: &cephlcmv1alpha1.CephMultiSite{
			Realms: []cephlcmv1alpha1.CephRGWRealm{
				{
					Name: "realm1",
				},
			},
			ZoneGroups: []cephlcmv1alpha1.CephRGWZoneGroup{
				{
					Name:  "zonegroup1",
					Realm: "realm1",
				},
			},
			Zones: []cephlcmv1alpha1.CephRGWZone{
				{
					Name:      "zone1",
					ZoneGroup: "zonegroup1",
					DataPool: cephlcmv1alpha1.CephPoolSpec{
						DeviceClass:   "hdd",
						FailureDomain: "host",
						ErasureCoded: &cephlcmv1alpha1.CephPoolErasureCodedSpec{
							CodingChunks: 2,
							DataChunks:   1,
						},
					},
					MetadataPool: cephlcmv1alpha1.CephPoolSpec{
						DeviceClass:   "hdd",
						FailureDomain: "host",
						Replicated: &cephlcmv1alpha1.CephPoolReplicatedSpec{
							Size: 3,
						},
					},
				},
			},
		},
		Rgw: cephlcmv1alpha1.CephRGW{
			Name:    "rgw-store",
			Gateway: CephRgwBaseSpec.Gateway,
			Zone: &cephv1.ZoneSpec{
				Name: "zone1",
			},
		},
	}
	return *cd
}()
View Source
var CephDeployMultisiteRgw = func() cephlcmv1alpha1.CephDeployment {
	cd := BaseCephDeployment.DeepCopy()
	cd.Spec.ObjectStorage = &cephlcmv1alpha1.CephObjectStorage{
		MultiSite: &cephlcmv1alpha1.CephMultiSite{
			Realms: []cephlcmv1alpha1.CephRGWRealm{
				{
					Name: "realm1",
					Pull: &cephlcmv1alpha1.CephRGWRealmPull{
						Endpoint:  "http://10.10.0.1",
						AccessKey: "fakekey",
						SecretKey: "fakesecret",
					},
				},
			},
			ZoneGroups: []cephlcmv1alpha1.CephRGWZoneGroup{
				{
					Name:  "zonegroup1",
					Realm: "realm1",
				},
			},
			Zones: []cephlcmv1alpha1.CephRGWZone{
				{
					Name:      "secondary-zone1",
					ZoneGroup: "zonegroup1",
					DataPool: cephlcmv1alpha1.CephPoolSpec{
						DeviceClass:   "hdd",
						CrushRoot:     "default",
						FailureDomain: "host",
						ErasureCoded: &cephlcmv1alpha1.CephPoolErasureCodedSpec{
							CodingChunks: 2,
							DataChunks:   1,
						},
					},
					MetadataPool: cephlcmv1alpha1.CephPoolSpec{
						DeviceClass:   "hdd",
						CrushRoot:     "default",
						FailureDomain: "host",
						Replicated: &cephlcmv1alpha1.CephPoolReplicatedSpec{
							Size: 3,
						},
					},
				},
			},
		},
		Rgw: cephlcmv1alpha1.CephRGW{
			Name:    "rgw-store",
			Gateway: CephRgwBaseSpec.Gateway,
			Zone: &cephv1.ZoneSpec{
				Name: "secondary-zone1",
			},
		},
	}
	return *cd
}()
View Source
var CephDeployNonMosk = cephlcmv1alpha1.CephDeployment{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: LcmObjectMeta.Namespace,
		Name:      LcmObjectMeta.Name,
		Finalizers: []string{
			"cephdeployment.lcm.mirantis.com/finalizer",
		},
		Generation: int64(10),
	},
	Spec: cephlcmv1alpha1.CephDeploymentSpec{
		Pools:   []cephlcmv1alpha1.CephPool{CephDeployPoolReplicated},
		Clients: []cephlcmv1alpha1.CephClient{CephDeployClientTest},
		Nodes:   CephNodesExtendedOk,
		Network: BaseCephDeployment.Spec.Network,
		ObjectStorage: &cephlcmv1alpha1.CephObjectStorage{
			Rgw: CephRgwSpecWithUsersBuckets,
		},
		SharedFilesystem: CephSharedFileSystemOk,
	},
	Status: cephlcmv1alpha1.CephDeploymentStatus{
		Validation: cephlcmv1alpha1.CephDeploymentValidation{
			Result:                  cephlcmv1alpha1.ValidationSucceed,
			LastValidatedGeneration: int64(10),
		},
		ObjectsRefs: CephDeploymentObjectsRefs,
	},
}
View Source
var CephDeployNonMoskForSecret = func() cephlcmv1alpha1.CephDeployment {
	cd := CephDeployNonMosk.DeepCopy()
	cd.Spec.ObjectStorage.Rgw.ObjectUsers = []cephlcmv1alpha1.CephRGWUser{{Name: "test-user"}}
	cd.Spec.ObjectStorage.Rgw.Buckets = []string{"test-bucket"}
	return *cd
}()
View Source
var CephDeployNonMoskWithIngress = func() cephlcmv1alpha1.CephDeployment {
	cd := CephDeployNonMosk.DeepCopy()
	cd.Spec.IngressConfig = &cephlcmv1alpha1.CephDeploymentIngressConfig{
		TLSConfig: &cephlcmv1alpha1.CephDeploymentIngressTLSConfig{
			Domain: "example.com",
		},
	}
	return *cd
}()
View Source
var CephDeployObjectStorageCeph = func() cephlcmv1alpha1.CephDeployment {
	cd := BaseCephDeployment.DeepCopy()
	cd.Spec.ObjectStorage = &cephlcmv1alpha1.CephObjectStorage{
		Rgw: CephRgwBaseSpec,
	}
	return *cd
}()
View Source
var CephDeployObjectStorageRookConfigCeph = func() cephlcmv1alpha1.CephDeployment {
	cd := CephDeployObjectStorageCeph.DeepCopy()
	cd.Spec.RookConfig = map[string]string{
		"mon_max_pg_per_osd":                  "400",
		"rgw enforce swift acls":              "false",
		"rgw_user_quota_bucket_sync_interval": "10",
		"rgw_dns_name":                        "rgw-store.ms2.wxlsd.com",
		"rgw keystone barbican user":          "override-user",
	}
	return *cd
}()
View Source
var CephDeployObjectStorageRookConfigNoBarbicanCeph = func() cephlcmv1alpha1.CephDeployment {
	cd := CephDeployObjectStorageCeph.DeepCopy()
	cd.Spec.RookConfig = map[string]string{
		"cluster network":           "10.0.0.0/24",
		"public network":            "172.16.0.0/24",
		"rgw_trust_forwarded_https": "false",
		"rgw keystone admin user":   "override-user",
	}
	return *cd
}()
View Source
var CephDeployPoolErasureCoded = cephlcmv1alpha1.CephPool{
	Name: "pool1",
	Role: "fake",
	CephPoolSpec: cephlcmv1alpha1.CephPoolSpec{
		DeviceClass:   "hdd",
		CrushRoot:     "default",
		FailureDomain: "host",
		ErasureCoded: &cephlcmv1alpha1.CephPoolErasureCodedSpec{
			CodingChunks: 1,
			DataChunks:   2,
			Algorithm:    "fake",
		},
	},
}
View Source
var CephDeployPoolMirroring = cephlcmv1alpha1.CephPool{
	Name: "pool1",
	Role: "fake",
	CephPoolSpec: cephlcmv1alpha1.CephPoolSpec{
		DeviceClass:   "hdd",
		CrushRoot:     "default",
		FailureDomain: "host",
		Replicated: &cephlcmv1alpha1.CephPoolReplicatedSpec{
			Size: 3,
		},
		Mirroring: &cephlcmv1alpha1.CephPoolMirrorSpec{
			Mode: "pool",
		},
	},
}
View Source
var CephDeployPoolReplicated = cephlcmv1alpha1.CephPool{
	Name: "pool1",
	Role: "fake",
	StorageClassOpts: cephlcmv1alpha1.CephStorageClassSpec{
		Default: true,
	},
	CephPoolSpec: cephlcmv1alpha1.CephPoolSpec{
		DeviceClass:   "hdd",
		CrushRoot:     "default",
		FailureDomain: "host",
		Replicated: &cephlcmv1alpha1.CephPoolReplicatedSpec{
			Size: 3,
		},
	},
}
View Source
var CephDeployRookConfigNoRuntimeNoOsd = func() cephlcmv1alpha1.CephDeployment {
	cd := BaseCephDeployment.DeepCopy()
	cd.Spec.RookConfig = map[string]string{"mon-max-pg-per-osd": "400"}
	return *cd
}()
View Source
var CephDeployRookConfigNoRuntimeOsdParams = func() cephlcmv1alpha1.CephDeployment {
	cd := BaseCephDeployment.DeepCopy()
	cd.Spec.RookConfig = map[string]string{
		"mon-max-pg-per-osd":       "400",
		"osd_max_backfills":        "64",
		"osd_recovery_max_active":  "16",
		"osd_recovery_op_priority": "3",
		"osd_recovery_sleep_hdd":   "0.000000",
	}
	return *cd
}()
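The RookConfig fixtures above mix spellings such as "mon-max-pg-per-osd", "mon_max_pg_per_osd" and "rgw enforce swift acls"; Ceph treats spaces, dashes and underscores in option names as interchangeable, so a single normalization step is enough to compare them. A hypothetical helper:

func normalizeCephOption(name string) string {
	// "rgw enforce swift acls" and "mon-max-pg-per-osd" both normalize to the
	// underscore form Ceph reports in 'ceph config dump'.
	name = strings.ReplaceAll(name, "-", "_")
	return strings.ReplaceAll(name, " ", "_")
}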
View Source
var CephDeployWithWrongNodes = cephlcmv1alpha1.CephDeployment{
	ObjectMeta: metav1.ObjectMeta{
		Namespace:  LcmObjectMeta.Namespace,
		Name:       LcmObjectMeta.Name,
		Generation: int64(10),
	},
	Spec: cephlcmv1alpha1.CephDeploymentSpec{
		Nodes: []cephlcmv1alpha1.CephDeploymentNode{
			{
				Node: cephv1.Node{
					Name: "wrong-node-group",
					Selection: cephv1.Selection{
						Devices: []cephv1.Device{
							{
								Name:   "sda",
								Config: map[string]string{"osdsPerDevice": "2", "deviceClass": "ssd"},
							},
						},
					},
				},
				Roles:        make([]string, 0),
				Crush:        map[string]string{"rack": "A"},
				NodeGroup:    []string{"node-1-random-uuid", "node-2-random-uuid"},
				NodesByLabel: "test_label=test",
			},
		},
	},
}
View Source
var CephDeploymentHealth = lcmv1alpha1.CephDeploymentHealth{
	ObjectMeta: LcmObjectMeta,
}
View Source
var CephDeploymentHealthStatusNotOk = lcmv1alpha1.CephDeploymentHealth{
	ObjectMeta: LcmObjectMeta,
	Status: lcmv1alpha1.CephDeploymentHealthStatus{
		State:            lcmv1alpha1.HealthStateFailed,
		HealthReport:     CephBaseClusterReportNotOk,
		LastHealthCheck:  "time-now-fake",
		LastHealthUpdate: "time-now-fake",
		Issues: []string{
			"RECENT_MGR_MODULE_CRASH: 2 mgr modules have recently crashed",
			"cephcluster 'rook-ceph/cephcluster' object state is 'Failure'",
			"cephcluster 'rook-ceph/cephcluster' object status is not updated for last 5 minutes",
			"daemonset 'lcm-namespace/pelagia-disk-daemon' is not ready",
			"daemonset 'rook-ceph/csi-cephfsplugin' is not ready",
			"daemonset 'rook-ceph/csi-rbdplugin' is not ready",
			"failed to run 'ceph osd tree -f json' command to check replicas sizing",
			"no active mgr",
			"not all (2/3) mons are running",
			"not all osds are in",
			"not all osds are up",
		},
	},
}
View Source
var CephDeploymentHealthStatusOk = lcmv1alpha1.CephDeploymentHealth{
	ObjectMeta: LcmObjectMeta,
	Status: lcmv1alpha1.CephDeploymentHealthStatus{
		State:            lcmv1alpha1.HealthStateOk,
		HealthReport:     CephBaseClusterReportOk,
		LastHealthCheck:  "time-now-fake",
		LastHealthUpdate: "time-now-fake",
	},
}
View Source
var CephDeploymentObjectsRefs = []v1.ObjectReference{
	{
		APIVersion: "lcm.mirantis.com/v1alpha1",
		Kind:       "CephDeploymentHealth",
		Name:       "cephcluster",
		Namespace:  "lcm-namespace",
	},
	{
		APIVersion: "lcm.mirantis.com/v1alpha1",
		Kind:       "CephDeploymentSecret",
		Name:       "cephcluster",
		Namespace:  "lcm-namespace",
	},
	{
		APIVersion: "lcm.mirantis.com/v1alpha1",
		Kind:       "CephDeploymentMaintenance",
		Name:       "cephcluster",
		Namespace:  "lcm-namespace",
	},
}
View Source
var CephDetailsStatusNoIssues = &lcmv1alpha1.ClusterDetails{
	UsageDetails: CephBaseUsageDetails,
	CephEvents:   CephEventsIdle,
}
View Source
var CephDfBase = `` /* 1329-byte string literal not displayed */
View Source
var CephDfExtraPools = `` /* 5279-byte string literal not displayed */
View Source
var CephDiskDaemonDiskReportStringNode1 = GetDiskDaemonReportToString(&DiskDaemonReportOkNode1)
View Source
var CephEventsIdle = &lcmv1alpha1.CephEvents{
	RebalanceDetails:    lcmv1alpha1.CephEventDetails{State: lcmv1alpha1.CephEventIdle},
	PgAutoscalerDetails: lcmv1alpha1.CephEventDetails{State: lcmv1alpha1.CephEventIdle},
}
View Source
var CephEventsProgressing = &lcmv1alpha1.CephEvents{
	RebalanceDetails: lcmv1alpha1.CephEventDetails{
		State:    lcmv1alpha1.CephEventProgressing,
		Progress: "almost done",
		Messages: []lcmv1alpha1.CephEventMessage{
			{
				Message:  "Rebalancing after osd.3 marked in (33s)",
				Progress: "0.948051929473877",
			},
		},
	},
	PgAutoscalerDetails: lcmv1alpha1.CephEventDetails{
		State:    lcmv1alpha1.CephEventProgressing,
		Progress: "more than a half done",
		Messages: []lcmv1alpha1.CephEventMessage{
			{
				Message:  "PG autoscaler increasing pool 9 PGs from 32 to 128 (0s)",
				Progress: "0.5294585938568447",
			},
		},
	},
}
View Source
var CephExternalClusterReportOk = &lcmv1alpha1.CephDeploymentHealthReport{
	RookOperator: RookOperatorStatusOk,
	RookCephObjects: &lcmv1alpha1.RookCephObjectsStatus{
		CephCluster: &CephClusterExternal.Status,
		ObjectStorage: &lcmv1alpha1.ObjectStorageStatus{
			CephObjectStores: map[string]*cephv1.ObjectStoreStatus{
				"rgw-store-external": CephObjectStoreExternalReady.Status,
			},
		},
	},
	CephDaemons: func() *lcmv1alpha1.CephDaemonsStatus {
		daemonsStatus := CephDaemonsStatusHealthy.DeepCopy()
		daemonsStatus.CephDaemons["rgw"] = CephDaemonsCephFsRgwHealthy["rgw"]
		return daemonsStatus
	}(),
	ClusterDetails: &lcmv1alpha1.ClusterDetails{
		UsageDetails: CephBaseUsageDetails,
		CephEvents:   CephEventsIdle,
		RgwInfo: &lcmv1alpha1.RgwInfo{
			PublicEndpoint: "https://127.0.0.1:8443",
		},
	},
}
View Source
var CephExternalSecretReadySecretsInfo = &cephlcmv1alpha1.CephDeploymentSecretsInfo{
	ClientSecrets: []cephlcmv1alpha1.CephDeploymentSecretInfo{
		{
			ObjectName:      "client.test",
			SecretName:      "rook-ceph-client-test",
			SecretNamespace: "rook-ceph",
		},
	},
	RgwUserSecrets: []cephlcmv1alpha1.CephDeploymentSecretInfo{
		{
			ObjectName:      "test-user",
			SecretName:      "rgw-metrics-user-secret",
			SecretNamespace: "rook-ceph",
		},
	},
}
View Source
var CephExtraUsageDetails = &lcmv1alpha1.UsageDetails{
	PoolsDetail: map[string]lcmv1alpha1.PoolUsageStats{
		"pool-hdd":                     {UsedBytes: "251719680", UsedBytesPercentage: "0.080", TotalBytes: "104710103040", AvailableBytes: "104458383360"},
		".mgr":                         {UsedBytes: "2777088", UsedBytesPercentage: "0.001", TotalBytes: "104710103040", AvailableBytes: "104707325952"},
		".rgw.root":                    {UsedBytes: "196608", UsedBytesPercentage: "0.000", TotalBytes: "104710103040", AvailableBytes: "104709906432"},
		"rgw-store.rgw.log":            {UsedBytes: "1990656", UsedBytesPercentage: "0.001", TotalBytes: "104710103040", AvailableBytes: "104708112384"},
		"rgw-store.rgw.buckets.non-ec": {UsedBytes: "0", UsedBytesPercentage: "0.000", TotalBytes: "104710103040", AvailableBytes: "104710103040"},
		"rgw-store.rgw.buckets.index":  {UsedBytes: "0", UsedBytesPercentage: "0.000", TotalBytes: "104710103040", AvailableBytes: "104710103040"},
		"rgw-store.rgw.otp":            {UsedBytes: "0", UsedBytesPercentage: "0.000", TotalBytes: "104710103040", AvailableBytes: "104710103040"},
		"rgw-store.rgw.control":        {UsedBytes: "0", UsedBytesPercentage: "0.000", TotalBytes: "104710103040", AvailableBytes: "104710103040"},
		"rgw-store.rgw.meta":           {UsedBytes: "49152", UsedBytesPercentage: "0.000", TotalBytes: "104710103040", AvailableBytes: "104710053888"},
		"rgw-store.rgw.buckets.data":   {UsedBytes: "0", UsedBytesPercentage: "0.000", TotalBytes: "209420206080", AvailableBytes: "209420206080"},
		"my-cephfs-metadata":           {UsedBytes: "114688", UsedBytesPercentage: "0.000", TotalBytes: "157065150464", AvailableBytes: "157065035776"},
		"my-cephfs-data-1":             {UsedBytes: "0", UsedBytesPercentage: "0.000", TotalBytes: "157065150464", AvailableBytes: "157065150464"},
		"my-cephfs-data-2":             {UsedBytes: "0", UsedBytesPercentage: "0.000", TotalBytes: "209420206080", AvailableBytes: "209420206080"},
	},
	ClassesDetail: map[string]lcmv1alpha1.ClassUsageStats{
		"hdd": {UsedBytes: "82097160192", TotalBytes: "509981204480", AvailableBytes: "427884044288"},
		"ssd": {UsedBytes: "77127680", TotalBytes: "53682896896", AvailableBytes: "53605769216"},
	},
}
View Source
var CephFSNewOk = cephlcmv1alpha1.CephFS{
	Name: "test-cephfs",
	MetadataPool: cephlcmv1alpha1.CephPoolSpec{
		DeviceClass: "hdd",
		Replicated: &cephlcmv1alpha1.CephPoolReplicatedSpec{
			Size: 3,
		},
	},
	DataPools: []cephlcmv1alpha1.CephFSPool{
		{
			Name: "some-pool-name",
			CephPoolSpec: cephlcmv1alpha1.CephPoolSpec{
				DeviceClass: "hdd",
				Replicated: &cephlcmv1alpha1.CephPoolReplicatedSpec{
					Size: 3,
				},
			},
		},
	},
	MetadataServer: cephlcmv1alpha1.CephMetadataServer{
		ActiveCount:   1,
		ActiveStandby: true,
	},
}
View Source
var CephFSOkWithResources = func() cephlcmv1alpha1.CephFS {
	fs := CephFSNewOk.DeepCopy()
	fs.MetadataServer.Resources = &v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("100m"),
			v1.ResourceMemory: resource.MustParse("156Mi"),
		},
		Requests: v1.ResourceList{
			v1.ResourceMemory: resource.MustParse("28Mi"),
			v1.ResourceCPU:    resource.MustParse("10m"),
		},
	}
	return *fs
}()
View Source
var CephFSStorageClass = storagev1.StorageClass{
	ObjectMeta: metav1.ObjectMeta{
		Name: "test-cephfs-some-pool-name",
		Labels: map[string]string{
			"rook-ceph-storage-class":                     "true",
			"rook-ceph-storage-class-keep-on-spec-remove": "false",
		},
	},
	Provisioner: "rook-ceph.cephfs.csi.ceph.com",
	Parameters: map[string]string{
		"clusterID": "rook-ceph",
		"pool":      "test-cephfs-some-pool-name",
		"fsName":    "test-cephfs",
		"csi.storage.k8s.io/provisioner-secret-name":            "rook-csi-cephfs-provisioner",
		"csi.storage.k8s.io/provisioner-secret-namespace":       "rook-ceph",
		"csi.storage.k8s.io/node-stage-secret-name":             "rook-csi-cephfs-node",
		"csi.storage.k8s.io/node-stage-secret-namespace":        "rook-ceph",
		"csi.storage.k8s.io/controller-expand-secret-name":      "rook-csi-cephfs-provisioner",
		"csi.storage.k8s.io/controller-expand-secret-namespace": "rook-ceph",
	},
	AllowVolumeExpansion: &TrueVarForPointer,
	ReclaimPolicy:        &DeleteReclaimPolicyForPointer,
}
View Source
var CephFilesystemActiveStandbyReady = cephv1.CephFilesystem{
	ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "cephfs-2"},
	Spec: cephv1.FilesystemSpec{
		MetadataServer: cephv1.MetadataServerSpec{
			ActiveCount:   1,
			ActiveStandby: true,
		},
	},
	Status: &cephv1.CephFilesystemStatus{Phase: cephv1.ConditionReady},
}
View Source
var CephFilesystemListEmpty = cephv1.CephFilesystemList{Items: []cephv1.CephFilesystem{}}
View Source
var CephFilesystemListMultipleNotReady = cephv1.CephFilesystemList{
	Items: []cephv1.CephFilesystem{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "cephfs-1"},
			Spec:       CephFilesystemNoActiveStandbyReady.Spec,
			Status:     &cephv1.CephFilesystemStatus{Phase: cephv1.ConditionFailure},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "cephfs-2"},
			Spec:       CephFilesystemActiveStandbyReady.Spec,
		},
	},
}
View Source
var CephFilesystemNoActiveStandbyReady = cephv1.CephFilesystem{
	ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "cephfs-1"},
	Spec: cephv1.FilesystemSpec{
		MetadataServer: cephv1.MetadataServerSpec{
			ActiveCount: 1,
		},
	},
	Status: &cephv1.CephFilesystemStatus{Phase: cephv1.ConditionReady},
}
View Source
var CephIngressConfig = cephlcmv1alpha1.CephDeploymentIngressConfig{
	TLSConfig: &cephlcmv1alpha1.CephDeploymentIngressTLSConfig{
		Domain: "test",
		TLSCerts: &cephlcmv1alpha1.CephDeploymentCert{
			Cacert:  "spec-cacert",
			TLSCert: "spec-tlscert",
			TLSKey:  "spec-tlskey",
		},
	},
	Annotations: map[string]string{
		"fake": "fake",
	},
	ControllerClassName: "fake-class-name",
}
View Source
var CephKeysOpenstackSecretBase = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "openstack-ceph-keys",
		Namespace: "openstack-ceph-shared",
	},
	Data: map[string][]byte{
		"client.admin":  []byte("AQAcpuJiITYXMhAAXaOoAqOKJ4mhNOAqxFb1Hw=="),
		"glance":        []byte("client.glance;glance\n;images-hdd:images:hdd"),
		"nova":          []byte("client.nova;nova\n;vms-hdd:vms:hdd;volumes-hdd:volumes:hdd;images-hdd:images:hdd"),
		"cinder":        []byte("client.cinder;cinder\n;volumes-hdd:volumes:hdd;images-hdd:images:hdd;backup-hdd:backup:hdd"),
		"mon_endpoints": []byte("127.0.0.1,127.0.0.2,127.0.0.3"),
	},
}
View Source
var CephKeysOpenstackSecretRgwBase = func() corev1.Secret {
	secret := CephKeysOpenstackSecretBase.DeepCopy()
	secret.Data["rgw_internal"] = []byte("https://rook-ceph-rgw-rgw-store.rook-ceph.svc:8443/")
	secret.Data["rgw_external"] = []byte("https://rgw-store.test/")
	secret.Data["rgw_external_custom_cacert"] = []byte("spec-cacert")
	return *secret
}()
View Source
var CephMgrDumpBaseHealthy = BuildCliOutput(CephMgrDumpTmpl, "mgr dump", nil)
View Source
var CephMgrDumpBaseUnhealthy = BuildCliOutput(CephMgrDumpTmpl, "mgr dump", map[string]string{"available": "false"})
View Source
var CephMgrDumpHAHealthy = BuildCliOutput(CephMgrDumpTmpl, "mgr dump", map[string]string{"standbys": `[{"name": "b"}]`})
View Source
var CephMgrDumpHAUnealthy = BuildCliOutput(CephMgrDumpTmpl, "mgr dump", map[string]string{"activename": `"b"`})
View Source
var CephMgrDumpTmpl = `{
  "active_name": {activename},
  "available": {available},
  "standbys": {standbys}
}`
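CephMgrDumpTmpl is rendered through BuildCliOutput, which by all appearances substitutes the {activename}, {available} and {standbys} placeholders, falling back to defaults unless the caller overrides them (compare the healthy and unhealthy variants above). The package's real helper is not shown here; a minimal stand-in with assumed defaults could look like:

func renderMgrDump(overrides map[string]string) string {
	// Assumed defaults: active mgr "a", available, no standbys.
	values := map[string]string{
		"activename": `"a"`,
		"available":  "true",
		"standbys":   "[]",
	}
	for k, v := range overrides {
		values[k] = v
	}
	out := CephMgrDumpTmpl
	for k, v := range values {
		out = strings.ReplaceAll(out, "{"+k+"}", v)
	}
	return out
}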
View Source
var CephMultisiteClusterReportOk = &lcmv1alpha1.CephDeploymentHealthReport{
	RookOperator:    RookOperatorStatusOk,
	RookCephObjects: RookCephObjectsReportReadyFull,
	CephDaemons: &lcmv1alpha1.CephDaemonsStatus{
		CephDaemons: map[string]lcmv1alpha1.DaemonStatus{
			"mon": CephDaemonsCephFsRgwHealthy["mon"],
			"mgr": CephDaemonsCephFsRgwHealthy["mgr"],
			"osd": CephDaemonsCephFsRgwHealthy["osd"],
			"mds": {
				Status: lcmv1alpha1.DaemonStateOk,
				Messages: []string{
					"mds active: 1/1 (cephfs 'cephfs-1')", "mds active: 1/1, standby-replay: 1/1 (cephfs 'cephfs-2')",
				},
			},
			"rgw": {
				Status:   lcmv1alpha1.DaemonStateOk,
				Messages: []string{"3 rgws running, daemons: [10223488 11556688 12065099]"},
			},
		},
		CephCSIPluginDaemons: CephCSIDaemonsReady,
	},
	ClusterDetails: &lcmv1alpha1.ClusterDetails{
		UsageDetails: CephExtraUsageDetails,
		CephEvents:   CephEventsIdle,
		RgwInfo: &lcmv1alpha1.RgwInfo{
			PublicEndpoint:   "https://rgw-store.example.com",
			MultisiteDetails: CephMultisiteStateOk,
		},
	},
	OsdAnalysis: OsdSpecAnalysisOk,
}
View Source
var CephMultisiteStateFailed = &lcmv1alpha1.MultisiteState{
	MetadataSyncState: lcmv1alpha1.MultiSiteFailed,
	DataSyncState:     lcmv1alpha1.MultiSiteFailed,
	Messages:          []string{"failed to run 'radosgw-admin sync status --rgw-zonegroup=zonegroup1 --rgw-zone=zone1' command to check multisite status for zone 'zone1'"},
}
View Source
var CephMultisiteStateOk = &lcmv1alpha1.MultisiteState{
	MetadataSyncState: lcmv1alpha1.MultiSiteSyncing,
	DataSyncState:     lcmv1alpha1.MultiSiteSyncing,
	MasterZone:        true,
}
View Source
var CephNodesExtendedInvalid = []cephlcmv1alpha1.CephDeploymentNode{
	{
		Node: cephv1.Node{
			Name: "node-1",
			Selection: cephv1.Selection{
				Devices: []cephv1.Device{
					{
						Name:   "sda",
						Config: map[string]string{"deviceClass": "hdd"},
					},
					{
						Name:   "sdb",
						Config: map[string]string{},
					},
				},
			},
			Config: map[string]string{"osdsPerDevice": "3.5"},
		},
		Crush: map[string]string{"datecenter": "fr"},
		Roles: []string{"mon"},
	},
	{
		Node: cephv1.Node{
			Name: "node-2",
			Selection: cephv1.Selection{
				Devices: []cephv1.Device{
					{
						Name:   "sda",
						Config: map[string]string{"osdsPerDevice": "1", "deviceClass": "hdd"},
					},
					{
						Name:   "sdb",
						Config: map[string]string{"osdsPerDevice": "2", "deviceClass": "hdd"},
					},
					{
						Name:   "sdc",
						Config: map[string]string{"metadataDevice": "sde", "deviceClass": "some-custom-class"},
					},
				},
			},
		},
		Roles: []string{"mon"},
	},
	{
		Node: cephv1.Node{
			Name:   "node-3",
			Config: map[string]string{"osdsPerDevice": "2"},
			Selection: cephv1.Selection{
				Devices: []cephv1.Device{
					{
						Name:   "sda",
						Config: map[string]string{"deviceClass": "unknown-class", "osdsPerDevice": "3.5"},
					},
				},
			},
		},
		Roles: []string{"mon"},
	},
	{
		Node: cephv1.Node{
			Name:   "node-4",
			Config: map[string]string{"osdsPerDevice": "2"},
			Selection: cephv1.Selection{
				Devices: []cephv1.Device{
					{
						Name:   "sda",
						Config: map[string]string{"deviceClass": "hdd"},
					},
				},
			},
		},
		Roles: []string{"mon"},
	},
	{
		Node: cephv1.Node{
			Name: "node-5",
			Selection: cephv1.Selection{
				UseAllDevices: &[]bool{true}[0],
			},
		},
	},
	{
		Node: cephv1.Node{
			Name: "node-6",
			Selection: cephv1.Selection{
				DeviceFilter: "sda",
			},
		},
	},
}
View Source
var CephNodesExtendedOk = []cephlcmv1alpha1.CephDeploymentNode{
	{
		Node: cephv1.Node{
			Name: "node-1",
			Selection: cephv1.Selection{
				Devices: []cephv1.Device{
					{
						Name:   "sda",
						Config: map[string]string{"deviceClass": "hdd"},
					},
					{
						Name:   "sdb",
						Config: map[string]string{"deviceClass": "hdd"},
					},
				},
			},
		},
		Roles: []string{"mon", "mgr", "mds"},
	},
	{
		Node: cephv1.Node{
			Name: "node-2",
			Selection: cephv1.Selection{
				Devices: []cephv1.Device{
					{
						Name:   "sda",
						Config: map[string]string{"osdsPerDevice": "1", "deviceClass": "hdd"},
					},
					{
						Name:   "sdb",
						Config: map[string]string{"osdsPerDevice": "2", "deviceClass": "hdd"},
					},
					{
						Name:   "sdc",
						Config: map[string]string{"metadataDevice": "sde", "deviceClass": "hdd"},
					},
				},
			},
		},
		Roles: []string{"mon"},
	},
	{
		Node: cephv1.Node{
			Name:   "node-3",
			Config: map[string]string{"osdsPerDevice": "2"},
			Selection: cephv1.Selection{
				Devices: []cephv1.Device{
					{
						Name:   "sda",
						Config: map[string]string{"deviceClass": "hdd"},
					},
				},
			},
		},
		Roles: []string{"mon"},
	},
}
View Source
var CephNodesOk = []cephlcmv1alpha1.CephDeploymentNode{
	{
		Node: cephv1.Node{
			Name: "node-1",
			Selection: cephv1.Selection{
				Devices: []cephv1.Device{
					{
						Name:   "sda",
						Config: map[string]string{"deviceClass": "hdd"},
					},
				},
			},
		},
		Roles: []string{"mon", "mgr"},
	},
	{
		Node: cephv1.Node{
			Name: "node-2",
			Selection: cephv1.Selection{
				Devices: []cephv1.Device{
					{
						Name:   "sda",
						Config: map[string]string{"osdsPerDevice": "1", "deviceClass": "hdd"},
					},
				},
			},
		},
		Roles: []string{"mon"},
	},
	{
		Node: cephv1.Node{
			Name:   "node-3",
			Config: map[string]string{"osdsPerDevice": "2"},
			Selection: cephv1.Selection{
				Devices: []cephv1.Device{
					{
						Name:   "sda",
						Config: map[string]string{"deviceClass": "hdd"},
					},
				},
			},
		},
		Roles: []string{"mon"},
	},
}
View Source
var CephObjectRealmListEmpty = cephv1.CephObjectRealmList{Items: []cephv1.CephObjectRealm{}}
View Source
var CephObjectRealmListNotReady = cephv1.CephObjectRealmList{
	Items: []cephv1.CephObjectRealm{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "realm-1"},
			Status:     &cephv1.Status{Phase: "Failed"},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "realm-2"},
		},
	},
}
View Source
var CephObjectRealmListReady = cephv1.CephObjectRealmList{
	Items: []cephv1.CephObjectRealm{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "realm-1"},
			Status:     &cephv1.Status{Phase: "Ready"},
		},
	},
}
View Source
var CephObjectStoreBase = &cephv1.CephObjectStore{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rgw-store",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.ObjectStoreSpec{
		DefaultRealm:          true,
		PreservePoolsOnDelete: false,
		DataPool: cephv1.PoolSpec{
			DeviceClass: "hdd",
			ErasureCoded: cephv1.ErasureCodedSpec{
				CodingChunks: 2,
				DataChunks:   1,
			},
		},
		MetadataPool: cephv1.PoolSpec{
			DeviceClass: "hdd",
			Replicated: cephv1.ReplicatedSpec{
				Size: 3,
			},
		},
		Gateway: cephv1.GatewaySpec{
			Annotations: map[string]string{
				"cephdeployment.lcm.mirantis.com/config-global-updated":                 "some-time",
				"cephdeployment.lcm.mirantis.com/ssl-cert-generated":                    "some-time",
				"cephdeployment.lcm.mirantis.com/config-client.rgw.rgw.store.a-updated": "some-time",
			},
			SSLCertificateRef: "rgw-ssl-certificate",
			CaBundleRef:       "rgw-ssl-certificate",
			Instances:         2,
			Port:              80,
			SecurePort:        8443,
			Placement: cephv1.Placement{
				NodeAffinity: &corev1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
						NodeSelectorTerms: []corev1.NodeSelectorTerm{
							{
								MatchExpressions: []corev1.NodeSelectorRequirement{
									{
										Key:      "ceph_role_mon",
										Operator: "In",
										Values: []string{
											"true",
										},
									},
								},
							},
						},
					},
				},
				Tolerations: []corev1.Toleration{
					{
						Key:      "ceph_role_mon",
						Operator: "Exists",
					},
				},
			},
		},
	},
}
View Source
var CephObjectStoreBaseListReady = cephv1.CephObjectStoreList{
	Items: []cephv1.CephObjectStore{*CephObjectStoreBaseReady},
}
View Source
var CephObjectStoreBaseReady = func() *cephv1.CephObjectStore {
	store := CephObjectStoreBase.DeepCopy()
	store.Status = &cephv1.ObjectStoreStatus{
		Phase: "Ready",
	}
	return store
}()
View Source
var CephObjectStoreExternal = &cephv1.CephObjectStore{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rgw-store",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.ObjectStoreSpec{
		Gateway: cephv1.GatewaySpec{
			Annotations: map[string]string{
				"cephdeployment.lcm.mirantis.com/ssl-cert-generated": "some-time",
			},
			Port:              80,
			SecurePort:        8443,
			SSLCertificateRef: "rgw-ssl-certificate",
			CaBundleRef:       "rgw-ssl-certificate",
			ExternalRgwEndpoints: []cephv1.EndpointAddress{
				{
					IP:       "127.0.0.1",
					Hostname: "fake-1",
				},
			},
		},
	},
}
View Source
var CephObjectStoreExternalReady = cephv1.CephObjectStore{
	ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "rgw-store-external"},
	Spec: cephv1.ObjectStoreSpec{
		Gateway: cephv1.GatewaySpec{
			ExternalRgwEndpoints: []cephv1.EndpointAddress{
				{
					IP:       "127.0.0.1",
					Hostname: "external-rgw-endpoint",
				},
			},
		},
	},
	Status: &cephv1.ObjectStoreStatus{
		Phase: cephv1.ConditionReady,
		Info: map[string]string{
			"endpont":        "http://127.0.0.1:80",
			"secureEndpoint": "https://127.0.0.1:8443",
		},
	},
}
View Source
var CephObjectStoreListEmpty = cephv1.CephObjectStoreList{Items: []cephv1.CephObjectStore{}}
View Source
var CephObjectStoreListReady = cephv1.CephObjectStoreList{
	Items: []cephv1.CephObjectStore{CephObjectStoreReady},
}
View Source
var CephObjectStoreMultisiteSyncList = cephv1.CephObjectStoreList{
	Items: []cephv1.CephObjectStore{
		func() cephv1.CephObjectStore {
			rgw := CephObjectStoreWithZone.DeepCopy()
			rgw.Spec.Zone.Name = "secondary-zone1"
			rgw.Spec.Gateway.DisableMultisiteSyncTraffic = true
			return *rgw
		}(),
		*CephObjectStoreWithSyncDaemon,
	},
}
View Source
var CephObjectStoreReady = cephv1.CephObjectStore{
	ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "rgw-store"},
	Spec: cephv1.ObjectStoreSpec{
		Gateway: cephv1.GatewaySpec{
			Instances:   2,
			CaBundleRef: "rgw-ssl-certificate",
		},
	},
	Status: &cephv1.ObjectStoreStatus{Phase: cephv1.ConditionReady},
}
View Source
var CephObjectStoreSyncReady = cephv1.CephObjectStore{
	ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "rgw-store-sync"},
	Spec: cephv1.ObjectStoreSpec{
		Gateway: cephv1.GatewaySpec{
			Instances:                   1,
			DisableMultisiteSyncTraffic: false,
		},
	},
	Status: &cephv1.ObjectStoreStatus{Phase: cephv1.ConditionReady},
}
View Source
var CephObjectStoreUserListEmpty = cephv1.CephObjectStoreUserList{Items: []cephv1.CephObjectStoreUser{}}
View Source
var CephObjectStoreUserListMetrics = cephv1.CephObjectStoreUserList{
	Items: []cephv1.CephObjectStoreUser{*RgwUserWithStatus(RgwCeilometerUser, "Ready")},
}
View Source
var CephObjectStoreUserListNotReady = cephv1.CephObjectStoreUserList{
	Items: []cephv1.CephObjectStoreUser{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "rgw-user-1"},
			Status:     &cephv1.ObjectStoreUserStatus{Phase: "Failed"},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "rgw-user-2"},
		},
	},
}
View Source
var CephObjectStoreUserListReady = cephv1.CephObjectStoreUserList{
	Items: []cephv1.CephObjectStoreUser{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "rgw-user-1"},
			Status:     &cephv1.ObjectStoreUserStatus{Phase: "Ready"},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "rgw-user-2"},
			Status:     &cephv1.ObjectStoreUserStatus{Phase: "Ready"},
		},
	},
}
View Source
var CephObjectStoreWithSyncDaemon = func() *cephv1.CephObjectStore {
	store := CephObjectStoreWithZone.DeepCopy()
	store.Name = "rgw-store-sync"
	store.Spec.Zone.Name = "secondary-zone1"
	delete(store.Spec.Gateway.Annotations, "cephdeployment.lcm.mirantis.com/config-client.rgw.rgw.store.a-updated")
	store.Spec.Gateway.Annotations["cephdeployment.lcm.mirantis.com/config-client.rgw.rgw.store.sync.a-updated"] = "some-time-sync"
	store.Spec.Gateway.DisableMultisiteSyncTraffic = false
	store.Spec.Gateway.Instances = 1
	store.Spec.Gateway.SecurePort = 0
	store.Spec.Gateway.Port = 8380
	store.Spec.Gateway.SSLCertificateRef = ""
	return store
}()
View Source
var CephObjectStoreWithZone = &cephv1.CephObjectStore{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rgw-store",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.ObjectStoreSpec{
		PreservePoolsOnDelete: false,
		Zone: cephv1.ZoneSpec{
			Name: "zone1",
		},
		Gateway: cephv1.GatewaySpec{
			Annotations: map[string]string{
				"cephdeployment.lcm.mirantis.com/config-global-updated":                 "some-time",
				"cephdeployment.lcm.mirantis.com/ssl-cert-generated":                    "some-time",
				"cephdeployment.lcm.mirantis.com/config-client.rgw.rgw.store.a-updated": "some-time",
			},
			SSLCertificateRef: "rgw-ssl-certificate",
			CaBundleRef:       "rgw-ssl-certificate",
			Instances:         2,
			Port:              80,
			SecurePort:        8443,
			Placement: cephv1.Placement{
				NodeAffinity: &corev1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
						NodeSelectorTerms: []corev1.NodeSelectorTerm{
							{
								MatchExpressions: []corev1.NodeSelectorRequirement{
									{
										Key:      "ceph_role_mon",
										Operator: "In",
										Values: []string{
											"true",
										},
									},
								},
							},
						},
					},
				},
				Tolerations: []corev1.Toleration{
					{
						Key:      "ceph_role_mon",
						Operator: "Exists",
					},
				},
			},
		},
	},
}
View Source
var CephObjectStoresMultisiteSyncDaemonPhaseNotReady = cephv1.CephObjectStoreList{
	Items: []cephv1.CephObjectStore{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "rgw-store"},
			Spec: cephv1.ObjectStoreSpec{
				Gateway: cephv1.GatewaySpec{
					DisableMultisiteSyncTraffic: true,
					Instances:                   2,
				},
			},
			Status: &cephv1.ObjectStoreStatus{Phase: cephv1.ConditionFailure},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "rgw-store-sync"},
			Spec:       CephObjectStoreSyncReady.Spec,
		},
	},
}
View Source
var CephObjectStoresMultisiteSyncDaemonPhaseReady = cephv1.CephObjectStoreList{
	Items: []cephv1.CephObjectStore{
		func() cephv1.CephObjectStore {
			rgw := CephObjectStoreReady.DeepCopy()
			rgw.Spec.Gateway.DisableMultisiteSyncTraffic = true
			return *rgw
		}(),
		CephObjectStoreSyncReady,
	},
}
View Source
var CephObjectZoneGroupListEmpty = cephv1.CephObjectZoneGroupList{Items: []cephv1.CephObjectZoneGroup{}}
View Source
var CephObjectZoneGroupListNotReady = cephv1.CephObjectZoneGroupList{
	Items: []cephv1.CephObjectZoneGroup{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "zonegroup-1"},
			Status:     &cephv1.Status{Phase: "Failed"},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "zonegroup-2"},
		},
	},
}
View Source
var CephObjectZoneGroupListReady = cephv1.CephObjectZoneGroupList{
	Items: []cephv1.CephObjectZoneGroup{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "zonegroup-1"},
			Status:     &cephv1.Status{Phase: "Ready"},
		},
	},
}
View Source
var CephObjectZoneListEmpty = cephv1.CephObjectZoneList{Items: []cephv1.CephObjectZone{}}
View Source
var CephObjectZoneListNotReady = cephv1.CephObjectZoneList{
	Items: []cephv1.CephObjectZone{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "zone-1"},
			Spec:       cephv1.ObjectZoneSpec{ZoneGroup: "zonegroup-1"},
			Status:     &cephv1.Status{Phase: "Failed"},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "zone-2"},
			Spec:       cephv1.ObjectZoneSpec{ZoneGroup: "zonegroup-2"},
		},
	},
}
View Source
var CephObjectZoneListReady = cephv1.CephObjectZoneList{
	Items: []cephv1.CephObjectZone{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "zone-1"},
			Spec:       cephv1.ObjectZoneSpec{ZoneGroup: "zonegroup-1"},
			Status:     &cephv1.Status{Phase: "Ready"},
		},
	},
}
View Source
var CephOsdCrushRuleDump = BuildCliOutput(CephCrushRuleDumpTmpl, "osd crush rule dump", nil)
View Source
var CephOsdInfoOutput = BuildCliOutput(CephOsdInfoOutputTmpl, "", map[string]string{"stray": `{"osd":  2,"uuid": "61869d90-2c45-4f02-b7c3-96955f41e2ca"},`})
View Source
var CephOsdInfoOutputNoStray = BuildCliOutput(CephOsdInfoOutputTmpl, "", map[string]string{"stray": "\n"})
View Source
var CephOsdInfoOutputTmpl = `` /* 767-byte string literal not displayed */
View Source
var CephOsdLspools = `` /* 236-byte string literal not displayed */
View Source
var CephOsdLspoolsWithRgwDefault = `` /* 298-byte string literal not displayed */
View Source
var CephOsdMetadataOutput = BuildCliOutput(CephOsdMetadataOutputTmpl, "", map[string]string{"stray": `{"id": 2},`})
View Source
var CephOsdMetadataOutputNoStray = BuildCliOutput(CephOsdMetadataOutputTmpl, "", map[string]string{"stray": "\n"})
View Source
var CephOsdMetadataOutputTmpl = `` /* 2518-byte string literal not displayed */
View Source
var CephOsdRemoveTaskBase = lcmv1alpha1.CephOsdRemoveTask{
	ObjectMeta: getObjectMeta("0"),
	TypeMeta: metav1.TypeMeta{
		APIVersion: "lcm.mirantis.com/v1alpha1",
		Kind:       "CephOsdRemoveTask",
	},
}
View Source
var CephOsdRemoveTaskCompleted = func() *lcmv1alpha1.CephOsdRemoveTask {
	task := CephOsdRemoveTaskProcessing.DeepCopy()
	task.Spec = &lcmv1alpha1.CephOsdRemoveTaskSpec{Approve: true}
	task.Status.Phase = lcmv1alpha1.TaskPhaseCompleted
	task.Status.Messages = append(task.Status.Messages, "cephosdremovetask moved to 'Completed' phase: osd remove completed")
	task.Status.Conditions = append(task.Status.Conditions, lcmv1alpha1.CephOsdRemoveTaskCondition{
		Phase:     lcmv1alpha1.TaskPhaseCompleted,
		Timestamp: "time-17",
		CephClusterSpecVersion: &lcmv1alpha1.CephClusterSpecVersion{
			Generation: 4,
		},
	})
	task.Status.PhaseInfo = "osd remove completed"
	task.Status.RemoveInfo = NodesRemoveFullFinishedStatus.DeepCopy()
	task.Status.RemoveInfo.Warnings = nil
	return task
}()
View Source
var CephOsdRemoveTaskCompletedWithWarnings = func() *lcmv1alpha1.CephOsdRemoveTask {
	task := CephOsdRemoveTaskProcessing.DeepCopy()
	task.Spec = &lcmv1alpha1.CephOsdRemoveTaskSpec{Approve: true}
	task.Status.Phase = lcmv1alpha1.TaskPhaseCompletedWithWarnings
	task.Status.Messages = append(task.Status.Messages, "cephosdremovetask moved to 'CompletedWithWarnings' phase: osd remove completed")
	task.Status.Conditions = append(task.Status.Conditions, lcmv1alpha1.CephOsdRemoveTaskCondition{
		Phase:     lcmv1alpha1.TaskPhaseCompletedWithWarnings,
		Timestamp: "time-16",
		CephClusterSpecVersion: &lcmv1alpha1.CephClusterSpecVersion{
			Generation: 4,
		},
	})
	task.Status.PhaseInfo = "osd remove completed"
	task.Status.RemoveInfo = GetInfoWithStatus(StrayOnlyInCrushRemoveMap,
		map[string]*lcmv1alpha1.RemoveResult{
			"2": {
				OsdRemoveStatus:    &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveFinished, FinishedAt: "time-13"},
				DeviceCleanUpJob:   &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveSkipped},
				DeployRemoveStatus: &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveFinished},
			},
		})
	return task
}()
View Source
var CephOsdRemoveTaskFailed = func() *lcmv1alpha1.CephOsdRemoveTask {
	task := CephOsdRemoveTaskProcessing.DeepCopy()
	task.Spec = &lcmv1alpha1.CephOsdRemoveTaskSpec{Approve: true}
	task.Status = CephOsdRemoveTaskProcessing.Status.DeepCopy()
	task.Status.Phase = lcmv1alpha1.TaskPhaseFailed
	task.Status.PhaseInfo = "osd remove failed"
	task.Status.Messages = append(task.Status.Messages, "cephosdremovetask moved to 'Failed' phase: osd remove failed")
	task.Status.RemoveInfo = GetInfoWithStatus(StrayOnlyInCrushRemoveMap,
		map[string]*lcmv1alpha1.RemoveResult{
			"2": {OsdRemoveStatus: &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveFailed}},
		},
	)
	task.Status.RemoveInfo.Issues = []string{"[node '__stray'] failed to remove osd '2'"}
	task.Status.Conditions = append(task.Status.Conditions, lcmv1alpha1.CephOsdRemoveTaskCondition{
		Phase:     lcmv1alpha1.TaskPhaseFailed,
		Timestamp: "time-18",
		CephClusterSpecVersion: &lcmv1alpha1.CephClusterSpecVersion{
			Generation: 4,
		},
	})
	return task
}()
View Source
var CephOsdRemoveTaskFullInited = lcmv1alpha1.CephOsdRemoveTask{
	ObjectMeta: metav1.ObjectMeta{
		Name:              "osdremove-task",
		Namespace:         LcmObjectMeta.Namespace,
		CreationTimestamp: metav1.Time{Time: time.Date(2025, 4, 7, 14, 30, 45, 0, time.Local)},
		ResourceVersion:   "1",
		OwnerReferences: []metav1.OwnerReference{
			{
				APIVersion: "lcm.mirantis.com/v1alpha1",
				Kind:       "CephDeploymentHealth",
				Name:       LcmObjectMeta.Name,
			},
		},
	},
	TypeMeta: metav1.TypeMeta{
		APIVersion: "lcm.mirantis.com/v1alpha1",
		Kind:       "CephOsdRemoveTask",
	},
	Status: &lcmv1alpha1.CephOsdRemoveTaskStatus{
		Phase:      lcmv1alpha1.TaskPhasePending,
		Messages:   initMessages,
		Conditions: initConditions,
	},
}
View Source
var CephOsdRemoveTaskInited = lcmv1alpha1.CephOsdRemoveTask{
	ObjectMeta: getObjectMeta("1"),
	TypeMeta: metav1.TypeMeta{
		APIVersion: "lcm.mirantis.com/v1alpha1",
		Kind:       "CephOsdRemoveTask",
	},
	Status: &lcmv1alpha1.CephOsdRemoveTaskStatus{
		Phase:      lcmv1alpha1.TaskPhasePending,
		PhaseInfo:  "initializing",
		Messages:   initMessages,
		Conditions: initConditions,
	},
}
View Source
var CephOsdRemoveTaskListEmpty = &lcmv1alpha1.CephOsdRemoveTaskList{}
View Source
var CephOsdRemoveTaskOld = lcmv1alpha1.CephOsdRemoveTask{
	ObjectMeta: metav1.ObjectMeta{
		Name:              "old-osdremove-task",
		Namespace:         LcmObjectMeta.Namespace,
		CreationTimestamp: metav1.Time{Time: time.Date(2025, 4, 6, 14, 30, 45, 0, time.Local)},
	},
}
View Source
var CephOsdRemoveTaskOldCompleted = lcmv1alpha1.CephOsdRemoveTask{
	ObjectMeta: metav1.ObjectMeta{
		Name:              "old-completed-osdremove-task",
		Namespace:         LcmObjectMeta.Namespace,
		CreationTimestamp: metav1.Time{Time: time.Date(2025, 4, 6, 13, 30, 45, 0, time.Local)},
	},
	Status: &lcmv1alpha1.CephOsdRemoveTaskStatus{
		Phase: lcmv1alpha1.TaskPhaseCompleted,
	},
}
View Source
var CephOsdRemoveTaskOnApproveWaiting = func() *lcmv1alpha1.CephOsdRemoveTask {
	task := CephOsdRemoveTaskOnValidation.DeepCopy()
	task.Status.Phase = lcmv1alpha1.TaskPhaseApproveWaiting
	task.Status.Messages = append(task.Status.Messages, "cephosdremovetask moved to 'ApproveWaiting' phase: validation completed, waiting approve")
	task.Status.Conditions = append(task.Status.Conditions, lcmv1alpha1.CephOsdRemoveTaskCondition{
		Phase:     lcmv1alpha1.TaskPhaseApproveWaiting,
		Timestamp: "time-5",
		CephClusterSpecVersion: &lcmv1alpha1.CephClusterSpecVersion{
			Generation: 4,
		},
	})
	task.Status.PhaseInfo = "validation completed, waiting approve"
	task.Status.RemoveInfo = StrayOnlyInCrushRemoveMap.DeepCopy()
	return task
}()
View Source
var CephOsdRemoveTaskOnApproved = func() *lcmv1alpha1.CephOsdRemoveTask {
	task := CephOsdRemoveTaskOnApproveWaiting.DeepCopy()
	task.Spec = &lcmv1alpha1.CephOsdRemoveTaskSpec{Approve: true}
	task.Status.Phase = lcmv1alpha1.TaskPhaseWaitingOperator
	task.Status.Messages = append(task.Status.Messages, "cephosdremovetask moved to 'WaitingOperator' phase: approve received, wait rook-operator stop")
	task.Status.Conditions = append(task.Status.Conditions, lcmv1alpha1.CephOsdRemoveTaskCondition{
		Phase:     lcmv1alpha1.TaskPhaseWaitingOperator,
		Timestamp: "time-9",
		CephClusterSpecVersion: &lcmv1alpha1.CephClusterSpecVersion{
			Generation: 4,
		},
	})
	task.Status.PhaseInfo = "approve received, wait rook-operator stop"
	return task
}()
View Source
var CephOsdRemoveTaskOnValidation = func() *lcmv1alpha1.CephOsdRemoveTask {
	task := CephOsdRemoveTaskFullInited.DeepCopy()
	task.Status.Phase = lcmv1alpha1.TaskPhaseValidating
	task.Status.Messages = append(task.Status.Messages, "cephosdremovetask moved to 'Validating' phase: validation")
	task.Status.Conditions = append(task.Status.Conditions, lcmv1alpha1.CephOsdRemoveTaskCondition{
		Phase:     lcmv1alpha1.TaskPhaseValidating,
		Timestamp: "time-1",
		CephClusterSpecVersion: &lcmv1alpha1.CephClusterSpecVersion{
			Generation: 4,
		},
	})
	task.Status.PhaseInfo = "validation"
	return task
}()
View Source
var CephOsdRemoveTaskProcessing = func() *lcmv1alpha1.CephOsdRemoveTask {
	task := CephOsdRemoveTaskOnApproved.DeepCopy()
	task.Spec = &lcmv1alpha1.CephOsdRemoveTaskSpec{Approve: true}
	task.Status.Phase = lcmv1alpha1.TaskPhaseProcessing
	task.Status.Messages = append(task.Status.Messages, "cephosdremovetask moved to 'Processing' phase: processing")
	task.Status.Conditions = append(task.Status.Conditions, lcmv1alpha1.CephOsdRemoveTaskCondition{
		Phase:     lcmv1alpha1.TaskPhaseProcessing,
		Timestamp: "time-13",
		CephClusterSpecVersion: &lcmv1alpha1.CephClusterSpecVersion{
			Generation: 4,
		},
	})
	task.Status.PhaseInfo = "processing"
	return task
}()
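Note: the CephOsdRemoveTask fixtures above model a linear phase progression (Pending, Validating, ApproveWaiting, WaitingOperator, Processing, then Completed or Failed), each one derived from the previous fixture by deep-copying it and appending a condition. A test that needs an additional state can follow the same pattern; this is a minimal sketch, and the timestamp and message below are illustrative values, not ones used by the package:

var cephOsdRemoveTaskAborted = func() *lcmv1alpha1.CephOsdRemoveTask {
	// derive from the latest phase fixture, as the package's own fixtures do
	task := CephOsdRemoveTaskProcessing.DeepCopy()
	task.Status.Phase = lcmv1alpha1.TaskPhaseFailed
	task.Status.PhaseInfo = "osd remove aborted"
	task.Status.Messages = append(task.Status.Messages, "cephosdremovetask moved to 'Failed' phase: osd remove aborted")
	task.Status.Conditions = append(task.Status.Conditions, lcmv1alpha1.CephOsdRemoveTaskCondition{
		Phase:                  lcmv1alpha1.TaskPhaseFailed,
		Timestamp:              "time-19", // illustrative timestamp
		CephClusterSpecVersion: &lcmv1alpha1.CephClusterSpecVersion{Generation: 4},
	})
	return task
}()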
View Source
var CephOsdTreeForSizingCheck = `` /* 2903-byte string literal not displayed */
View Source
var CephOsdTreeOutput = BuildCliOutput(CephOsdTreeOutputTmpl, "", map[string]string{"childs_1": "20,25,30", "childs_2": "0,4,5"})
View Source
var CephOsdTreeOutputNoOsdsOnHost = BuildCliOutput(CephOsdTreeOutputTmpl, "", map[string]string{"childs_1": "\n", "childs_2": "\n"})
View Source
var CephOsdTreeOutputTmpl = `` /* 623-byte string literal not displayed */
View Source
var CephPoolsDetails = `` /* 167-byte string literal not displayed */
View Source
var CephRBDMirror = cephv1.CephRBDMirror{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "cephcluster",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.RBDMirroringSpec{
		Count: 1,
		Peers: cephv1.MirroringPeerSpec{
			SecretNames: []string{"rbd-mirror-token-mirror1-pool-1", "rbd-mirror-token-mirror1-pool-2"},
		},
	},
}
View Source
var CephRBDMirrorSecret1 = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rbd-mirror-token-mirror1-pool-1",
		Namespace: "rook-ceph",
	},
	Data: map[string][]byte{
		"pool":  []byte("pool-1"),
		"token": []byte("fake-token"),
	},
	Type: "RBDPeer",
}
View Source
var CephRBDMirrorSecret2 = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rbd-mirror-token-mirror1-pool-2",
		Namespace: "rook-ceph",
	},
	Data: map[string][]byte{
		"pool":  []byte("pool-2"),
		"token": []byte("fake-token"),
	},
	Type: "RBDPeer",
}
View Source
var CephRBDMirrorUpdatedReady = cephv1.CephRBDMirror{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "cephcluster",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.RBDMirroringSpec{
		Count: 2,
		Peers: cephv1.MirroringPeerSpec{
			SecretNames: []string{"rbd-mirror-token-mirror1-pool-1", "rbd-mirror-token-mirror1-pool-2"},
		},
	},
	Status: &cephv1.Status{Phase: "Ready"},
}
View Source
var CephRBDMirrorsEmpty = cephv1.CephRBDMirrorList{
	Items: []cephv1.CephRBDMirror{},
}
View Source
var CephRBDMirrorsList = cephv1.CephRBDMirrorList{
	Items: []cephv1.CephRBDMirror{CephRBDMirror},
}
View Source
var CephRBDMirrorsListReady = cephv1.CephRBDMirrorList{
	Items: []cephv1.CephRBDMirror{
		func() cephv1.CephRBDMirror {
			mirror := CephRBDMirrorUpdatedReady.DeepCopy()
			mirror.Spec.Count = 1
			return *mirror
		}(),
	},
}
View Source
var CephRgwBaseSpec = cephlcmv1alpha1.CephRGW{
	Name:                  "rgw-store",
	PreservePoolsOnDelete: false,
	DataPool: &cephlcmv1alpha1.CephPoolSpec{
		DeviceClass: "hdd",
		ErasureCoded: &cephlcmv1alpha1.CephPoolErasureCodedSpec{
			CodingChunks: 2,
			DataChunks:   1,
		},
	},
	MetadataPool: &cephlcmv1alpha1.CephPoolSpec{
		DeviceClass: "hdd",
		Replicated: &cephlcmv1alpha1.CephPoolReplicatedSpec{
			Size: 3,
		},
	},
	Gateway: cephlcmv1alpha1.CephRGWGateway{
		Instances:  2,
		Port:       80,
		SecurePort: 8443,
	},
}
View Source
var CephRgwBucketsList = bktv1alpha1.ObjectBucketClaimList{
	Items: []bktv1alpha1.ObjectBucketClaim{
		{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "fake-bucket-1",
				Namespace: "rook-ceph",
			},
			Spec: bktv1alpha1.ObjectBucketClaimSpec{
				GenerateBucketName: "fake-bucket-1",
				StorageClassName:   "rgw-storage-class",
			},
			Status: bktv1alpha1.ObjectBucketClaimStatus{
				Phase: bktv1alpha1.ObjectBucketClaimStatusPhaseBound,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "fake-bucket-2",
				Namespace: "rook-ceph",
			},
			Spec: bktv1alpha1.ObjectBucketClaimSpec{
				GenerateBucketName: "fake-bucket-2",
				StorageClassName:   "rgw-storage-class",
			},
			Status: bktv1alpha1.ObjectBucketClaimStatus{
				Phase: bktv1alpha1.ObjectBucketClaimStatusPhaseBound,
			},
		},
	},
}
View Source
var CephRgwSpecWithUsersBuckets = func() cephlcmv1alpha1.CephRGW {
	rgw := CephRgwBaseSpec.DeepCopy()
	rgw.ObjectUsers = []cephlcmv1alpha1.CephRGWUser{
		{Name: "fake-user-1"}, {Name: "fake-user-2"},
	}
	rgw.Buckets = []string{"fake-bucket-1", "fake-bucket-2"}
	return *rgw
}()
View Source
var CephRgwUsersList = cephv1.CephObjectStoreUserList{
	Items: []cephv1.CephObjectStoreUser{
		{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "fake-user-1",
				Namespace: "rook-ceph",
			},
			Spec: cephv1.ObjectStoreUserSpec{
				Store:       "rgw-store",
				DisplayName: "fake-user-1",
			},
			Status: &cephv1.ObjectStoreUserStatus{
				Phase: "Ready",
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "fake-user-2",
				Namespace: "rook-ceph",
			},
			Spec: cephv1.ObjectStoreUserSpec{
				Store:       "rgw-store",
				DisplayName: "fake-user-2",
			},
			Status: &cephv1.ObjectStoreUserStatus{
				Phase: "Ready",
			},
		},
	},
}
View Source
var CephSecretNotReady = &cephlcmv1alpha1.CephDeploymentSecret{
	ObjectMeta: GetNewCephSecret().ObjectMeta,
	Status: &cephlcmv1alpha1.CephDeploymentSecretStatus{
		State:            cephlcmv1alpha1.HealthStateFailed,
		SecretsInfo:      &cephlcmv1alpha1.CephDeploymentSecretsInfo{},
		LastSecretUpdate: time.Date(2021, 8, 15, 14, 30, 53, 0, time.Local).Format(time.RFC3339),
		LastSecretCheck:  time.Date(2021, 8, 15, 14, 30, 53, 0, time.Local).Format(time.RFC3339),
		Messages:         []string{"admin keyring secret is not available: secrets \"rook-ceph-admin-keyring\" not found"},
	},
}
View Source
var CephSecretReady = &cephlcmv1alpha1.CephDeploymentSecret{
	ObjectMeta: GetNewCephSecret().ObjectMeta,
	Status: &cephlcmv1alpha1.CephDeploymentSecretStatus{
		State: cephlcmv1alpha1.HealthStateOk,
		SecretsInfo: &cephlcmv1alpha1.CephDeploymentSecretsInfo{
			ClientSecrets: []cephlcmv1alpha1.CephDeploymentSecretInfo{
				{
					ObjectName:      "client.admin",
					SecretName:      "rook-ceph-admin-keyring",
					SecretNamespace: "rook-ceph",
				},
			},
		},
		LastSecretCheck:  time.Date(2021, 8, 15, 14, 30, 52, 0, time.Local).Format(time.RFC3339),
		LastSecretUpdate: time.Date(2021, 8, 15, 14, 30, 52, 0, time.Local).Format(time.RFC3339),
	},
}
View Source
var CephSecretReadySecretsInfo = &cephlcmv1alpha1.CephDeploymentSecretsInfo{
	ClientSecrets: []cephlcmv1alpha1.CephDeploymentSecretInfo{
		{
			ObjectName:      "client.admin",
			SecretName:      "rook-ceph-admin-keyring",
			SecretNamespace: "rook-ceph",
		},
		{
			ObjectName:      "client.test",
			SecretName:      "rook-ceph-client-test",
			SecretNamespace: "rook-ceph",
		},
	},
	RgwUserSecrets: []cephlcmv1alpha1.CephDeploymentSecretInfo{
		{
			ObjectName:      "test-user",
			SecretName:      "rgw-metrics-user-secret",
			SecretNamespace: "rook-ceph",
		},
	},
}
View Source
var CephSharedFileSystemMultiple = &cephlcmv1alpha1.CephSharedFilesystem{
	CephFS: []cephlcmv1alpha1.CephFS{
		CephFSNewOk,
		func() cephlcmv1alpha1.CephFS {
			newCephFS := CephFSNewOk.DeepCopy()
			newCephFS.Name = "second-test-cephfs"
			return *newCephFS
		}(),
	},
}
View Source
var CephSharedFileSystemOk = &cephlcmv1alpha1.CephSharedFilesystem{
	CephFS: []cephlcmv1alpha1.CephFS{
		CephFSNewOk,
	},
}
View Source
var CephStatusBaseHealthy = BuildCliOutput(CephStatusTmpl, "status", nil)
View Source
var CephStatusBaseUnhealthy = BuildCliOutput(CephStatusTmpl, "status", map[string]string{"quorum_names": `["a", "b"]`, "osdmap": `{"num_osds": 3, "num_up_osds": 2, "num_in_osds": 2}`})
View Source
var CephStatusCephFewFsRgwHealthy = BuildCliOutput(CephStatusTmpl, "status", map[string]string{
	"fsmap":      `{"by_rank": [{"name": "cephfs-1-a", "status": "up:active"}, {"name": "cephfs-2-a", "status": "up:active"}, {"name": "cephfs-2-b", "status": "up:standby-replay"}],"up:standby": 1}`,
	"servicemap": `{"services": {"rgw": {"daemons": {"10223488": {"gid": 10223488},"11556688": {"gid": 11556688},"12065099":{"gid": 12065099},"summary": ""}}}}`,
})
View Source
var CephStatusCephFewFsRgwUnhealthy = BuildCliOutput(CephStatusTmpl, "status", map[string]string{
	"fsmap":      `{"by_rank": [{"name": "cephfs-1-a", "status": "down:inactive"}, {"name": "cephfs-2-a", "status": "up:active"}, {"name": "cephfs-3-a", "status": "down:inactive"}],"up:standby": 0}`,
	"servicemap": `{"services": {"rgw": {"daemons": {"10223488": {"gid": 10223488},"11556688": {"gid": 11556688},"12065099":{"gid": 12065099}, "12065109":{"gid": 12065109},"summary": ""}}}}`,
})
View Source
var CephStatusCephFsRgwHealthy = BuildCliOutput(CephStatusTmpl, "status", map[string]string{
	"fsmap":      `{"by_rank": [{"name": "cephfs-1-a", "status": "up:active"}],"up:standby": 1}`,
	"servicemap": `{"services": {"rgw": {"daemons": {"11556688": {"gid": 11556688},"12065099":{"gid": 12065099},"summary": ""}}}}`,
})
View Source
var CephStatusCephFsRgwUnhealthy = BuildCliOutput(CephStatusTmpl, "status", map[string]string{
	"fsmap": `{"by_rank": [{"name": "cephfs-1-a", "status": "down:inactive"}],"up:standby": 0}`,
})
View Source
var CephStatusTmpl = `` /* 170-byte string literal not displayed */
View Source
var CephStatusWithEvents = BuildCliOutput(CephStatusTmpl, "status", map[string]string{"progress_events": `{
  "12b640c7-9734-429e-a67d-a00ab20a7635": {
    "message":"Rebalancing after osd.3 marked in (33s)\n      [==========================..] (remaining: 1s)",
    "progress":0.94805192947387695
  },
  "eb643ce4-af7d-4297-b136-0cbddb5cd14f":{
    "message":"PG autoscaler increasing pool 9 PGs from 32 to 128 (0s)\n      [............................] ",
    "progress":0.52945859385684473
  }
}`})
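Note: the CephStatus* fixtures are rendered from CephStatusTmpl via BuildCliOutput, which substitutes the given placeholder values into the template. A minimal sketch of how a test helper might assert against such an output, assuming BuildCliOutput returns the rendered string and that the result is JSON (as the placeholder values suggest; the template itself is not displayed here):

// assumes: import ( "encoding/json"; "testing" )
func osdsUp(t *testing.T, rendered string) int {
	// parse the rendered `ceph status` JSON and read osdmap.num_up_osds;
	// the JSON shape is inferred from the placeholder values above
	var status map[string]interface{}
	if err := json.Unmarshal([]byte(rendered), &status); err != nil {
		t.Fatalf("unexpected non-JSON status output: %v", err)
	}
	osdmap, _ := status["osdmap"].(map[string]interface{})
	up, _ := osdmap["num_up_osds"].(float64)
	return int(up)
}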
View Source
var CephVersionsLatest = fmt.Sprintf(CephVersionsTemplate, cephVersionsOutputTmplLatest)
View Source
var CephVersionsLatestWithExtraDaemons = fmt.Sprintf(CephVersionsTemplateWithExtraDaemons, cephVersionsOutputTmplLatest)
View Source
var CephVersionsPrevious = fmt.Sprintf(CephVersionsTemplate, cephVersionsOutputTmplPrevious)
View Source
var CephVersionsPreviousWithExtraDaemons = fmt.Sprintf(CephVersionsTemplateWithExtraDaemons, cephVersionsOutputTmplPrevious)
View Source
var CephVersionsTemplate = `` /* 252-byte string literal not displayed */
View Source
var CephVersionsTemplateWithExtraDaemons = `` /* 375-byte string literal not displayed */
View Source
var CephVolumeAttachment = storagev1.VolumeAttachment{
	ObjectMeta: metav1.ObjectMeta{
		Name: "ceph-volumeattachment",
	},
	Spec: storagev1.VolumeAttachmentSpec{
		Attacher: "rook-ceph.rbd.csi.ceph.com",
		NodeName: "node-1",
	},
}
View Source
var CephZoneGroupInfoEmptyHostnames = BuildCliOutput(CephZoneGroupInfoHostnamesTmpl, "", map[string]string{"hostnames": "[]"})
View Source
var CephZoneGroupInfoHostnamesFromConfig = BuildCliOutput(CephZoneGroupInfoHostnamesTmpl, "", map[string]string{"hostnames": `["rook-ceph-rgw-rgw-store.rook-ceph.svc","rgw-store.ms2.wxlsd.com"]`})
View Source
var CephZoneGroupInfoHostnamesFromIngress = BuildCliOutput(CephZoneGroupInfoHostnamesTmpl, "", map[string]string{"hostnames": `["rook-ceph-rgw-rgw-store.rook-ceph.svc","rgw-store.test"]`})
View Source
var CephZoneGroupInfoHostnamesFromOpenstack = BuildCliOutput(CephZoneGroupInfoHostnamesTmpl, "", map[string]string{"hostnames": `["rook-ceph-rgw-rgw-store.rook-ceph.svc","rgw-store.openstack.com"]`})
View Source
var CephZoneGroupInfoHostnamesTmpl = `` /* 136-byte string literal not displayed */
View Source
var ConfigMapListEmpty = &corev1.ConfigMapList{Items: []corev1.ConfigMap{}}
View Source
var CsiDrainedAnnotationsNode = GetNodeWithLabels("node-1", map[string]string{},
	map[string]string{"kaas.mirantis.com/lcm-drained": "true", "kaas.mirantis.com/csi-drained": "true"})
View Source
var CsiRbdPod = corev1.Pod{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "csi-rbdplugin",
		Labels: map[string]string{
			"app": "csi-rbdplugin",
		},
	},
	Spec: corev1.PodSpec{
		NodeName: "node-1",
	},
}
View Source
var DaemonSetLabelsNode = GetNodeWithLabels("node1", map[string]string{"ceph-daemonset-available-node": "true"}, nil)
View Source
var DaemonSetListEmpty = &appsv1.DaemonSetList{Items: []appsv1.DaemonSet{}}
View Source
var DaemonSetListNotReady = &appsv1.DaemonSetList{
	Items: []appsv1.DaemonSet{
		*DaemonSetWithStatus(RookNamespace, "csi-rbdplugin", 3, 1), *DaemonSetWithStatus(RookNamespace, "csi-cephfsplugin", 3, 1),
		*DaemonSetWithStatus(LcmObjectMeta.Namespace, "pelagia-disk-daemon", 2, 0),
	},
}
View Source
var DaemonSetListReady = &appsv1.DaemonSetList{
	Items: []appsv1.DaemonSet{
		*DaemonSetWithStatus(RookNamespace, "csi-rbdplugin", 3, 3), *DaemonSetWithStatus(RookNamespace, "csi-cephfsplugin", 3, 3),
		*DaemonSetWithStatus(LcmObjectMeta.Namespace, "pelagia-disk-daemon", 2, 2),
	},
}
View Source
var DeleteReclaimPolicyForPointer = corev1.PersistentVolumeReclaimDelete
View Source
var DeploymentListEmpty = &appsv1.DeploymentList{Items: []appsv1.Deployment{}}
View Source
var DevNotInSpecRemoveMap = &lcmv1alpha1.TaskRemoveInfo{
	CleanupMap: map[string]lcmv1alpha1.HostMapping{
		"node-1": {
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"20": AdaptOsdMapping("node-1", "20",
					map[string]bool{"inCrush": true}, map[string]map[string]bool{"/dev/vde": {"zap": true}, "/dev/vdd": {}}),
			},
		},
		"node-2": {
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"4": AdaptOsdMapping("node-2", "4",
					map[string]bool{"inCrush": true}, map[string]map[string]bool{"/dev/vdd": {"zap": true}}),
				"5": AdaptOsdMapping("node-2", "5",
					map[string]bool{"inCrush": true}, map[string]map[string]bool{"/dev/vdd": {"zap": true}}),
			},
		},
	},
	Issues: []string{},
	Warnings: []string{
		"[node 'node-1'] found osd db partition '/dev/ceph-metadata/part-1' for osd '20', which is created not by rook, skipping disk/partition zap",
	},
}
View Source
var DiskDaemonDaemonset = appsv1.DaemonSet{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "pelagia-disk-daemon",
		Namespace: LcmObjectMeta.Namespace,
		Labels: map[string]string{
			"app": "pelagia-disk-daemon",
		},
		OwnerReferences: []metav1.OwnerReference{
			{
				APIVersion: "lcm.mirantis.com/v1alpha1",
				Kind:       "CephDeploymentHealth",
				Name:       LcmObjectMeta.Name,
			},
		},
	},
	Spec: appsv1.DaemonSetSpec{
		MinReadySeconds:      5,
		RevisionHistoryLimit: &[]int32{5}[0],
		UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
			Type: appsv1.RollingUpdateDaemonSetStrategyType,
			RollingUpdate: &appsv1.RollingUpdateDaemonSet{
				MaxUnavailable: &intstr.IntOrString{
					Type:   1,
					StrVal: "30%",
				},
				MaxSurge: &intstr.IntOrString{
					Type:   0,
					IntVal: 0,
				},
			},
		},
		Selector: &metav1.LabelSelector{
			MatchLabels: map[string]string{
				"app": "pelagia-disk-daemon",
			},
		},
		Template: corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{
				Labels: map[string]string{
					"app": "pelagia-disk-daemon",
				},
			},
			Spec: corev1.PodSpec{
				DNSPolicy: "ClusterFirstWithHostNet",
				SecurityContext: &corev1.PodSecurityContext{
					RunAsUser:  &[]int64{0}[0],
					RunAsGroup: &[]int64{0}[0],
				},
				RestartPolicy:                 corev1.RestartPolicyAlways,
				TerminationGracePeriodSeconds: &[]int64{10}[0],
				InitContainers: []corev1.Container{
					{
						Name:  "bin-downloader",
						Image: "some-registry/lcm-controller:v1",
						Command: []string{
							"cp",
						},
						TerminationMessagePath:   "/dev/termination-log",
						TerminationMessagePolicy: "File",
						Args: []string{
							"/usr/local/bin/pelagia-disk-daemon",
							"/usr/local/bin/tini",
							"/tmp/bin/",
						},
						ImagePullPolicy: "IfNotPresent",
						SecurityContext: &corev1.SecurityContext{
							Capabilities: &corev1.Capabilities{
								Drop: []corev1.Capability{
									"ALL",
								},
							},
						},
						VolumeMounts: []corev1.VolumeMount{
							{
								Name:      "pelagia-disk-daemon-bin",
								MountPath: "/tmp/bin",
							},
						},
					},
				},
				Containers: []corev1.Container{
					{
						Name:  "pelagia-disk-daemon",
						Image: "some-registry.com/ceph:v18.2.4",
						Command: []string{
							"/usr/local/bin/tini", "--",
						},
						TerminationMessagePath:   "/dev/termination-log",
						TerminationMessagePolicy: "File",
						Args: []string{
							"/usr/local/bin/pelagia-disk-daemon", "--daemon", "--port", "9999",
						},
						ImagePullPolicy: "IfNotPresent",
						VolumeMounts: []corev1.VolumeMount{
							{
								Name:      "pelagia-disk-daemon-bin",
								MountPath: "/usr/local/bin",
							},
							{
								Name:      "devices",
								MountPath: "/dev",
								ReadOnly:  true,
							},
							{
								Name:      "run-udev",
								MountPath: "/run/udev",
								ReadOnly:  true,
							},
						},
						Env: []corev1.EnvVar{
							{
								Name:  "DM_DISABLE_UDEV",
								Value: "0",
							},
						},
						SecurityContext: &corev1.SecurityContext{
							Privileged: &[]bool{true}[0],
							RunAsUser:  &[]int64{0}[0],
							Capabilities: &corev1.Capabilities{
								Drop: []corev1.Capability{
									"ALL",
								},
							},
						},
						LivenessProbe: &corev1.Probe{
							ProbeHandler: corev1.ProbeHandler{
								Exec: &corev1.ExecAction{
									Command: []string{
										"/usr/local/bin/pelagia-disk-daemon",
										"--api-check", "--port", "9999",
									},
								},
							},
							InitialDelaySeconds: 5,
							PeriodSeconds:       10,
							FailureThreshold:    3,
							TimeoutSeconds:      1,
							SuccessThreshold:    1,
						},
						ReadinessProbe: &corev1.Probe{
							ProbeHandler: corev1.ProbeHandler{
								Exec: &corev1.ExecAction{
									Command: []string{
										"/usr/local/bin/pelagia-disk-daemon",
										"--api-check", "--port", "9999",
									},
								},
							},
							PeriodSeconds:    10,
							FailureThreshold: 3,
							TimeoutSeconds:   1,
							SuccessThreshold: 1,
						},
					},
				},
				NodeSelector: map[string]string{"pelagia-disk-daemon": "true"},
				Volumes: []corev1.Volume{
					{
						Name: "pelagia-disk-daemon-bin",
						VolumeSource: corev1.VolumeSource{
							EmptyDir: &corev1.EmptyDirVolumeSource{},
						},
					},
					{
						Name: "devices",
						VolumeSource: corev1.VolumeSource{
							HostPath: &corev1.HostPathVolumeSource{
								Path: "/dev",
								Type: &[]corev1.HostPathType{corev1.HostPathDirectory}[0],
							},
						},
					},
					{
						Name: "run-udev",
						VolumeSource: corev1.VolumeSource{
							HostPath: &corev1.HostPathVolumeSource{
								Path: "/run/udev",
								Type: &[]corev1.HostPathType{corev1.HostPathDirectory}[0],
							},
						},
					},
				},
			},
		},
	},
}
View Source
var DiskDaemonDaemonsetWithOsdTolerations = func() *appsv1.DaemonSet {
	ds := DiskDaemonDaemonset.DeepCopy()
	ds.Spec.Template.Spec.Tolerations = []corev1.Toleration{
		{
			Key:      "test.kubernetes.io/testkey",
			Effect:   "Schedule",
			Operator: "Exists",
		},
	}
	return ds
}()
View Source
var DiskDaemonReportOkNode1 = lcmcommon.DiskDaemonReport{
	State:       lcmcommon.DiskDaemonStateOk,
	DisksReport: lcmdiskdaemoninput.DiskInfoReportCephVolumeFromNode1,
	OsdsReport: &lcmcommon.DiskDaemonOsdsReport{
		Warnings: []string{
			"found physical osd db partition '/dev/vda14' for osd '30'",
		},
		Osds: lcmdiskdaemoninput.OsdDevicesInfoNode1,
	},
}
View Source
var DiskDaemonReportOkNode1SomeDevLost = lcmcommon.DiskDaemonReport{
	State:       lcmcommon.DiskDaemonStateOk,
	DisksReport: lcmdiskdaemoninput.DiskInfoReportCephVolumeSomeOsdLostFromNode1,
	OsdsReport: &lcmcommon.DiskDaemonOsdsReport{
		Warnings: []string{
			"found physical osd db partition '/dev/vda14' for osd '30'",
		},
		Osds: map[string][]lcmcommon.OsdDaemonInfo{
			"20": lcmdiskdaemoninput.OsdDevicesSomeLostInfoNode1["20"],
			"25": lcmdiskdaemoninput.OsdDevicesSomeLostInfoNode1["25"],
			"30": lcmdiskdaemoninput.OsdDevicesInfoNode1["30"],
		},
	},
}
View Source
var DiskDaemonReportOkNode2SomeDevLost = lcmcommon.DiskDaemonReport{
	State: lcmcommon.DiskDaemonStateOk,
	OsdsReport: &lcmcommon.DiskDaemonOsdsReport{
		Warnings: []string{},
		Osds: map[string][]lcmcommon.OsdDaemonInfo{
			"0": lcmdiskdaemoninput.OsdDevicesInfoNode2["0"],
		},
	},
}
View Source
var EmptyAnnotationsNode = GetNodeWithLabels("node1", map[string]string{}, map[string]string{})
View Source
var EmptyCephDeploymentMaintenance = cephlcmv1alpha1.CephDeploymentMaintenance{
	ObjectMeta: LcmObjectMeta,
}
View Source
var EmptyCephSecret = &cephlcmv1alpha1.CephDeploymentSecret{
	ObjectMeta: LcmObjectMeta,
}
View Source
var EmptyLabelsNode = GetNodeWithLabels("node1", map[string]string{}, nil)
View Source
var EmptyRemoveMap = &lcmv1alpha1.TaskRemoveInfo{
	CleanupMap: map[string]lcmv1alpha1.HostMapping{},
	Issues:     []string{},
	Warnings:   []string{},
}
View Source
var EmptyRookConfigOverride = corev1.ConfigMap{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "rook-config-override",
	},
	Data: map[string]string{
		"config":  "",
		"runtime": "",
	},
}
View Source
var ExternalConnectionSecretNonAdmin = GetExternalConnectionSecret(
	[]byte(`{"client_name":"test","client_keyring":"AQAcpuJiITYXMhAAXaOoAqOKJ4mhNOAqxFb1Hw==","fsid":"8668f062-3faa-358a-85f3-f80fe6c1e306","mon_endpoints_map":"cmn01=10.0.0.1:6969,cmn02=10.0.0.2:6969,cmn03=10.0.0.3:6969","rbd_keyring_info":{"node_key":"AQDd+HRjKiMBOhAATVfdzSNdlOAG3vaPSeTBzw==","provisioner_key":"AQDd+HRjFAcRIBAA102qzSI0WO1JfBnfPf/R2w=="},"cephfs_keyring_info":{"node_key":"AQDh+HRjCGpLDxAA1DqwfBPBGkW7+XM65JVChg==","provisioner_key":"AQDg+HRjKB9bLBAArfLLNtGN+KZRq4eaJf6Ptg=="}}`))
View Source
var ExternalConnectionSecretWithAdmin = GetExternalConnectionSecret(
	[]byte(`{"client_name":"admin","client_keyring":"AQAcpuJiITYXMhAAXaOoAqOKJ4mhNOAqxFb1Hw==","fsid":"8668f062-3faa-358a-85f3-f80fe6c1e306","mon_endpoints_map":"cmn01=10.0.0.1:6969,cmn02=10.0.0.2:6969,cmn03=10.0.0.3:6969"}`))
View Source
var ExternalConnectionSecretWithAdminAndRgw = GetExternalConnectionSecret(
	[]byte(`{"client_name":"admin","client_keyring":"AQAcpuJiITYXMhAAXaOoAqOKJ4mhNOAqxFb1Hw==","fsid":"8668f062-3faa-358a-85f3-f80fe6c1e306","mon_endpoints_map":"cmn01=10.0.0.1:6969,cmn02=10.0.0.2:6969,cmn03=10.0.0.3:6969","rgw_admin_keys":{"accessKey":"5TABLO7H0I6BTW6N25X5","secretKey":"Wd8SDDrtyyAuiD1klOGn9vJqOJh5dOSVlJ6kir9Q"}}`))
View Source
var ExternalStorageClassDefault = storagev1.StorageClass{
	ObjectMeta: metav1.ObjectMeta{
		Name: "pool1-hdd",
		Labels: map[string]string{
			"rook-ceph-storage-class": "true",
		},
		Annotations: map[string]string{"storageclass.kubernetes.io/is-default-class": "true"},
	},
	Provisioner:          "rook-ceph.rbd.csi.ceph.com",
	AllowVolumeExpansion: &TrueVarForPointer,
	Parameters: map[string]string{
		"clusterID":     "rook-ceph",
		"pool":          "pool1-hdd",
		"imageFormat":   "2",
		"imageFeatures": "layering",
		"csi.storage.k8s.io/provisioner-secret-name":            "rook-csi-rbd-provisioner",
		"csi.storage.k8s.io/provisioner-secret-namespace":       "rook-ceph",
		"csi.storage.k8s.io/node-stage-secret-name":             "rook-csi-rbd-node",
		"csi.storage.k8s.io/node-stage-secret-namespace":        "rook-ceph",
		"csi.storage.k8s.io/controller-expand-secret-name":      "rook-csi-rbd-provisioner",
		"csi.storage.k8s.io/controller-expand-secret-namespace": "rook-ceph",
		"csi.storage.k8s.io/fstype":                             "ext4",
	},
}
View Source
var FullNodesInfoFromDaemon = map[string]lcmv1alpha1.HostMapping{
	"node-1": {
		OsdMapping: map[string]lcmv1alpha1.OsdMapping{
			"20": {
				UUID:          "vbsgs3a3-sdcv-casq-sd11-asd12dasczsf",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_vbsgs3a3-sdcv-casq-sd11-asd12dasczsf",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vde": {
						ID:         "2926ff77-7491-4447-a",
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:0f.0",
						Partition:  "/dev/ceph-21312wds-sdfv-vs3f-scv3-sdfdsg23edaa/osd-block-vbsgs3a3-sdcv-casq-sd11-asd12dasczsf",
						Type:       "block",
						Alive:      true,
					},
					"/dev/vdd": {
						ID:         "e8d89e2f-ffc6-4988-9",
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:0e.0",
						Partition:  "/dev/ceph-metadata/part-1",
						Type:       "db",
						Alive:      true,
					},
				},
			},
			"25": {
				UUID:          "d49fd9bf-d2dd-4c3d-824d-87f3f17ea44a",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_d49fd9bf-d2dd-4c3d-824d-87f3f17ea44a",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vdf": {
						ID:         "b7ea1c8c-89b8-4354-8",
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:10.0",
						Partition:  "/dev/ceph-2efce189-afb7-452f-bd32-c73b5017a0da/osd-block-d49fd9bf-d2dd-4c3d-824d-87f3f17ea44a",
						Type:       "block",
						Alive:      true,
					},
					"/dev/vdd": {
						ID:         "e8d89e2f-ffc6-4988-9",
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:0e.0",
						Partition:  "/dev/ceph-metadata/part-2",
						Type:       "db",
						Alive:      true,
					},
				},
			},
			"30": {
				UUID:          "f4edb5cd-fb1e-4620-9419-3f9a4fcecba5",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_f4edb5cd-fb1e-4620-9419-3f9a4fcecba5",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vda": {
						ID:         "8dad5ae9-ddf7-40bf-8",
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:09.0",
						Partition:  "/dev/vda14",
						Type:       "db",
						Alive:      true,
					},
					"/dev/vdb": {
						ID:         "996ea59f-7f47-4fac-b",
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:0a.0",
						Partition:  "/dev/ceph-992bbd78-3d8e-4cc3-93dc-eae387309364/osd-block-f4edb5cd-fb1e-4620-9419-3f9a4fcecba5",
						Type:       "block",
						Alive:      true,
					},
				},
			},
		},
	},
	"node-2": {
		OsdMapping: map[string]lcmv1alpha1.OsdMapping{
			"0": {
				UUID:          "69481cd1-38b1-42fd-ac07-06bf4d7c0e19",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_69481cd1-38b1-42fd-ac07-06bf4d7c0e19",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vdb": {
						ID:         "b4eaf39c-b561-4269-1",
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:0a.0",
						Partition:  "/dev/ceph-cf7c8b53-27c7-4cfc-94de-6ad4c7d9f92d/osd-block-af39b794-e1c6-41c0-8997-d6b6c631b8f2",
						Type:       "block",
						Alive:      true,
					},
				},
			},
			"4": {
				UUID:          "ad76cf53-5cb5-48fe-a39a-343734f5ccde",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_ad76cf53-5cb5-48fe-a39a-343734f5ccde",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vdd": {
						ID:         "35a15532-8b56-4f83-9",
						Rotational: false,
						Path:       "/dev/disk/by-path/pci-0000:00:1e.0",
						Partition:  "/dev/ceph-dada9f25-41b4-4c26-9a20-448ac01e1d06/osd-block-ad76cf53-5cb5-48fe-a39a-343734f5ccde",
						Type:       "block",
						Alive:      true,
					},
				},
			},
			"5": {
				UUID:          "af39b794-e1c6-41c0-8997-d6b6c631b8f2",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_af39b794-e1c6-41c0-8997-d6b6c631b8f2",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vdd": {
						ID:         "35a15532-8b56-4f83-9",
						Rotational: false,
						Path:       "/dev/disk/by-path/pci-0000:00:1e.0",
						Partition:  "/dev/ceph-dada9f25-41b4-4c26-9a20-448ac01e1d06/osd-block-7d09cceb-4de0-478e-9d8d-bd09cb0c904e",
						Type:       "block",
						Alive:      true,
					},
				},
			},
		},
	},
}
View Source
var FullNodesInfoFromOsdMeta = map[string]lcmv1alpha1.HostMapping{
	"node-1": {
		OsdMapping: map[string]lcmv1alpha1.OsdMapping{
			"20": {
				UUID:          "vbsgs3a3-sdcv-casq-sd11-asd12dasczsf",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_vbsgs3a3-sdcv-casq-sd11-asd12dasczsf",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vde": {
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:0f.0",
						Partition:  "/dev/dm-0",
						ID:         "2926ff77-7491-4447-a",
						Type:       "block",
					},
					"/dev/vdd": {
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:0e.0",
						Partition:  "/dev/dm-1",
						ID:         "e8d89e2f-ffc6-4988-9",
						Type:       "db",
					},
				},
			},
			"25": {
				UUID:          "d49fd9bf-d2dd-4c3d-824d-87f3f17ea44a",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_d49fd9bf-d2dd-4c3d-824d-87f3f17ea44a",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vdf": {
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:10.0",
						Partition:  "/dev/dm-2",
						ID:         "b7ea1c8c-89b8-4354-8",
						Type:       "block",
					},
					"/dev/vdd": {
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:0e.0",
						Partition:  "/dev/dm-3",
						ID:         "e8d89e2f-ffc6-4988-9",
						Type:       "db",
					},
				},
			},
			"30": {
				UUID:          "f4edb5cd-fb1e-4620-9419-3f9a4fcecba5",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_f4edb5cd-fb1e-4620-9419-3f9a4fcecba5",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vda": {
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:09.0",
						Partition:  "/dev/vda14",
						ID:         "8dad5ae9-ddf7-40bf-8",
						Type:       "db",
					},
					"/dev/vdb": {
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:0a.0",
						Partition:  "/dev/dm-4",
						ID:         "996ea59f-7f47-4fac-b",
						Type:       "block",
					},
				},
			},
		},
	},
	"node-2": {
		OsdMapping: map[string]lcmv1alpha1.OsdMapping{
			"0": {
				UUID:          "69481cd1-38b1-42fd-ac07-06bf4d7c0e19",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_69481cd1-38b1-42fd-ac07-06bf4d7c0e19",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vdb": {
						Rotational: true,
						Path:       "/dev/disk/by-path/pci-0000:00:0a.0",
						ID:         "b4eaf39c-b561-4269-1",
						Partition:  "/dev/dm-0",
						Type:       "block",
					},
				},
			},
			"4": {
				UUID:          "ad76cf53-5cb5-48fe-a39a-343734f5ccde",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_ad76cf53-5cb5-48fe-a39a-343734f5ccde",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vdd": {
						Rotational: false,
						Path:       "/dev/disk/by-path/pci-0000:00:1e.0",
						Partition:  "/dev/dm-2",
						ID:         "35a15532-8b56-4f83-9",
						Type:       "block",
					},
				},
			},
			"5": {
				UUID:          "af39b794-e1c6-41c0-8997-d6b6c631b8f2",
				ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
				HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_af39b794-e1c6-41c0-8997-d6b6c631b8f2",
				DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
					"/dev/vdd": {
						Rotational: false,
						Path:       "/dev/disk/by-path/pci-0000:00:1e.0",
						Partition:  "/dev/dm-3",
						ID:         "35a15532-8b56-4f83-9",
						Type:       "block",
					},
				},
			},
		},
	},
}
View Source
var FullNodesRemoveMap = &lcmv1alpha1.TaskRemoveInfo{
	CleanupMap: map[string]lcmv1alpha1.HostMapping{
		"node-1": {
			CompleteCleanup: true,
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"20": AdaptOsdMapping("node-1", "20",
					map[string]bool{"inCrush": true}, map[string]map[string]bool{"/dev/vde": {"zap": true}, "/dev/vdd": {}}),
				"25": AdaptOsdMapping("node-1", "25",
					map[string]bool{"inCrush": true}, map[string]map[string]bool{"/dev/vdf": {"zap": true}, "/dev/vdd": {}}),
				"30": AdaptOsdMapping("node-1", "30",
					map[string]bool{"inCrush": true}, map[string]map[string]bool{"/dev/vda": {}, "/dev/vdb": {"zap": true}}),
			},
		},
		"node-2": {
			CompleteCleanup: true,
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"0": AdaptOsdMapping("node-2", "0",
					map[string]bool{"inCrush": true}, map[string]map[string]bool{"/dev/vdb": {"zap": true}}),
				"4": AdaptOsdMapping("node-2", "4",
					map[string]bool{"inCrush": true}, map[string]map[string]bool{"/dev/vdd": {"zap": true}}),
				"5": AdaptOsdMapping("node-2", "5",
					map[string]bool{"inCrush": true}, map[string]map[string]bool{"/dev/vdd": {"zap": true}}),
			},
		},
	},
	Issues: []string{},
	Warnings: []string{
		"[node 'node-1'] found osd db partition '/dev/ceph-metadata/part-1' for osd '20', which is created not by rook, skipping disk/partition zap",
		"[node 'node-1'] found osd db partition '/dev/ceph-metadata/part-2' for osd '25', which is created not by rook, skipping disk/partition zap",
		"[node 'node-1'] found osd db partition '/dev/vda14' for osd '30', which is created not by rook, skipping disk/partition zap",
		"[node 'node-1'] found physical osd db partition '/dev/vda14' for osd '30'",
	},
}
View Source
var HyperConvergeCephDeploy = &cephlcmv1alpha1.CephDeploymentHyperConverge{
	Resources: cephv1.ResourceSpec{
		"osd-nvme": v1.ResourceRequirements{
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("156Mi"),
			},
			Requests: v1.ResourceList{
				v1.ResourceMemory: resource.MustParse("28Mi"),
				v1.ResourceCPU:    resource.MustParse("10m"),
			},
		},
	},
	Tolerations: map[string]cephlcmv1alpha1.CephDeploymentToleration{
		"all": {
			Rules: []v1.Toleration{
				{
					Key:      "test.kubernetes.io/testkey",
					Effect:   "Schedule",
					Operator: "Exists",
				},
			},
		},
		"mgr": {
			Rules: []v1.Toleration{
				{
					Key:      "test.kubernetes.io/testkey",
					Effect:   "Schedule",
					Operator: "Exists",
				},
			},
		},
		"mon": {
			Rules: []v1.Toleration{
				{
					Key:      "test.kubernetes.io/testkey",
					Effect:   "Schedule",
					Operator: "Exists",
				},
			},
		},
		"osd": {
			Rules: []v1.Toleration{
				{
					Key:      "test.kubernetes.io/testkey",
					Effect:   "Schedule",
					Operator: "Exists",
				},
			},
		},
	},
}
View Source
var HyperConvergeForExtraSVC = &cephlcmv1alpha1.CephDeploymentHyperConverge{
	Tolerations: map[string]cephlcmv1alpha1.CephDeploymentToleration{
		"mds": {
			Rules: []v1.Toleration{
				{
					Key:      "test.kubernetes.io/testkey",
					Effect:   "Schedule",
					Operator: "Exists",
				},
			},
		},
		"rgw": {
			Rules: []v1.Toleration{
				{
					Key:      "rgw-toleration",
					Operator: "Exists",
				},
			},
		},
	},
	Resources: cephv1.ResourceSpec{
		"mds": v1.ResourceRequirements{
			Limits: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("120m"),
			},
			Requests: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("10m"),
			},
		},
		"rgw": v1.ResourceRequirements{
			Limits: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("50m"),
			},
			Requests: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse("20m"),
			},
		},
	},
}
View Source
var IngressRuleSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rgw-store-ingress-secret",
		Namespace: "rook-ceph",
		Labels: map[string]string{
			"objectStore": "rgw-store",
			"ingress":     "rook-ceph-rgw-rgw-store-ingress",
			"cephdeployment.lcm.mirantis.com/ingress": "ceph-object-store-ingress",
		},
	},
	Data: map[string][]byte{
		"ca.crt":  []byte("ingress-cacert"),
		"tls.crt": []byte("ingress-crt"),
		"tls.key": []byte("ingress-key"),
	},
}
View Source
var IngressRuleSecretCustom = func() corev1.Secret {
	secret := IngressRuleSecret.DeepCopy()
	secret.Data = map[string][]byte{
		"ca.crt":  []byte("spec-cacert"),
		"tls.crt": []byte("spec-tlscert"),
		"tls.key": []byte("spec-tlskey"),
	}
	return *secret
}()
View Source
var IngressRuleSecretOpenstack = func() corev1.Secret {
	secret := IngressRuleSecret.DeepCopy()
	secret.Data = map[string][]byte{
		"ca.crt":  OpenstackRgwCredsSecret.Data["ca_cert"],
		"tls.crt": OpenstackRgwCredsSecret.Data["tls_crt"],
		"tls.key": OpenstackRgwCredsSecret.Data["tls_key"],
	}
	return *secret
}()
View Source
var IngressesListEmpty = networkingv1.IngressList{Items: []networkingv1.Ingress{}}
View Source
var LatestCephVersion = strings.ToLower(lcmcommon.LatestRelease.Name)
View Source
var LatestCephVersionImage = fmt.Sprintf("%s.%s", lcmcommon.LatestRelease.MajorVersion, lcmcommon.LatestRelease.SupportedMinors[len(lcmcommon.LatestRelease.SupportedMinors)-1])
View Source
var LcmDrainedAnnotationsNode = GetNodeWithLabels("node-1", map[string]string{"ceph-daemonset-available-node": "true"},
	map[string]string{"kaas.mirantis.com/lcm-drained": "true"})
View Source
var LcmDrainedNoCsiLabelNode = GetNodeWithLabels("node-1", map[string]string{},
	map[string]string{"kaas.mirantis.com/lcm-drained": "true"})
View Source
var LcmObjectMeta = metav1.ObjectMeta{
	Name:      "cephcluster",
	Namespace: "lcm-namespace",
}
View Source
var MgrModuleLsNoPrometheus = BuildCliOutput(MgrModuleLsTmpl, "", map[string]string{"modules": `"iostat","nfs","restful"`})
View Source
var MgrModuleLsTmpl = `` /* 284-byte string literal not displayed */
View Source
var MgrModuleLsWithPrometheus = BuildCliOutput(MgrModuleLsTmpl, "", map[string]string{"modules": `"iostat","nfs","prometheus","restful"`})
View Source
var MultisiteCabundleSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "extra-rook-ceph-cabundle",
	},
	Data: map[string][]byte{
		"cabundle": []byte("fake-extra-cabundle"),
	},
}
View Source
var MultisiteRealmSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "realm1-keys",
	},
	Data: map[string][]byte{
		"access-key": []byte("fakekey"),
		"secret-key": []byte("fakesecret"),
	},
}
View Source
var MultisiteRgwWithSyncDaemon = func() cephlcmv1alpha1.CephDeployment {
	cd := CephDeployMultisiteRgw.DeepCopy()
	cd.Spec.ObjectStorage.Rgw.Gateway.SplitDaemonForMultisiteTrafficSync = true
	return *cd
}()
View Source
var NetworkPolicyMds = GetNetworkPolicy("rook-ceph-mds", []networkingv1.NetworkPolicyPort{
	{
		Port: &startPort, Protocol: &netpolProtocol, EndPort: &endPort,
	},
})
View Source
var NetworkPolicyMgr = GetNetworkPolicy("rook-ceph-mgr", []networkingv1.NetworkPolicyPort{
	{
		Port: &mgrPort, Protocol: &netpolProtocol,
	},
	{
		Port: &startPort, Protocol: &netpolProtocol, EndPort: &endPort,
	},
})
View Source
var NetworkPolicyMon = GetNetworkPolicy("rook-ceph-mon", []networkingv1.NetworkPolicyPort{
	{
		Port: &monPort, Protocol: &netpolProtocol,
	},
	{
		Port: &monPort2, Protocol: &netpolProtocol,
	},
})
View Source
var NetworkPolicyOsd = GetNetworkPolicy("rook-ceph-osd", []networkingv1.NetworkPolicyPort{
	{
		Port: &startPort, Protocol: &netpolProtocol, EndPort: &endPort,
	},
})
View Source
var NetworkPolicyRgw = GetNetworkPolicy("rook-ceph-rgw", []networkingv1.NetworkPolicyPort{
	{
		Port: &rgwPort, Protocol: &netpolProtocol,
	},
	{
		Port: &rgwSecurePort, Protocol: &netpolProtocol,
	},
})
View Source
var NodeMonitorIPAnnotation = GetNodeWithLabels("node1", map[string]string{}, map[string]string{"network.rook.io/mon-ip": "127.0.0.1"})
View Source
var NodesRemoveFullFinishedStatus = func() *lcmv1alpha1.TaskRemoveInfo {
	info := NodesRemoveMapOsdFinishedStatus.DeepCopy()
	host1 := info.CleanupMap["node-1"]
	host1.HostRemoveStatus = &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveFinished}
	host2 := info.CleanupMap["node-2"]
	host2.HostRemoveStatus = &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveFinished}
	info.CleanupMap["node-1"] = host1
	info.CleanupMap["node-2"] = host2
	return info
}()
View Source
var NodesRemoveMapEmptyRemoveStatus = GetInfoWithStatus(FullNodesRemoveMap, map[string]*lcmv1alpha1.RemoveResult{"*": nil})
View Source
var NodesRemoveMapOsdFinishedStatus = GetInfoWithStatus(FullNodesRemoveMap,
	map[string]*lcmv1alpha1.RemoveResult{
		"*": {
			OsdRemoveStatus:    &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveFinished},
			DeviceCleanUpJob:   &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveCompleted},
			DeployRemoveStatus: &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveFinished},
		},
	},
)
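Note: GetInfoWithStatus is used here with the "*" key, which appears to apply the same RemoveResult to every OSD in the cleanup map, whereas the CephOsdRemoveTask fixtures above pass per-OSD keys (for example "2"). A minimal sketch of deriving a failed variant with the same helper; the fixture name is illustrative:

var nodesRemoveMapOsdFailedStatus = GetInfoWithStatus(FullNodesRemoveMap,
	map[string]*lcmv1alpha1.RemoveResult{
		"*": {
			// mark every OSD removal as failed; mirrors the per-OSD usage in CephOsdRemoveTaskFailed
			OsdRemoveStatus: &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveFailed},
		},
	},
)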
View Source
var NonCephVolumeAttachment = storagev1.VolumeAttachment{
	ObjectMeta: metav1.ObjectMeta{
		Name: "non-ceph-volumeattachment",
	},
	Spec: storagev1.VolumeAttachmentSpec{
		Attacher: "another-attacher",
		NodeName: "node-1",
	},
}
View Source
var NotAllTopologyLabelsNode = GetNodeWithLabels("node1", map[string]string{
	"topology.kubernetes.io/region":              "region1",
	"cephdpl-prev-topology.kubernetes.io/region": "region1",
	"topology.rook.io/rack":                      "rack1",
}, nil)
View Source
var NotAvailableNodesFullRemoveMap = func() *lcmv1alpha1.TaskRemoveInfo {
	newMap := NotLabeledNodesFullRemoveMap.DeepCopy()
	for node, mapping := range newMap.CleanupMap {
		mapping.VolumesInfoMissed = false
		mapping.NodeIsDown = true
		newMap.CleanupMap[node] = mapping
	}
	newMap.Warnings = []string{
		"[node 'node-1'] node is not available, device cleanup jobs will be skipped",
		"[node 'node-2'] node is not available, device cleanup jobs will be skipped",
	}
	return newMap
}()
View Source
var NotLabeledNodesFullRemoveMap = &lcmv1alpha1.TaskRemoveInfo{
	CleanupMap: map[string]lcmv1alpha1.HostMapping{
		"node-1": {
			CompleteCleanup:   true,
			VolumesInfoMissed: true,
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"20": AdaptOsdMapping("node-1", "20", map[string]bool{"inCrush": true, "noDaemon": true}, nil),
				"25": AdaptOsdMapping("node-1", "25", map[string]bool{"inCrush": true, "noDaemon": true}, nil),
				"30": AdaptOsdMapping("node-1", "30", map[string]bool{"inCrush": true, "noDaemon": true}, nil),
			},
		},
		"node-2": {
			DropFromCrush:     true,
			VolumesInfoMissed: true,
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"0": AdaptOsdMapping("node-2", "0", map[string]bool{"inCrush": true, "noDaemon": true}, nil),
				"4": AdaptOsdMapping("node-2", "4", map[string]bool{"inCrush": true, "noDaemon": true}, nil),
				"5": AdaptOsdMapping("node-2", "5", map[string]bool{"inCrush": true, "noDaemon": true}, nil),
			},
		},
	},
	Issues: []string{},
	Warnings: []string{
		"[node 'node-1'] node is available, but has no disk daemon running, device cleanup jobs will be skipped",
		"[node 'node-2'] node is available, but has no disk daemon running, device cleanup jobs will be skipped",
	},
}
View Source
var ObjectBucketClaimListEmpty = bktv1alpha1.ObjectBucketClaimList{Items: []bktv1alpha1.ObjectBucketClaim{}}
View Source
var ObjectBucketClaimListNotReady = bktv1alpha1.ObjectBucketClaimList{
	Items: []bktv1alpha1.ObjectBucketClaim{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "bucket-1"},
			Status:     bktv1alpha1.ObjectBucketClaimStatus{Phase: bktv1alpha1.ObjectBucketClaimStatusPhaseFailed},
		},
	},
}
View Source
var ObjectBucketClaimListReady = bktv1alpha1.ObjectBucketClaimList{
	Items: []bktv1alpha1.ObjectBucketClaim{
		{
			ObjectMeta: metav1.ObjectMeta{Namespace: RookNamespace, Name: "bucket-1"},
			Status:     bktv1alpha1.ObjectBucketClaimStatus{Phase: bktv1alpha1.ObjectBucketClaimStatusPhaseBound},
		},
	},
}
View Source
var OctopusCephCluster = func() cephv1.CephCluster {
	newcluster := BuildBaseCephCluster(LcmObjectMeta.Name, RookNamespace)
	newcluster.Status = cephv1.ClusterStatus{
		Phase: cephv1.ConditionReady,
		CephStatus: &cephv1.CephStatus{
			Health: "HEALTH_OK",
			FSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
		},
		CephVersion: &cephv1.ClusterVersion{
			Image:   "some-registry.com/ceph:v15.2.8",
			Version: "15.2.8-0",
		},
	}
	return newcluster
}()
View Source
var OpenstackCaCert = `` /* 1024-byte string literal not displayed */
View Source
var OpenstackCephBlockPoolsList = cephv1.CephBlockPoolList{
	Items: []cephv1.CephBlockPool{
		GetOpenstackPool("vms-hdd", false, 0.2), GetOpenstackPool("volumes-hdd", false, 0.4), GetOpenstackPool("images-hdd", false, 0.1), GetOpenstackPool("backup-hdd", false, 0.1),
	},
}
View Source
var OpenstackCephBlockPoolsListReady = cephv1.CephBlockPoolList{
	Items: []cephv1.CephBlockPool{
		GetOpenstackPool("vms-hdd", true, 0.2), GetOpenstackPool("volumes-hdd", true, 0.4), GetOpenstackPool("images-hdd", true, 0.1), GetOpenstackPool("backup-hdd", true, 0.1),
	},
}
View Source
var OpenstackRgwCredsSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "openstack-ceph-shared",
		Name:      "openstack-rgw-creds",
	},
	Data: map[string][]byte{
		"auth_url":            []byte("https://keystone.openstack.com"),
		"username":            []byte("auth-user"),
		"password":            []byte("auth-password"),
		"project_domain_name": []byte("os-domain"),
		"project_name":        []byte("os-project"),
		"public_domain":       []byte("openstack.com"),
		"barbican_url":        []byte("https://barbican.openstack.com"),
		"tls_key":             []byte(OpenstackTLSKey),
		"tls_crt":             []byte(OpenstackTLSCert),
		"ca_cert":             []byte(OpenstackCaCert),
	},
}
View Source
var OpenstackRgwCredsSecretNoBarbican = func() corev1.Secret {
	secret := OpenstackRgwCredsSecret.DeepCopy()
	delete(secret.Data, "barbican_url")
	return *secret
}()
View Source
var OpenstackSecretGenerated = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "openstack-ceph-keys",
		Namespace: "openstack-ceph-shared",
	},
	Data: map[string][]byte{
		"client.admin":               []byte("AQAcpuJiITYXMhAAXaOoAqOKJ4mhNOAqxFb1Hw=="),
		"glance":                     []byte("client.glance;glance\n;images-hdd:images:hdd"),
		"nova":                       []byte("client.nova;nova\n;vms-hdd:vms:hdd;volumes-hdd:volumes:hdd;images-hdd:images:hdd"),
		"cinder":                     []byte("client.cinder;cinder\n;volumes-hdd:volumes:hdd;images-hdd:images:hdd;backup-hdd:backup:hdd"),
		"mon_endpoints":              []byte("127.0.0.1,127.0.0.2,127.0.0.3"),
		"rgw_internal":               []byte("https://rook-ceph-rgw-rgw-store.rook-ceph.svc:8443/"),
		"rgw_external":               []byte("https://rgw-store.test/"),
		"rgw_internal_cacert":        []byte(RgwCaCert),
		"rgw_external_custom_cacert": []byte("spec-cacert"),
	},
}
View Source
var OpenstackSecretGeneratedCephFS = func() corev1.Secret {
	secret := OpenstackSecretGenerated.DeepCopy()
	secret.Data["manila"] = []byte("client.manila;manila\n")
	return *secret
}()
View Source
var OpenstackTLSCert = `` /* 1325-byte string literal not displayed */
View Source
var OpenstackTLSKey = `` /* 1678-byte string literal not displayed */
View Source
var OsdSpecAnalysisNotOk = &lcmv1alpha1.OsdSpecAnalysisState{
	DiskDaemon: lcmv1alpha1.DaemonStatus{
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"2/2 ready"},
	},
	CephClusterSpecGeneration: &ReefCephClusterReady.Generation,
	SpecAnalysis:              OsdStorageSpecAnalysisFailed,
}
View Source
var OsdSpecAnalysisOk = &lcmv1alpha1.OsdSpecAnalysisState{
	DiskDaemon: lcmv1alpha1.DaemonStatus{
		Status:   lcmv1alpha1.DaemonStateOk,
		Messages: []string{"2/2 ready"},
	},
	CephClusterSpecGeneration: &ReefCephClusterReady.Generation,
	SpecAnalysis:              OsdStorageSpecAnalysisOk,
}
View Source
var OsdStorageSpecAnalysisFailed = map[string]lcmv1alpha1.DaemonStatus{
	"node-1": {
		Status: lcmv1alpha1.DaemonStateFailed,
		Issues: []string{"failed to run 'pelagia-disk-daemon --full-report --port 9999' command to get disk report from pelagia-disk-daemon"},
	},
	"node-2": {
		Status: lcmv1alpha1.DaemonStateFailed,
		Issues: []string{"failed to run 'pelagia-disk-daemon --full-report --port 9999' command to get disk report from pelagia-disk-daemon"},
	},
}
View Source
var OsdStorageSpecAnalysisOk = map[string]lcmv1alpha1.DaemonStatus{
	"node-1": {
		Status: lcmv1alpha1.DaemonStateOk,
	},
	"node-2": {
		Status: lcmv1alpha1.DaemonStateOk,
		Messages: []string{
			"found ceph block partition '/dev/ceph-0e03d5c6-d0e9-4f04-b9af-38d15e14369f/osd-block-61869d90-2c45-4f02-b7c3-96955f41e2ca', belongs to osd '2' (osd fsid '61869d90-2c45-4f02-b7c3-96955f41e2ca'), placed on '/dev/vde' device, which seems to be stray, can be cleaned up",
			"found ceph block partition '/dev/ceph-c5628abe-ae41-4c3d-bdc6-ef86c54bf78c/osd-block-69481cd1-38b1-42fd-ac07-06bf4d7c0e19', belongs to osd '0' (osd fsid '06bf4d7c-9603-41a4-b250-284ecf3ecb2f'), placed on '/dev/vdc' device, which seems to be stray, can be cleaned up",
		},
	},
}
View Source
var PelagiaConfig = corev1.ConfigMap{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: LcmObjectMeta.Namespace,
		Name:      "pelagia-lcmconfig",
	},
	Data: map[string]string{
		"DEPLOYMENT_CEPH_IMAGE":     fmt.Sprintf("mirantis.azurecr.io/ceph/ceph:%s", LatestCephVersionImage),
		"DEPLOYMENT_ROOK_IMAGE":     "mirantis.azurecr.io/mirantis/rook:v1.17.4-15",
		"DEPLOYMENT_NETPOL_ENABLED": "true",
		"DEPLOYMENT_LOG_LEVEL":      "trace",
	},
}
View Source
var PelagiaConfigForPrevCephVersion = corev1.ConfigMap{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: LcmObjectMeta.Namespace,
		Name:      "pelagia-lcmconfig",
	},
	Data: map[string]string{
		"DEPLOYMENT_CEPH_RELEASE":   PreviousCephVersion,
		"DEPLOYMENT_CEPH_IMAGE":     fmt.Sprintf("mirantis.azurecr.io/ceph/ceph:%s", PreviousCephVersionImage),
		"DEPLOYMENT_ROOK_IMAGE":     "mirantis.azurecr.io/mirantis/rook:v1.16.7-1",
		"DEPLOYMENT_NETPOL_ENABLED": "true",
		"DEPLOYMENT_LOG_LEVEL":      "trace",
	},
}
View Source
var PersistentVolumeClaimList = corev1.PersistentVolumeClaimList{
	Items: []corev1.PersistentVolumeClaim{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "test-pvc"},
			Spec: corev1.PersistentVolumeClaimSpec{
				StorageClassName: &StorageClassesList.Items[0].Name,
			},
			Status: corev1.PersistentVolumeClaimStatus{
				Phase: corev1.ClaimPending,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "test-pvc-2"},
			Spec: corev1.PersistentVolumeClaimSpec{
				StorageClassName: &StorageClassesList.Items[0].Name,
			},
			Status: corev1.PersistentVolumeClaimStatus{
				Phase: corev1.ClaimBound,
			},
		},
	},
}
View Source
var PersistentVolumeClaimListEmpty = corev1.PersistentVolumeClaimList{
	Items: []corev1.PersistentVolumeClaim{},
}
View Source
var PersistentVolumeList = corev1.PersistentVolumeList{
	Items: []corev1.PersistentVolume{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "test"},
			Spec: corev1.PersistentVolumeSpec{
				StorageClassName: "pool1-hdd",
			},
			Status: corev1.PersistentVolumeStatus{
				Phase: corev1.VolumePending,
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{Name: "test-2"},
			Spec: corev1.PersistentVolumeSpec{
				StorageClassName: "pool1-hdd",
			},
			Status: corev1.PersistentVolumeStatus{
				Phase: corev1.VolumeBound,
			},
		},
	},
}
View Source
var PersistentVolumeListEmpty = corev1.PersistentVolumeList{
	Items: []corev1.PersistentVolume{},
}
View Source
var PodListEmpty = &corev1.PodList{}
View Source
var PreviousCephVersion = strings.ToLower(previousRelease.Name)
View Source
var PreviousCephVersionImage = fmt.Sprintf("%s.%s", previousRelease.MajorVersion, previousRelease.SupportedMinors[len(previousRelease.SupportedMinors)-1])
View Source
var RadosgwAdminMasterSyncStatusOk = `` /* 645-byte string literal not displayed */
View Source
var RadosgwAdminSecondarySyncStatusOk = `` /* 751-byte string literal not displayed */
View Source
var ReconcileOpenstackSecret = func() corev1.Secret {
	secret := OpenstackSecretGenerated.DeepCopy()
	secret.Data["rgw_metrics_user_access_key"] = []byte("metrics-user-access-key")
	secret.Data["rgw_metrics_user_secret_key"] = []byte("metrics-user-secret-key")
	return *secret
}()
View Source
var ReefCephClusterHasHealthIssues = func() cephv1.CephCluster {
	newcluster := BuildBaseCephCluster(LcmObjectMeta.Name, RookNamespace)
	newcluster.Spec.Storage.Nodes = StorageNodesForAnalysisNotAllSpecified
	newcluster.Status = cephv1.ClusterStatus{
		Phase: cephv1.ConditionFailure,
		CephStatus: &cephv1.CephStatus{
			Health: "HEALTH_WARN",
			FSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
			Details: map[string]cephv1.CephHealthMessage{
				"RECENT_MGR_MODULE_CRASH": {
					Severity: "HEALTH_WARN",
					Message:  "2 mgr modules have recently crashed",
				},
			},
		},
		CephVersion: &cephv1.ClusterVersion{
			Image:   "some-registry.com/ceph:v18.2.4",
			Version: "18.2.4-0",
		},
	}
	return newcluster
}()
View Source
var ReefCephClusterNotReady = func() cephv1.CephCluster {
	newcluster := BuildBaseCephCluster(LcmObjectMeta.Name, RookNamespace)
	newcluster.Status = cephv1.ClusterStatus{
		Phase:      cephv1.ConditionProgressing,
		State:      cephv1.ClusterStateCreated,
		CephStatus: ReefCephClusterReady.Status.CephStatus,
	}
	return newcluster
}()
View Source
var ReefCephClusterReady = func() cephv1.CephCluster {
	newcluster := BuildBaseCephCluster(LcmObjectMeta.Name, RookNamespace)
	newcluster.Spec.CephVersion.Image = "some-registry.com/ceph:v18.2.4"
	newcluster.Status = cephv1.ClusterStatus{
		Phase: cephv1.ConditionReady,
		State: cephv1.ClusterStateCreated,
		CephStatus: &cephv1.CephStatus{
			Health:      "HEALTH_OK",
			FSID:        "8668f062-3faa-358a-85f3-f80fe6c1e306",
			LastChecked: time.Now().Format(time.RFC3339),
		},
		CephVersion: &cephv1.ClusterVersion{
			Image:   "some-registry.com/ceph:v18.2.4",
			Version: "18.2.4-0",
		},
	}
	return newcluster
}()
View Source
var RequestRemoveByDevice = map[string]lcmv1alpha1.NodeCleanUpSpec{
	"node-1": {
		CleanupByDevice: []lcmv1alpha1.DeviceCleanupSpec{
			{
				Device: "/dev/disk/by-path/virtio-pci-0000:00:0f.0",
			},
		},
	},
	"node-2": {
		CleanupByDevice: []lcmv1alpha1.DeviceCleanupSpec{
			{
				Device: "vdd",
			},
		},
	},
}
View Source
var RequestRemoveByOsdID = map[string]lcmv1alpha1.NodeCleanUpSpec{
	"node-1": {
		CleanupByOsd: []lcmv1alpha1.OsdCleanupSpec{
			{ID: 20}, {ID: 30},
		},
	},
	"node-2": {
		CleanupByOsd: []lcmv1alpha1.OsdCleanupSpec{
			{ID: 4}, {ID: 88},
		},
	},
}
View Source
var RequestRemoveFullNodeRemove = map[string]lcmv1alpha1.NodeCleanUpSpec{
	"node-1": {
		CompleteCleanup: true,
	},
	"node-2": {
		DropFromCrush: true,
	},
}
View Source
var ResourceListLimitsDefault = corev1.ResourceList{
	corev1.ResourceCPU:    resource.MustParse("200m"),
	corev1.ResourceMemory: resource.MustParse("256Mi"),
}
View Source
var ResourceListRequestsDefault = corev1.ResourceList{
	corev1.ResourceMemory: resource.MustParse("128Mi"),
	corev1.ResourceCPU:    resource.MustParse("100m"),
}
View Source
var RgwCaCert = `` /* 1784-byte string literal not displayed */
View Source
var RgwCacertExpired = `` /* 1784-byte string literal not displayed */
View Source
var RgwCeilometerUser = cephv1.CephObjectStoreUser{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rgw-ceilometer",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.ObjectStoreUserSpec{
		Store:       "rgw-store",
		DisplayName: "rgw-ceilometer",
		Capabilities: &cephv1.ObjectUserCapSpec{
			User:     "read",
			Bucket:   "read",
			MetaData: "read",
			Usage:    "read",
		},
	},
}
View Source
var RgwCert = `` /* 3242-byte string literal not displayed */
View Source
var RgwExternalService = func() corev1.Service {
	svc := RgwExternalServiceGenerated.DeepCopy()
	svc.Status = corev1.ServiceStatus{
		LoadBalancer: corev1.LoadBalancerStatus{
			Ingress: []corev1.LoadBalancerIngress{
				{
					IP: "192.168.100.150",
				},
			},
		},
	}
	return *svc
}()
View Source
var RgwExternalServiceGenerated = corev1.Service{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rook-ceph-rgw-rgw-store-external",
		Namespace: "rook-ceph",
		Labels: map[string]string{
			"app":               "rook-ceph-rgw",
			"rook_object_store": "rgw-store",
			"external_access":   "rgw",
		},
	},
	Spec: corev1.ServiceSpec{
		Ports: []corev1.ServicePort{
			{
				Name:     "http",
				Port:     80,
				Protocol: "TCP",
				TargetPort: intstr.IntOrString{
					Type:   intstr.Int,
					IntVal: 80,
				},
			},
			{
				Name:     "https",
				Port:     443,
				Protocol: "TCP",
				TargetPort: intstr.IntOrString{
					Type:   intstr.Int,
					IntVal: 8443,
				},
			},
		},
		Type:            "LoadBalancer",
		SessionAffinity: "None",
		Selector: map[string]string{
			"app":               "rook-ceph-rgw",
			"rook_cluster":      "rook-ceph",
			"rook_object_store": "rgw-store",
		},
	},
}
View Source
var RgwExternalSslEnabled = cephlcmv1alpha1.CephRGW{
	Name: "rgw-store",
	Gateway: cephlcmv1alpha1.CephRGWGateway{
		Instances:  2,
		Port:       80,
		SecurePort: 8443,
		ExternalRgwEndpoint: &cephv1.EndpointAddress{
			IP:       "127.0.0.1",
			Hostname: "fake-1",
		},
	},
}
View Source
var RgwIngress = networkingv1.Ingress{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rook-ceph-rgw-rgw-store-ingress",
		Namespace: "rook-ceph",
		Labels: map[string]string{
			"ingress-type": "openstack-ingress-nginx-rgw",
			"cephdeployment.lcm.mirantis.com/ingress": "ceph-object-store-ingress",
			"app":               "rook-ceph-rgw",
			"rook_object_store": "rgw-store",
			"external_access":   "rgw",
		},
		Annotations: map[string]string{
			"nginx.ingress.kubernetes.io/proxy-body-size": "0",
			"nginx.ingress.kubernetes.io/rewrite-target":  "/",
			"nginx.ingress.kubernetes.io/upstream-vhost":  "rgw-store.example.com",
			"kubernetes.io/ingress.class":                 "openstack-ingress-nginx",
		},
	},
	Spec: networkingv1.IngressSpec{
		Rules: []networkingv1.IngressRule{
			{
				Host: "rgw-store.example.com",
				IngressRuleValue: networkingv1.IngressRuleValue{
					HTTP: &networkingv1.HTTPIngressRuleValue{
						Paths: []networkingv1.HTTPIngressPath{
							{
								Backend: networkingv1.IngressBackend{
									Service: &networkingv1.IngressServiceBackend{
										Name: "rook-ceph-rgw-rgw-store",
										Port: networkingv1.ServiceBackendPort{
											Name: "http",
										},
									},
								},
								Path:     "/",
								PathType: &IngressPathType,
							},
						},
					},
				},
			},
		},
		TLS: []networkingv1.IngressTLS{
			{
				Hosts:      []string{"rgw-store.example.com"},
				SecretName: "rgw-store-ingress-secret",
			},
		},
	},
}
View Source
var RgwMultisiteMasterPullRealm1 = cephv1.CephObjectRealm{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "realm1",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.ObjectRealmSpec{
		DefaultRealm: true,
		Pull: cephv1.PullSpec{
			Endpoint: "http://10.10.0.1",
		},
	},
}
View Source
var RgwMultisiteMasterRealm1 = cephv1.CephObjectRealm{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "realm1",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.ObjectRealmSpec{
		DefaultRealm: true,
	},
}
View Source
var RgwMultisiteMasterZone1 = cephv1.CephObjectZone{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "zone1",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.ObjectZoneSpec{
		ZoneGroup: "zonegroup1",
		DataPool: cephv1.PoolSpec{
			DeviceClass:   "hdd",
			FailureDomain: "host",
			ErasureCoded: cephv1.ErasureCodedSpec{
				CodingChunks: 2,
				DataChunks:   1,
			},
		},
		MetadataPool: cephv1.PoolSpec{
			DeviceClass:   "hdd",
			FailureDomain: "host",
			Replicated: cephv1.ReplicatedSpec{
				Size: 3,
			},
		},
	},
}
View Source
var RgwMultisiteMasterZoneGroup1 = cephv1.CephObjectZoneGroup{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "zonegroup1",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.ObjectZoneGroupSpec{
		Realm: "realm1",
	},
}
View Source
var RgwMultisiteSecondaryZone1 = cephv1.CephObjectZone{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "secondary-zone1",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.ObjectZoneSpec{
		ZoneGroup: "zonegroup1",
		DataPool: cephv1.PoolSpec{
			DeviceClass:   "hdd",
			FailureDomain: "host",
			ErasureCoded: cephv1.ErasureCodedSpec{
				CodingChunks: 2,
				DataChunks:   1,
			},
		},
		MetadataPool: cephv1.PoolSpec{
			DeviceClass:   "hdd",
			FailureDomain: "host",
			Replicated: cephv1.ReplicatedSpec{
				Size: 3,
			},
		},
	},
}
View Source
var RgwOpenstackIngress = func(host string) *networkingv1.Ingress {
	ingress := RgwIngress.DeepCopy()
	ingress.Annotations["nginx.ingress.kubernetes.io/upstream-vhost"] = host
	ingress.Spec.Rules[0].Host = host
	ingress.Spec.TLS[0].Hosts = []string{host}
	return ingress
}
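
RgwOpenstackIngress is a helper rather than a fixed fixture: it deep-copies RgwIngress and rewrites every host-dependent field. A hedged usage sketch (the hostname is made up for illustration):

ingress := RgwOpenstackIngress("rgw-store.cloud.example.org")
// ingress.Spec.Rules[0].Host, the upstream-vhost annotation and the TLS host
// now all carry "rgw-store.cloud.example.org".
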
View Source
var RgwSSLCertExpiredSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "rgw-ssl-certificate",
	},
	Data: map[string][]byte{
		"cert":     []byte(RgwCert),
		"cacert":   []byte(RgwCacertExpired),
		"cabundle": []byte(RgwCaCert + "\n"),
	},
}
View Source
var RgwSSLCertSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "rgw-ssl-certificate",
	},
	Data: map[string][]byte{
		"cert":     []byte(RgwCert),
		"cacert":   []byte(RgwCaCert),
		"cabundle": []byte(RgwCaCert + "\n"),
	},
}
View Source
var RgwSSLCertSecretSelfSigned = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "rgw-ssl-certificate",
	},
	Data: map[string][]byte{
		"cert":     []byte("fake-keyfake-crtfake-ca"),
		"cacert":   []byte("fake-ca"),
		"cabundle": []byte("fake-ca\n"),
	},
}
View Source
var RgwStorageClass = storagev1.StorageClass{
	ObjectMeta: metav1.ObjectMeta{
		Name: "rgw-storage-class",
	},
	Provisioner: "rook-ceph.ceph.rook.io/bucket",
	Parameters: map[string]string{
		"objectStoreName":      "rgw-store",
		"objectStoreNamespace": "rook-ceph",
		"region":               "rgw-store",
	},
}
View Source
var RgwUserBase = GetCephRgwUser("test-user", "rook-ceph", "rgw-store")
View Source
var RgwUserWithCapsAndQuotas = cephv1.CephObjectStoreUser{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "test-user",
		Namespace: "rook-ceph",
	},
	Spec: cephv1.ObjectStoreUserSpec{
		Store:       "rgw-store",
		DisplayName: "test-user",
		Capabilities: &cephv1.ObjectUserCapSpec{
			User: "*",
		},
		Quotas: &cephv1.ObjectUserQuotaSpec{
			MaxBuckets: &[]int{1}[0],
		},
	},
}
View Source
var RoleLabelsNode = GetNodeWithLabels("node1", map[string]string{
	"ceph_role_mon": "true",
	"ceph_role_mgr": "true",
	"ceph_role_osd": "true",
	"ceph_role_rgw": "true",
	"ceph_role_mds": "true",
}, nil)
View Source
var RolesTopologyLabelsNode = GetNodeWithLabels("node1", map[string]string{
	"ceph_role_mon":                 "true",
	"ceph_role_mgr":                 "true",
	"ceph_role_osd":                 "true",
	"ceph_role_rgw":                 "true",
	"ceph_role_mds":                 "true",
	"topology.kubernetes.io/region": "region1",
	"cephdpl-prev-topology.kubernetes.io/region": "region1",
	"topology.kubernetes.io/zone":                "zone1",
	"cephdpl-prev-topology.kubernetes.io/zone":   "zone1",
	"topology.rook.io/rack":                      "rack1",
}, nil)
View Source
var RookCephMonEndpoints = corev1.ConfigMap{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "rook-ceph-mon-endpoints",
	},
	Data: map[string]string{
		"data": "a=127.0.0.1,b=127.0.0.2,c=127.0.0.3",
	},
}
View Source
var RookCephMonEndpointsExternal = corev1.ConfigMap{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "rook-ceph-mon-endpoints",
	},
	Data: map[string]string{
		"data":     "cmn01=10.0.0.1:6969,cmn02=10.0.0.2:6969,cmn03=10.0.0.3:6969",
		"mapping":  "{}",
		"maxMonId": "3",
	},
}
View Source
var RookCephMonSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rook-ceph-mon",
		Namespace: "rook-ceph",
	},
	Data: map[string][]byte{
		"cluster-name":  []byte("rook-ceph"),
		"fsid":          []byte("8668f062-3faa-358a-85f3-f80fe6c1e306"),
		"admin-secret":  []byte("AQAcpuJiITYXMhAAXaOoAqOKJ4mhNOAqxFb1Hw=="),
		"ceph-username": []byte("client.admin"),
		"ceph-secret":   []byte("AQAcpuJiITYXMhAAXaOoAqOKJ4mhNOAqxFb1Hw=="),
		"mon-secret":    []byte("mon-secret"),
		"ceph-args":     []byte(""),
	},
}
View Source
var RookCephMonSecretNonAdmin = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rook-ceph-mon",
		Namespace: "rook-ceph",
	},
	Data: map[string][]byte{
		"cluster-name":  []byte("rook-ceph"),
		"fsid":          []byte("8668f062-3faa-358a-85f3-f80fe6c1e306"),
		"admin-secret":  []byte("admin-secret"),
		"ceph-args":     []byte("-n client.test"),
		"ceph-username": []byte("client.test"),
		"ceph-secret":   []byte("AQAcpuJiITYXMhAAXaOoAqOKJ4mhNOAqxFb1Hw=="),
		"mon-secret":    []byte("mon-secret"),
	},
}
View Source
var RookCephObjectsReportOnlyCephCluster = &lcmv1alpha1.RookCephObjectsStatus{
	CephCluster: &ReefCephClusterReady.Status,
}
View Source
var RookCephObjectsReportReadyFull = &lcmv1alpha1.RookCephObjectsStatus{
	CephCluster: &ReefCephClusterReady.Status,
	BlockStorage: &lcmv1alpha1.BlockStorageStatus{
		CephBlockPools: map[string]*cephv1.CephBlockPoolStatus{
			"pool1": CephBlockPoolListReady.Items[0].Status,
			"pool2": CephBlockPoolListReady.Items[1].Status,
		},
	},
	CephClients: map[string]*cephv1.CephClientStatus{
		"client1": CephClientListReady.Items[0].Status,
		"client2": CephClientListReady.Items[1].Status,
	},
	ObjectStorage: &lcmv1alpha1.ObjectStorageStatus{
		CephObjectStores: map[string]*cephv1.ObjectStoreStatus{
			"rgw-store":      CephObjectStoresMultisiteSyncDaemonPhaseReady.Items[0].Status,
			"rgw-store-sync": CephObjectStoresMultisiteSyncDaemonPhaseReady.Items[1].Status,
		},
		CephObjectStoreUsers: map[string]*cephv1.ObjectStoreUserStatus{
			"rgw-user-1": CephObjectStoreUserListReady.Items[0].Status,
			"rgw-user-2": CephObjectStoreUserListReady.Items[1].Status,
		},
		ObjectBucketClaims: map[string]bktv1alpha1.ObjectBucketClaimStatus{
			"bucket-1": ObjectBucketClaimListReady.Items[0].Status,
		},
		CephObjectRealms: map[string]*cephv1.Status{
			"realm-1": CephObjectRealmListReady.Items[0].Status,
		},
		CephObjectZoneGroups: map[string]*cephv1.Status{
			"zonegroup-1": CephObjectZoneGroupListReady.Items[0].Status,
		},
		CephObjectZones: map[string]*cephv1.Status{
			"zone-1": CephObjectZoneListReady.Items[0].Status,
		},
	},
	SharedFilesystem: &lcmv1alpha1.SharedFilesystemStatus{
		CephFilesystems: map[string]*cephv1.CephFilesystemStatus{
			"cephfs-1": CephFilesystemListMultipleReady.Items[0].Status,
			"cephfs-2": CephFilesystemListMultipleReady.Items[1].Status,
		},
	},
}
View Source
var RookCephObjectsReportReadyOnlyCephCluster = &lcmv1alpha1.RookCephObjectsStatus{
	CephCluster: &ReefCephClusterReady.Status,
	BlockStorage: &lcmv1alpha1.BlockStorageStatus{
		CephBlockPools: map[string]*cephv1.CephBlockPoolStatus{
			"pool1": CephBlockPoolListNotReady.Items[0].Status,
			"pool2": nil,
		},
	},
	CephClients: map[string]*cephv1.CephClientStatus{
		"client1": CephClientListNotReady.Items[0].Status,
		"client2": nil,
	},
	ObjectStorage: &lcmv1alpha1.ObjectStorageStatus{
		CephObjectStores: map[string]*cephv1.ObjectStoreStatus{
			"rgw-store":      CephObjectStoresMultisiteSyncDaemonPhaseNotReady.Items[0].Status,
			"rgw-store-sync": nil,
		},
		CephObjectStoreUsers: map[string]*cephv1.ObjectStoreUserStatus{
			"rgw-user-1": CephObjectStoreUserListNotReady.Items[0].Status,
			"rgw-user-2": nil,
		},
		ObjectBucketClaims: map[string]bktv1alpha1.ObjectBucketClaimStatus{
			"bucket-1": ObjectBucketClaimListNotReady.Items[0].Status,
		},
		CephObjectRealms: map[string]*cephv1.Status{
			"realm-1": CephObjectRealmListNotReady.Items[0].Status,
			"realm-2": nil,
		},
		CephObjectZoneGroups: map[string]*cephv1.Status{
			"zonegroup-1": CephObjectZoneGroupListNotReady.Items[0].Status,
			"zonegroup-2": nil,
		},
		CephObjectZones: map[string]*cephv1.Status{
			"zone-1": CephObjectZoneListNotReady.Items[0].Status,
			"zone-2": nil,
		},
	},
	SharedFilesystem: &lcmv1alpha1.SharedFilesystemStatus{
		CephFilesystems: map[string]*cephv1.CephFilesystemStatus{
			"cephfs-1": CephFilesystemListMultipleNotReady.Items[0].Status,
			"cephfs-2": nil,
		},
	},
}
View Source
var RookCephRgwAdminSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "rgw-admin-ops-user",
		Namespace: "rook-ceph",
	},
	Data: map[string][]byte{
		"accessKey": []byte("5TABLO7H0I6BTW6N25X5"),
		"secretKey": []byte("Wd8SDDrtyyAuiD1klOGn9vJqOJh5dOSVlJ6kir9Q"),
	},
}
View Source
var RookCephRgwMetricsSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "rgw-metrics-user-secret",
	},
	Data: map[string][]byte{
		"AccessKey": []byte("metrics-user-access-key"),
		"SecretKey": []byte("metrics-user-secret-key"),
	},
}
View Source
var RookDeploymentLatestVersion = GetRookDeployment(PelagiaConfig.Data["DEPLOYMENT_ROOK_IMAGE"], 1, 1)
View Source
var RookDeploymentNotScaled = GetRookDeployment(PelagiaConfig.Data["DEPLOYMENT_ROOK_IMAGE"], 0, 0)
View Source
var RookDeploymentPrevVersion = GetRookDeployment(PelagiaConfigForPrevCephVersion.Data["DEPLOYMENT_ROOK_IMAGE"], 1, 1)
View Source
var RookDiscover = appsv1.DaemonSet{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "rook-discover",
	},
	Spec: appsv1.DaemonSetSpec{
		Template: corev1.PodTemplateSpec{
			Spec: corev1.PodSpec{
				Containers: []corev1.Container{
					{
						Image: PelagiaConfig.Data["DEPLOYMENT_ROOK_IMAGE"],
					},
				},
			},
		},
	},
	Status: appsv1.DaemonSetStatus{
		NumberReady:            1,
		CurrentNumberScheduled: 1,
		DesiredNumberScheduled: 1,
		NumberAvailable:        1,
		UpdatedNumberScheduled: 1,
	},
}
View Source
var RookNamespace = "rook-ceph"
View Source
var RookOperatorConfigMapBase = RookOperatorConfig(nil)
View Source
var RookOperatorStatusFailed = lcmv1alpha1.DaemonStatus{
	Status: lcmv1alpha1.DaemonStateFailed,
	Issues: []string{"failed to get 'rook-ceph-operator' deployment in 'rook-ceph' namespace"},
}
View Source
var RookOperatorStatusOk = lcmv1alpha1.DaemonStatus{
	Status: lcmv1alpha1.DaemonStateOk,
}
View Source
var SecretsListEmpty = corev1.SecretList{Items: []corev1.Secret{}}
View Source
var ServicesListEmpty = corev1.ServiceList{Items: []corev1.Service{}}
View Source
var ServicesListRgwExternal = corev1.ServiceList{Items: []corev1.Service{RgwExternalService}}
View Source
var SkipCleanupJobRemoveMap = &lcmv1alpha1.TaskRemoveInfo{
	CleanupMap: map[string]lcmv1alpha1.HostMapping{
		"node-1": {
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"30": {
					UUID:                 "f4edb5cd-fb1e-4620-9419-3f9a4fcecba5",
					ClusterFSID:          "8668f062-3faa-358a-85f3-f80fe6c1e306",
					HostDirectory:        "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_f4edb5cd-fb1e-4620-9419-3f9a4fcecba5",
					SkipDeviceCleanupJob: true,
					InCrushMap:           true,
					DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
						"/dev/vda": {
							ID:         "8dad5ae9-ddf7-40bf-8",
							Rotational: true,
							Path:       "/dev/disk/by-path/pci-0000:00:09.0",
							Partition:  "/dev/vda14",
							Type:       "db",
							Alive:      true,
						},
						"/dev/vdb": {
							ID:         "996ea59f-7f47-4fac-b",
							Rotational: true,
							Path:       "/dev/disk/by-path/pci-0000:00:0a.0",
							Partition:  "/dev/ceph-992bbd78-3d8e-4cc3-93dc-eae387309364/osd-block-f4edb5cd-fb1e-4620-9419-3f9a4fcecba5",
							Type:       "block",
							Alive:      true,
						},
					},
				},
			},
		},
		"node-2": {
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"4": {
					UUID:          "ad76cf53-5cb5-48fe-a39a-343734f5ccde",
					ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
					InCrushMap:    true,
					HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_ad76cf53-5cb5-48fe-a39a-343734f5ccde",
					DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
						"/dev/vdd": {
							ID:         "35a15532-8b56-4f83-9",
							Rotational: false,
							Path:       "/dev/disk/by-path/pci-0000:00:1e.0",
							Partition:  "/dev/ceph-dada9f25-41b4-4c26-9a20-448ac01e1d06/osd-block-ad76cf53-5cb5-48fe-a39a-343734f5ccde",
							Type:       "block",
							Alive:      true,
							Zap:        true,
						},
					},
				},
				"5": {
					UUID:                 "af39b794-e1c6-41c0-8997-d6b6c631b8f2",
					ClusterFSID:          "8668f062-3faa-358a-85f3-f80fe6c1e306",
					HostDirectory:        "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_af39b794-e1c6-41c0-8997-d6b6c631b8f2",
					SkipDeviceCleanupJob: true,
					InCrushMap:           true,
					DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
						"/dev/vdd": {
							ID:         "35a15532-8b56-4f83-9",
							Rotational: false,
							Path:       "/dev/disk/by-path/pci-0000:00:1e.0",
							Partition:  "/dev/ceph-dada9f25-41b4-4c26-9a20-448ac01e1d06/osd-block-7d09cceb-4de0-478e-9d8d-bd09cb0c904e",
							Type:       "block",
							Alive:      true,
						},
					},
				},
			},
		},
	},
	Issues: []string{},
	Warnings: []string{
		"[node 'node-1'] device 'vdb' has set 'skip device clean up' flag set in spec. Related osd deployment (osd id '30') should be removed manually as well",
		"[node 'node-1'] found physical osd db partition '/dev/vda14' for osd '30'",
		"[node 'node-2'] osd with id '5' has 'skip device clean up' flag set in spec. Related deployment should be removed manually as well",
	},
}
View Source
var StorageClassesListEmpty = storagev1.StorageClassList{
	Items: []storagev1.StorageClass{},
}
View Source
var StorageNodesForAnalysisNotAllSpecified = []cephv1.Node{
	{
		Name: "node-1",
		Selection: cephv1.Selection{
			Devices: []cephv1.Device{
				{
					Name: "vdb",
					Config: map[string]string{
						"deviceClass":    "hdd",
						"metadataDevice": "/dev/vda14",
					},
				},
				{
					Name: "vdf",
					Config: map[string]string{
						"deviceClass":    "hdd",
						"metadataDevice": "/dev/ceph-metadata/part-2",
					},
				},
			},
		},
	},
	{
		Name: "node-2",
		Selection: cephv1.Selection{
			Devices: []cephv1.Device{
				{
					Name: "vdb",
					Config: map[string]string{
						"deviceClass": "hdd",
					},
				},
			},
		},
	},
}
View Source
var StorageNodesForAnalysisOk = []cephv1.Node{
	{
		Name: "node-1",
		Selection: cephv1.Selection{
			Devices: []cephv1.Device{
				{
					Name: "vdb",
					Config: map[string]string{
						"deviceClass":    "hdd",
						"metadataDevice": "/dev/vda14",
					},
				},
				{
					FullPath: "/dev/disk/by-path/pci-0000:00:0f.0",
					Config: map[string]string{
						"deviceClass":    "hdd",
						"metadataDevice": "/dev/disk/by-id/virtio-e8d89e2f-ffc6-4988-9",
					},
				},
				{
					Name: "vdf",
					Config: map[string]string{
						"deviceClass":    "hdd",
						"metadataDevice": "/dev/ceph-metadata/part-2",
					},
				},
			},
		},
	},
	{
		Name: "node-2",
		Selection: cephv1.Selection{
			Devices: []cephv1.Device{
				{
					Name: "vdb",
					Config: map[string]string{
						"deviceClass": "hdd",
					},
				},
				{
					Name: "vdd",
					Config: map[string]string{
						"deviceClass":   "hdd",
						"osdsPerDevice": "2",
					},
				},
			},
		},
	},
}
View Source
var StorageNodesForRequestFiltered = []cephv1.Node{
	{
		Name: "node-1",
		Config: map[string]string{
			"metadataDevice": "vdd",
		},
		Selection: cephv1.Selection{
			DevicePathFilter: "/dev/vd[fe]",
		},
	},
	{
		Name: "node-2",
		Selection: cephv1.Selection{
			DeviceFilter: "vdb",
		},
	},
}
View Source
var StorageNodesForRequestReduced = []cephv1.Node{
	{
		Name: "node-2",
		Selection: cephv1.Selection{
			Devices: []cephv1.Device{
				{
					Name: "vdb",
					Config: map[string]string{
						"deviceClass": "hdd",
					},
				},
				{
					Name: "vdd",
					Config: map[string]string{
						"deviceClass":   "hdd",
						"osdsPerDevice": "2",
					},
				},
			},
		},
	},
}
View Source
var StrayOnNodeAndInCrushRemoveMap = &lcmv1alpha1.TaskRemoveInfo{
	CleanupMap: map[string]lcmv1alpha1.HostMapping{
		"node-2": {
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"0.06bf4d7c-9603-41a4-b250-284ecf3ecb2f.__stray": StrayOnlyOnNodeRemoveMap.CleanupMap["node-2"].OsdMapping["0.06bf4d7c-9603-41a4-b250-284ecf3ecb2f.__stray"],
				"2.61869d90-2c45-4f02-b7c3-96955f41e2ca.__stray": {
					UUID:          "61869d90-2c45-4f02-b7c3-96955f41e2ca",
					ClusterFSID:   "8668f062-3faa-358a-85f3-f80fe6c1e306",
					HostDirectory: "/var/lib/rook/rook-ceph/8668f062-3faa-358a-85f3-f80fe6c1e306_61869d90-2c45-4f02-b7c3-96955f41e2ca",
					InCrushMap:    true,
					DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
						"/dev/vde": {
							ID:         "8cbb9ce3-6fb4-4216-8",
							Rotational: true,
							Path:       "/dev/disk/by-path/pci-0000:00:0e.0",
							Partition:  "/dev/ceph-0e03d5c6-d0e9-4f04-b9af-38d15e14369f/osd-block-61869d90-2c45-4f02-b7c3-96955f41e2ca",
							Type:       "block",
							Zap:        true,
							Alive:      true,
						},
					},
				},
			},
		},
	},
	Issues: []string{},
	Warnings: []string{
		"[node 'node-2'] found partition with stray osd uuid '06bf4d7c-9603-41a4-b250-284ecf3ecb2f', id '0', will be cleaned up",
		"[node 'node-2'] found partition with stray osd uuid '61869d90-2c45-4f02-b7c3-96955f41e2ca', id '2', will be cleaned up",
	},
}
View Source
var StrayOnlyInCrushRemoveMap = &lcmv1alpha1.TaskRemoveInfo{
	CleanupMap: map[string]lcmv1alpha1.HostMapping{
		"__stray": {
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"2": {
					UUID:        "61869d90-2c45-4f02-b7c3-96955f41e2ca",
					ClusterFSID: "8668f062-3faa-358a-85f3-f80fe6c1e306",
					InCrushMap:  true,
				},
			},
		},
	},
	Issues: []string{},
	Warnings: []string{
		"[stray] detected stray osds, but impossible to determine related host/device (probably disk(s) removed or host(s) down), device cleanup jobs will be skipped",
	},
}
View Source
var StrayOnlyOnNodeRemoveMap = &lcmv1alpha1.TaskRemoveInfo{
	CleanupMap: map[string]lcmv1alpha1.HostMapping{
		"node-2": {
			OsdMapping: map[string]lcmv1alpha1.OsdMapping{
				"0.06bf4d7c-9603-41a4-b250-284ecf3ecb2f.__stray": {
					UUID:          "06bf4d7c-9603-41a4-b250-284ecf3ecb2f",
					ClusterFSID:   "8668f062-0lsk-358a-1gt4-f80fe6c1e306",
					HostDirectory: "/var/lib/rook/rook-ceph/8668f062-0lsk-358a-1gt4-f80fe6c1e306_06bf4d7c-9603-41a4-b250-284ecf3ecb2f",
					InCrushMap:    false,
					DeviceMapping: map[string]lcmv1alpha1.DeviceInfo{
						"/dev/vdc": {
							ID:         "ffe08946-7614-4f69-b",
							Rotational: true,
							Path:       "/dev/disk/by-path/pci-0000:00:0c.0",
							Partition:  "/dev/ceph-c5628abe-ae41-4c3d-bdc6-ef86c54bf78c/osd-block-69481cd1-38b1-42fd-ac07-06bf4d7c0e19",
							Type:       "block",
							Zap:        true,
							Alive:      true,
						},
					},
				},
			},
		},
	},
	Issues: []string{},
	Warnings: []string{
		"[node 'node-2'] found partition with stray osd uuid '06bf4d7c-9603-41a4-b250-284ecf3ecb2f', id '0', will be cleaned up",
	},
}
View Source
var TestCephClient = cephv1.CephClient{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "test",
	},
	Spec: cephv1.ClientSpec{
		Name: "test",
		Caps: map[string]string{
			"osd": "custom-caps",
		},
	},
}
View Source
var TestCephClientReady = func() cephv1.CephClient {
	c := GetCephClientWithStatus(TestCephClient, true)
	c.Status.Info = map[string]string{"secretName": "rook-ceph-client-test"}
	return *c
}()
View Source
var TestCephCluster = cephv1.CephCluster{
	ObjectMeta: metav1.ObjectMeta{
		Name:      LcmObjectMeta.Name,
		Namespace: RookNamespace,
	},
	Spec: cephv1.ClusterSpec{
		CephVersion: CephClusterGenerated.Spec.CephVersion,
		ContinueUpgradeAfterChecksEvenIfNotHealthy: true,
		DataDirHostPath: "/var/lib/rook",
		Mon:             CephClusterGenerated.Spec.Mon,
		Mgr:             CephClusterGenerated.Spec.Mgr,
		Network:         CephClusterGenerated.Spec.Network,
		Placement:       CephClusterGenerated.Spec.Placement,
		Storage: cephv1.StorageScopeSpec{
			Selection: cephv1.Selection{UseAllDevices: &[]bool{false}[0]},
			Nodes: []cephv1.Node{
				{
					Name: "node-1",
					Selection: cephv1.Selection{
						UseAllDevices: nil,
						DeviceFilter:  "",
						Devices: []cephv1.Device{
							{
								Name:     "sda",
								FullPath: "",
								Config:   map[string]string{"deviceClass": "hdd"},
							},
							{
								Name:     "sdb",
								FullPath: "",
								Config:   map[string]string{"deviceClass": "hdd"},
							},
						},
						DevicePathFilter:     "",
						VolumeClaimTemplates: nil,
					},
					Config: nil,
				},
				{
					Name: "node-2",
					Selection: cephv1.Selection{
						UseAllDevices: nil,
						DeviceFilter:  "",
						Devices: []cephv1.Device{
							{
								Name:     "sda",
								FullPath: "",
								Config:   map[string]string{"osdsPerDevice": "1", "deviceClass": "hdd"},
							},
							{
								Name:     "sdb",
								FullPath: "",
								Config:   map[string]string{"osdsPerDevice": "2", "deviceClass": "hdd"},
							},
							{
								Name:     "sdc",
								FullPath: "",
								Config:   map[string]string{"metadataDevice": "sde", "deviceClass": "hdd"},
							},
						},
						DevicePathFilter:     "",
						VolumeClaimTemplates: nil,
					},
					Config: nil,
				},
				{
					Name: "node-3",
					Selection: cephv1.Selection{
						UseAllDevices: nil,
						DeviceFilter:  "",
						Devices: []cephv1.Device{
							{
								Name:     "sda",
								FullPath: "",
								Config:   map[string]string{"deviceClass": "hdd"},
							},
						},
						DevicePathFilter:     "",
						VolumeClaimTemplates: nil,
					},
					Config: map[string]string{"osdsPerDevice": "2"},
				},
			},
		},
		SkipUpgradeChecks: true,
		HealthCheck:       CephClusterGenerated.Spec.HealthCheck,
	},
}
View Source
var TestCephFs = cephv1.CephFilesystem{
	ObjectMeta: metav1.ObjectMeta{
		Namespace: "rook-ceph",
		Name:      "test-cephfs",
	},
	Spec: cephv1.FilesystemSpec{
		MetadataPool: cephv1.NamedPoolSpec{
			PoolSpec: cephv1.PoolSpec{
				DeviceClass: "hdd",
				Replicated: cephv1.ReplicatedSpec{
					Size: 3,
				},
			},
		},
		DataPools: []cephv1.NamedPoolSpec{
			{
				Name: "some-pool-name",
				PoolSpec: cephv1.PoolSpec{
					DeviceClass: "hdd",
					Replicated: cephv1.ReplicatedSpec{
						Size: 3,
					},
				},
			},
		},
		MetadataServer: cephv1.MetadataServerSpec{
			Annotations: map[string]string{
				"cephdeployment.lcm.mirantis.com/config-global-updated": "some-time",
				"cephdeployment.lcm.mirantis.com/config-mds-updated":    "some-time",
			},
			ActiveCount:   1,
			ActiveStandby: true,
			Placement: cephv1.Placement{
				NodeAffinity: &corev1.NodeAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
						NodeSelectorTerms: []corev1.NodeSelectorTerm{
							{
								MatchExpressions: []corev1.NodeSelectorRequirement{
									{
										Key:      "ceph_role_mds",
										Operator: "In",
										Values: []string{
											"true",
										},
									},
								},
							},
						},
					},
				},
				PodAntiAffinity: &corev1.PodAntiAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
						{
							LabelSelector: &metav1.LabelSelector{
								MatchExpressions: []metav1.LabelSelectorRequirement{
									{
										Key:      "rook_file_system",
										Operator: "In",
										Values: []string{
											"test-cephfs",
										},
									},
								},
							},
							TopologyKey: "kubernetes.io/hostname",
						},
					},
				},
				Tolerations: []corev1.Toleration{
					{
						Key:      "ceph_role_mds",
						Operator: "Exists",
					},
				},
			},
			LivenessProbe: &cephv1.ProbeSpec{
				Probe: &corev1.Probe{
					TimeoutSeconds:   5,
					FailureThreshold: 5,
				},
			},
		},
	},
}
View Source
var TestCephFsWithTolerationsAndResources = func() cephv1.CephFilesystem {
	fs := TestCephFs.DeepCopy()
	fs.Spec.MetadataServer.Annotations["cephdeployment.lcm.mirantis.com/config-mds.test-cephfs-updated"] = "some-time"
	fs.Spec.MetadataServer.Placement.Tolerations = append(fs.Spec.MetadataServer.Placement.Tolerations, corev1.Toleration{
		Key:      "test.kubernetes.io/testkey",
		Effect:   "Schedule",
		Operator: "Exists",
	})
	fs.Spec.MetadataServer.Resources = corev1.ResourceRequirements{
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("100m"),
			corev1.ResourceMemory: resource.MustParse("156Mi"),
		},
		Requests: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("28Mi"),
			corev1.ResourceCPU:    resource.MustParse("10m"),
		},
	}
	return *fs
}()
View Source
var ToolBoxAndDiskDaemonPodsList = &corev1.PodList{
	Items: []corev1.Pod{
		GetReadySimplePod("pelagia-disk-daemon", RookNamespace, map[string]string{"app": "pelagia-disk-daemon"}),
		GetReadySimplePod("pelagia-ceph-toolbox", LcmObjectMeta.Namespace, map[string]string{"app": "pelagia-ceph-toolbox"}),
	},
}
View Source
var ToolBoxDeploymentBase = GetToolBoxDeployment(false)
View Source
var ToolBoxDeploymentExternal = GetToolBoxDeployment(true)
View Source
var ToolBoxDeploymentReady = func() *appsv1.Deployment {
	tb := ToolBoxDeploymentBase.DeepCopy()
	tb.Status = appsv1.DeploymentStatus{
		Replicas:          1,
		UpdatedReplicas:   1,
		ReadyReplicas:     1,
		AvailableReplicas: 1,
	}
	return tb
}()
View Source
var ToolBoxDeploymentWithRgwSecret = func() *appsv1.Deployment {
	deploy := GetToolBoxDeployment(false)
	deploy.Spec.Template.Annotations = map[string]string{"rgw-ssl-certificate/sha256": "c448d82eeaebb5ab538f49a14a57ec788abffd242b43f8eba7b757a22c555005"}
	deploy.Spec.Template.Spec.InitContainers = []corev1.Container{
		{
			Name:    "cabundle-update",
			Image:   deploy.Spec.Template.Spec.Containers[0].Image,
			Command: []string{"/bin/bash", "-c"},
			Args:    []string{"/usr/bin/update-ca-trust extract; cp -rf /etc/pki/ca-trust/extracted//* /tmp/new-ca-bundle/"},
			SecurityContext: &corev1.SecurityContext{
				Capabilities:             &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
				RunAsUser:                &[]int64{0}[0],
				RunAsGroup:               &[]int64{0}[0],
				Privileged:               &[]bool{false}[0],
				AllowPrivilegeEscalation: &[]bool{false}[0],
			},
			TerminationMessagePath:   "/dev/termination-log",
			TerminationMessagePolicy: "File",
			ImagePullPolicy:          "IfNotPresent",
			VolumeMounts: []corev1.VolumeMount{
				{
					Name:      "cabundle-secret",
					MountPath: "/etc/pki/ca-trust/source/anchors/",
					ReadOnly:  true,
				},
				{
					Name:      "cabundle-updated",
					MountPath: "/tmp/new-ca-bundle/",
				},
			},
		},
	}
	deploy.Spec.Template.Spec.Containers[0].VolumeMounts = append(deploy.Spec.Template.Spec.Containers[0].VolumeMounts,
		corev1.VolumeMount{
			Name:      "cabundle-updated",
			MountPath: "/etc/pki/ca-trust/extracted/",
			ReadOnly:  true,
		})
	deploy.Spec.Template.Spec.Volumes = append(deploy.Spec.Template.Spec.Volumes,
		corev1.Volume{
			Name: "cabundle-updated",
			VolumeSource: corev1.VolumeSource{
				EmptyDir: &corev1.EmptyDirVolumeSource{},
			},
		})
	deploy.Spec.Template.Spec.Volumes = append(deploy.Spec.Template.Spec.Volumes,
		corev1.Volume{
			Name: "cabundle-secret",
			VolumeSource: corev1.VolumeSource{
				Secret: &corev1.SecretVolumeSource{
					SecretName:  "rgw-ssl-certificate",
					DefaultMode: &[]int32{420}[0],
					Items: []corev1.KeyToPath{
						{
							Key:  "cabundle",
							Path: "rgw-ssl-certificate.crt",
							Mode: &[]int32{256}[0],
						},
					},
				},
			},
		})
	return deploy
}()
View Source
var ToolBoxPodList = &corev1.PodList{
	Items: []corev1.Pod{
		GetReadySimplePod("pelagia-ceph-toolbox", RookNamespace, map[string]string{"app": "pelagia-ceph-toolbox"}),
	},
}
View Source
var TopologyLabelsNode = GetNodeWithLabels("node1", map[string]string{
	"topology.kubernetes.io/region":              "region1",
	"cephdpl-prev-topology.kubernetes.io/region": "region1",
	"topology.kubernetes.io/zone":                "zone1",
	"cephdpl-prev-topology.kubernetes.io/zone":   "zone1",
	"topology.rook.io/rack":                      "rack1",
}, nil)
View Source
var TopologyLabelsNodeNoRoles = GetNodeWithLabels("node1", map[string]string{
	"topology.kubernetes.io/region": "region1",
	"topology.kubernetes.io/zone":   "zone1",
	"topology.rook.io/rack":         "rack1",
}, nil)
View Source
var TopologyLabelsNodeOrigRoles = GetNodeWithLabels("node1", map[string]string{
	"topology.kubernetes.io/region":              "region1",
	"cephdpl-prev-topology.kubernetes.io/region": "orig-region",
	"topology.kubernetes.io/zone":                "zone1",
	"cephdpl-prev-topology.kubernetes.io/zone":   "orig-zone",
	"topology.rook.io/rack":                      "rack1",
}, nil)
View Source
var TrueVarForPointer = true

Functions

func AdaptDeviceMapping

func AdaptDeviceMapping(node, osdID string, devConfig map[string]map[string]bool) map[string]lcmv1alpha1.DeviceInfo

func AdaptOsdMapping

func AdaptOsdMapping(node, osdID string, osdConfig map[string]bool, devConfig map[string]map[string]bool) lcmv1alpha1.OsdMapping
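
The fixtures above (for example NotLabeledNodesFullRemoveMap) call this helper with an osdConfig flag map; a minimal sketch in the same style, with the flag names copied from those fixtures and a nil devConfig meaning no device mapping is attached:

mapping := AdaptOsdMapping("node-1", "20", map[string]bool{"inCrush": true, "noDaemon": true}, nil)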

func BuildBaseCephCluster

func BuildBaseCephCluster(name, namespace string) cephv1.CephCluster
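
Several CephCluster fixtures (OctopusCephCluster, ReefCephClusterReady) start from this builder and then overwrite Spec and Status fields; a hedged sketch of the same pattern with an illustrative image tag:

cluster := BuildBaseCephCluster(LcmObjectMeta.Name, RookNamespace)
cluster.Spec.CephVersion.Image = "some-registry.com/ceph:v18.2.4" // illustrative image
cluster.Status.Phase = cephv1.ConditionReady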

func BuildCliOutput

func BuildCliOutput(template string, cmd string, overrideForOutput map[string]string) string

func CephClusterOpenstack

func CephClusterOpenstack() *cephv1.CephCluster

func CephRBDMirrorWithStatus

func CephRBDMirrorWithStatus(mirror cephv1.CephRBDMirror, phase string) *cephv1.CephRBDMirror

func DaemonSetWithStatus

func DaemonSetWithStatus(namespace, name string, desiredReplicas, readyReplicas int32) *appsv1.DaemonSet

func DiskDaemonNodeReportWithStrayOkNode2

func DiskDaemonNodeReportWithStrayOkNode2(inCrush bool) *lcmcommon.DiskDaemonReport

func GetAbortedTask

func GetAbortedTask(taskToAbort lcmv1alpha1.CephOsdRemoveTask, time, reason string) *lcmv1alpha1.CephOsdRemoveTask

func GetAvailableNode

func GetAvailableNode(name string) corev1.Node

func GetCephBlockPoolWithStatus

func GetCephBlockPoolWithStatus(pool cephv1.CephBlockPool, ready bool) cephv1.CephBlockPool

func GetCephClientWithStatus

func GetCephClientWithStatus(client cephv1.CephClient, ready bool) *cephv1.CephClient

func GetCephDeployPool

func GetCephDeployPool(name string, role string) cephlcmv1alpha1.CephPool

func GetCephFsWithStatus

func GetCephFsWithStatus(condition cephv1.ConditionType) *cephv1.CephFilesystem

func GetCephRgwUser

func GetCephRgwUser(name, namespace, rgwName string) cephv1.CephObjectStoreUser

func GetCleanupJob

func GetCleanupJob(host, osd, longName string, devices map[string]string) *batch.Job

func GetCleanupJobOnlyStatus

func GetCleanupJobOnlyStatus(jobName, namespace string, active, failed, succeeded int32) *batch.Job

func GetConfigMap

func GetConfigMap(name, namespace string, params map[string]string) *corev1.ConfigMap

func GetDeployment

func GetDeployment(name, namespace string, labels map[string]string, replicas *int32) *appsv1.Deployment

func GetDiskDaemonReportToString

func GetDiskDaemonReportToString(report *lcmcommon.DiskDaemonReport) string

func GetExternalConnectionSecret

func GetExternalConnectionSecret(content []byte) corev1.Secret

func GetInfoWithStatus

func GetInfoWithStatus(sourceInfo *lcmv1alpha1.TaskRemoveInfo, statusMap map[string]*lcmv1alpha1.RemoveResult) *lcmv1alpha1.TaskRemoveInfo
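
Judging by NodesRemoveMapEmptyRemoveStatus and NodesRemoveMapOsdFinishedStatus above, the "*" key in statusMap appears to apply the same RemoveResult to every host of the source TaskRemoveInfo. A sketch in that style:

info := GetInfoWithStatus(FullNodesRemoveMap, map[string]*lcmv1alpha1.RemoveResult{
	"*": {OsdRemoveStatus: &lcmv1alpha1.RemoveStatus{Status: lcmv1alpha1.RemoveFinished}},
})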

func GetNamedStorageClass

func GetNamedStorageClass(pool string, external bool) *storagev1.StorageClass

func GetNetworkPolicy

func GetNetworkPolicy(appName string, ports []networkingv1.NetworkPolicyPort) networkingv1.NetworkPolicy
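
The NetworkPolicyMon/Mgr/Osd/Rgw fixtures above are produced by this helper; a standalone sketch with an illustrative port (local pointer variables because NetworkPolicyPort takes pointers):

port := intstr.FromInt(9283)
proto := corev1.ProtocolTCP
np := GetNetworkPolicy("rook-ceph-mgr", []networkingv1.NetworkPolicyPort{
	{Port: &port, Protocol: &proto},
})
// compare np against the expected fixture in a test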

func GetNodeWithLabels

func GetNodeWithLabels(nodeName string, labels map[string]string, annotations map[string]string) corev1.Node
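
Used by all node fixtures above; labels and annotations are plain maps and either may be nil. A sketch mirroring NodeMonitorIPAnnotation:

node := GetNodeWithLabels("node1",
	map[string]string{"ceph_role_mon": "true"},
	map[string]string{"network.rook.io/mon-ip": "127.0.0.1"})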

func GetNodesList

func GetNodesList(nodes []NodeAttrs) corev1.NodeList

func GetOpenstackDeploymentStatusList

func GetOpenstackDeploymentStatusList(release string, state string, correctOsdplStatus bool) *unstructured.UnstructuredList

func GetOpenstackPool

func GetOpenstackPool(name string, ready bool, targetRatio float64) cephv1.CephBlockPool
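
OpenstackCephBlockPoolsList and its Ready variant build their items with this helper; a one-line sketch using the same name, readiness flag and target ratio as those fixtures:

pool := GetOpenstackPool("volumes-hdd", true, 0.4)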

func GetOsdNodesList

func GetOsdNodesList(names []string) *corev1.NodeList

func GetReadySimplePod

func GetReadySimplePod(name, namespace string, labels map[string]string) corev1.Pod

func GetRookDeployment

func GetRookDeployment(image string, desiredReplicas, readyReplicas int32) *appsv1.Deployment
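
RookDeploymentLatestVersion and RookDeploymentNotScaled above are thin wrappers over this helper; the same call spelled out:

deploy := GetRookDeployment(PelagiaConfig.Data["DEPLOYMENT_ROOK_IMAGE"], 1, 1)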

func GetSimpleBucket

func GetSimpleBucket(name string) *bktv1alpha1.ObjectBucketClaim

func GetToolBoxDeployment

func GetToolBoxDeployment(external bool) *appsv1.Deployment

func GetUpdatedClusterVersionCephDeploy

func GetUpdatedClusterVersionCephDeploy(cephDpl *cephlcmv1alpha1.CephDeployment, clusterVersion string) *cephlcmv1alpha1.CephDeployment

func RgwUserWithStatus

func RgwUserWithStatus(user cephv1.CephObjectStoreUser, phase string) *cephv1.CephObjectStoreUser

func RookOperatorConfig

func RookOperatorConfig(parameters map[string]string) *corev1.ConfigMap

Types

type NodeAttrs

type NodeAttrs struct {
	Name        string
	Labeled     bool
	Unreachable bool
}
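
NodeAttrs feeds GetNodesList (see the Functions index above); a hedged sketch of building a two-node list in which the second node is marked unreachable:

nodes := GetNodesList([]NodeAttrs{
	{Name: "node-1", Labeled: true},
	{Name: "node-2", Labeled: true, Unreachable: true},
})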

Directories

Path Synopsis
