2 changes: 1 addition & 1 deletion .secrets.baseline
@@ -987,7 +987,7 @@
"hashed_secret": "f0e2d8610edefa0c02b673dcac7964b02ce3e890",
"is_secret": false,
"is_verified": false,
"line_number": 576,
"line_number": 582,
"type": "Basic Auth Credentials",
"verified_result": null
}
21 changes: 0 additions & 21 deletions ocs_ci/deployment/azure.py
@@ -79,16 +79,6 @@ def __init__(self):
self.name = self.__class__.__name__
super(AZUREIPI, self).__init__()
# Set custom storage class path for Azure Performance Plus feature
if config.ENV_DATA.get("azure_performance_plus") or config.DEPLOYMENT.get(
"azure_performance_plus"
):
self.custom_storage_class_path = os.path.join(
constants.TEMPLATE_DEPLOYMENT_DIR, "azure_storageclass_perfplus.yaml"
)
logger.info(
"Azure Performance Plus enabled. Will use custom storage class: %s",
self.custom_storage_class_path,
)

class OCPDeployment(IPIOCPDeployment):
def deploy_prereq(self):
@@ -218,17 +208,6 @@ def __init__(self):
self.name = self.__class__.__name__
self.azure_util = AzureAroUtil()
super(AZUREAroManaged, self).__init__()
# Set custom storage class path for Azure Performance Plus feature
if config.ENV_DATA.get("azure_performance_plus") or config.DEPLOYMENT.get(
"azure_performance_plus"
):
self.custom_storage_class_path = os.path.join(
constants.TEMPLATE_DEPLOYMENT_DIR, "azure_storageclass_perfplus.yaml"
)
logger.info(
"Azure Performance Plus enabled. Will use custom storage class: %s",
self.custom_storage_class_path,
)

def deploy_ocp(self, log_cli_level="DEBUG"):
"""
118 changes: 76 additions & 42 deletions ocs_ci/deployment/deployment.py
@@ -207,13 +207,46 @@ class Deployment(object):
"""

def __init__(self):
self.platform = config.ENV_DATA["platform"]
self.ocp_deployment_type = config.ENV_DATA["deployment_type"]
self.cluster_path = config.ENV_DATA["cluster_path"]
self.namespace = config.ENV_DATA["cluster_namespace"]
self.sts_role_arn = None
self.storage_class = storage_class.get_storageclass()
self.custom_storage_class_path = None
storage_class.set_custom_storage_class_path()
logger.info(
f"Deployment platform {self.platform} initiated with storage class: {self.storage_class}"
)

# In multicluster runs spanning different platforms only one deployer object is defined
# (a known design flaw), so we use config to hold the storage class for each cluster.
# See issue: https://issues.redhat.com/browse/OCSQE-4214 for more details
@property
def storage_class(self):
if not config.ENV_DATA.get("storage_class"):
sc = storage_class.get_storageclass()
self.storage_class = sc
return sc
return config.ENV_DATA["storage_class"]

@storage_class.setter
def storage_class(self, value):
config.ENV_DATA["storage_class"] = value

@property
def platform(self):
return config.ENV_DATA["platform"]

@property
def ocp_deployment_type(self):
return config.ENV_DATA["deployment_type"]

@property
def cluster_path(self):
return config.ENV_DATA["cluster_path"]

@property
def namespace(self):
return config.ENV_DATA["cluster_namespace"]

@property
def custom_storage_class_path(self):
return config.ENV_DATA["custom_storage_class_path"]
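Reviewer note: a minimal standalone sketch of the pattern these properties adopt, assuming the intent described in the comment above — per-cluster state lives in a config mapping rather than on the shared deployer instance, so each cluster context in a multicluster run sees its own value. ENV_DATA and detect_storageclass below are illustrative stand-ins, not ocs-ci APIs:

ENV_DATA = {}  # stand-in for config.ENV_DATA, which ocs-ci swaps per cluster context


def detect_storageclass():
    # stand-in for storage_class.get_storageclass()
    return "thin-csi"


class Deployment:
    @property
    def storage_class(self):
        # Lazily detect once and cache in the config mapping, so the value
        # survives across deployer instances and is scoped per cluster.
        if not ENV_DATA.get("storage_class"):
            ENV_DATA["storage_class"] = detect_storageclass()
        return ENV_DATA["storage_class"]

    @storage_class.setter
    def storage_class(self, value):
        ENV_DATA["storage_class"] = value


d = Deployment()
assert d.storage_class == "thin-csi"  # detected and cached on first access
d.storage_class = "managed-csi"  # explicit override also lands in the config
assert ENV_DATA["storage_class"] == "managed-csi"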

class OCPDeployment(BaseOCPDeployment):
"""
@@ -785,7 +818,7 @@ def deploy_cluster(self, log_cli_level="DEBUG"):
else:
x_addr_list = None
if config.DEPLOYMENT.get("arbiter_deployment"):
arbiter_zone = self.get_arbiter_location()
arbiter_zone = get_arbiter_location()
logger.debug("detected arbiter zone: %s", arbiter_zone)
else:
arbiter_zone = None
@@ -1172,40 +1205,6 @@ def wait_for_csv(self, csv_name, namespace=None):
return
logger.debug(f"Still waiting for the CSV: {csv_name}")

def get_arbiter_location(self):
"""
Get arbiter mon location for storage cluster
"""
if config.DEPLOYMENT.get("arbiter_deployment") and not config.DEPLOYMENT.get(
"arbiter_autodetect"
):
return config.DEPLOYMENT.get("arbiter_zone")

# below logic will autodetect arbiter_zone
nodes = ocp.OCP(kind="node").get().get("items", [])

worker_nodes_zones = {
node["metadata"]["labels"].get(constants.ZONE_LABEL)
for node in nodes
if constants.WORKER_LABEL in node["metadata"]["labels"]
and str(constants.OPERATOR_NODE_LABEL)[:-3] in node["metadata"]["labels"]
}

master_nodes_zones = {
node["metadata"]["labels"].get(constants.ZONE_LABEL)
for node in nodes
if constants.MASTER_LABEL in node["metadata"]["labels"]
}

arbiter_locations = list(master_nodes_zones - worker_nodes_zones)

if len(arbiter_locations) < 1:
raise UnavailableResourceException(
"Atleast 1 different zone required than storage nodes in master nodes to host arbiter mon"
)

return arbiter_locations[0]

def deploy_ocs_via_operator(self, image=None):
"""
Method for deploy OCS via OCS operator
@@ -1539,7 +1538,7 @@ def deploy_ocs_via_operator(self, image=None):
f"{constants.ROOK_OPERATOR_CONFIGMAP} -p {config_map_patch}"
)

storage_cluster_setup = StorageClusterSetup(deployment=self)
storage_cluster_setup = StorageClusterSetup()
storage_cluster_setup.setup_storage_cluster()

if config.DEPLOYMENT["infra_nodes"]:
@@ -4049,3 +4048,38 @@ def deploy_dr_policy(self):
"regional-dr": RDRMultiClusterDROperatorsDeploy,
"metro-dr": MDRMultiClusterDROperatorsDeploy,
}


def get_arbiter_location():
"""
Get arbiter mon location for storage cluster
"""
if config.DEPLOYMENT.get("arbiter_deployment") and not config.DEPLOYMENT.get(
"arbiter_autodetect"
):
return config.DEPLOYMENT.get("arbiter_zone")

# The logic below autodetects the arbiter zone
nodes = ocp.OCP(kind="node").get().get("items", [])

worker_nodes_zones = {
node["metadata"]["labels"].get(constants.ZONE_LABEL)
for node in nodes
if constants.WORKER_LABEL in node["metadata"]["labels"]
and str(constants.OPERATOR_NODE_LABEL)[:-3] in node["metadata"]["labels"]
}

master_nodes_zones = {
node["metadata"]["labels"].get(constants.ZONE_LABEL)
for node in nodes
if constants.MASTER_LABEL in node["metadata"]["labels"]
}

arbiter_locations = list(master_nodes_zones - worker_nodes_zones)

if len(arbiter_locations) < 1:
raise UnavailableResourceException(
"Atleast 1 different zone required than storage nodes in master nodes to host arbiter mon"
)

return arbiter_locations[0]
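Reviewer note: the autodetect branch selects the arbiter zone as the set difference between master zones and storage-worker zones. A tiny worked example with illustrative zone names:

# Illustrative values: masters span three zones, storage workers occupy two.
master_nodes_zones = {"us-east-1a", "us-east-1b", "us-east-1c"}
worker_nodes_zones = {"us-east-1a", "us-east-1b"}

arbiter_locations = list(master_nodes_zones - worker_nodes_zones)
print(arbiter_locations)  # ['us-east-1c'] -- the only zone with a master but no storage worker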
23 changes: 18 additions & 5 deletions ocs_ci/deployment/fusion_data_foundation.py
@@ -37,10 +37,23 @@ def __init__(self):
self.pre_release = config.DEPLOYMENT.get("fdf_pre_release", False)
self.kubeconfig = config.RUN["kubeconfig"]
self.lso_enabled = config.DEPLOYMENT.get("local_storage", False)
self.storage_class = (
storage_class.get_storageclass() or constants.DEFAULT_STORAGECLASS_LSO
)
self.custom_storage_class_path = None
storage_class.set_custom_storage_class_path()

@property
def storage_class(self):
if not config.ENV_DATA.get("storage_class"):
sc = storage_class.get_storageclass() or constants.DEFAULT_STORAGECLASS_LSO
self.storage_class = sc
return sc
return config.ENV_DATA["storage_class"]

@storage_class.setter
def storage_class(self, value):
config.ENV_DATA["storage_class"] = value

@property
def custom_storage_class_path(self):
return config.ENV_DATA["custom_storage_class_path"]

def deploy(self):
"""
@@ -179,7 +192,7 @@ def setup_storage(self):
odfcluster_status_check()
else:
logger.info("Storage configuration for Fusion 2.11 or greater")
clustersetup = StorageClusterSetup(self)
clustersetup = StorageClusterSetup()
create_lvs_resource(self.storage_class, self.storage_class)
add_storage_label()
clustersetup.setup_storage_cluster()
8 changes: 0 additions & 8 deletions ocs_ci/deployment/gcp.py
@@ -5,12 +5,10 @@
"""

import logging
import os

from libcloud.compute.types import NodeState

from ocs_ci.deployment.cloud import CloudDeploymentBase, IPIOCPDeployment
from ocs_ci.ocs.constants import TEMPLATE_DEPLOYMENT_DIR
from ocs_ci.utility.gcp import GoogleCloudUtil


@@ -78,9 +76,3 @@ class GCPIPI(GCPBase):
def __init__(self):
self.name = self.__class__.__name__
super(GCPIPI, self).__init__()
# storage class for StorageCluster CRD on Google Cloud platform
# uses a custom storageclass, which is created prior creating
# StorageCluster CR during OCS installation
self.custom_storage_class_path = os.path.join(
TEMPLATE_DEPLOYMENT_DIR, "storageclass.gcp.yaml"
)
37 changes: 37 additions & 0 deletions ocs_ci/deployment/helpers/storage_class.py
@@ -1,4 +1,5 @@
import logging
import os
import yaml

from ocs_ci.framework import config
@@ -77,3 +78,39 @@ def create_custom_storageclass(storage_class_path: str) -> str:
run_cmd(f"oc create -f {storage_class_path}")

return storage_class_name


def get_custom_storage_class_path():
"""
Get the custom storage class path for the given platform, or None if the platform does not use one
"""
custom_sc_path = None
platform = config.ENV_DATA.get("platform")
if platform == constants.AZURE_PLATFORM:
if config.ENV_DATA.get("azure_performance_plus") or config.DEPLOYMENT.get(
"azure_performance_plus"
):
custom_sc_path = os.path.join(
constants.TEMPLATE_DEPLOYMENT_DIR, "azure_storageclass_perfplus.yaml"
)
elif platform == constants.VSPHERE_PLATFORM:
if config.ENV_DATA.get("use_custom_sc_in_deployment"):
custom_sc_path = os.path.join(
constants.TEMPLATE_DEPLOYMENT_DIR, "storageclass_thin-csi-odf.yaml"
)
elif platform == constants.GCP_PLATFORM:
custom_sc_path = os.path.join(
constants.TEMPLATE_DEPLOYMENT_DIR, "storageclass.gcp.yaml"
)

logger.info(
f"For platform: {platform} we will use custom storage class path: {custom_sc_path}"
)
return custom_sc_path


def set_custom_storage_class_path():
"""
Set the custom storage class path for the given platform
"""
config.ENV_DATA["custom_storage_class_path"] = get_custom_storage_class_path()
4 changes: 0 additions & 4 deletions ocs_ci/deployment/vmware.py
@@ -119,10 +119,6 @@ def __init__(self):
self.cluster_launcer_repo_path = os.path.join(
constants.EXTERNAL_DIR, "v4-scaleup"
)
if config.ENV_DATA.get("use_custom_sc_in_deployment"):
self.custom_storage_class_path = os.path.join(
constants.TEMPLATE_DEPLOYMENT_DIR, "storageclass_thin-csi-odf.yaml"
)
os.environ["TF_LOG"] = config.ENV_DATA.get("TF_LOG_LEVEL", "TRACE")
os.environ["TF_LOG_PATH"] = os.path.join(
config.ENV_DATA.get("cluster_path"), config.ENV_DATA.get("TF_LOG_FILE")
23 changes: 12 additions & 11 deletions ocs_ci/utility/storage_cluster_setup.py
@@ -26,13 +26,12 @@ class StorageClusterSetup(object):
Performs the setup of the StorageCluster for Data Foundation deployments
"""

def __init__(self, deployment):
def __init__(self):
"""
Args:
deployment (Deployment): The deployment object

"""
self.deployment = deployment
self.platform = config.ENV_DATA["platform"]
self.namespace = config.ENV_DATA["cluster_namespace"]
self.ocs_version = version.get_semantic_ocs_version_from_config()
@@ -48,9 +47,9 @@ def __init__(self, deployment):

def setup_storage_cluster(self):
# create custom storage class for StorageCluster CR if necessary
if self.deployment.custom_storage_class_path is not None:
self.deployment.storage_class = storage_class.create_custom_storageclass(
self.deployment.custom_storage_class_path
if config.ENV_DATA.get("custom_storage_class_path") is not None:
config.ENV_DATA["storage_class"] = storage_class.create_custom_storageclass(
config.ENV_DATA.get("custom_storage_class_path")
)

# Set rook log level
@@ -109,12 +108,14 @@

device_class = config.ENV_DATA.get("device_class")
if self.arbiter_deployment:
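# Local import avoids a circular dependency with ocs_ci.deployment.deployment,
# which itself imports StorageClusterSetup.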
from ocs_ci.deployment.deployment import get_arbiter_location

cluster_data["spec"]["arbiter"] = {}
cluster_data["spec"]["nodeTopologies"] = {}
cluster_data["spec"]["arbiter"]["enable"] = True
cluster_data["spec"]["nodeTopologies"][
"arbiterLocation"
] = self.deployment.get_arbiter_location()
] = get_arbiter_location()
cluster_data["spec"]["storageDeviceSets"][0]["replica"] = 4

cluster_data["metadata"]["name"] = config.ENV_DATA["storage_cluster_name"]
@@ -195,10 +196,10 @@
] = f"{device_size}Gi"

# set storage class to OCS default on current platform
if self.deployment.storage_class:
deviceset_data["dataPVCTemplate"]["spec"][
"storageClassName"
] = self.deployment.storage_class
if config.ENV_DATA.get("storage_class"):
deviceset_data["dataPVCTemplate"]["spec"]["storageClassName"] = (
config.ENV_DATA["storage_class"]
)

# StorageCluster tweaks for LSO
if self.local_storage:
@@ -308,7 +309,7 @@
"spec": {
"accessModes": ["ReadWriteOnce"],
"resources": {"requests": {"storage": "20Gi"}},
"storageClassName": self.deployment.storage_class,
"storageClassName": config.ENV_DATA["storage_class"],
"volumeMode": "Filesystem",
}
}
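Reviewer note: with the deployment argument gone, both call sites shown in this PR (deploy_ocs_via_operator and Fusion's setup_storage) reduce to the same shape, since all inputs now come from config — a sketch:

from ocs_ci.utility.storage_cluster_setup import StorageClusterSetup

# No deployment object needed anymore; everything is read from config.ENV_DATA.
storage_cluster_setup = StorageClusterSetup()
storage_cluster_setup.setup_storage_cluster()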