Skip to content

Commit 18cc75b

Browse files
committed
[system] Extend regression test suite to support container-based deployments
1 parent df08ad3 commit 18cc75b

File tree

1 file changed

+38
-10
lines changed

1 file changed

+38
-10
lines changed

sebs/regression.py

Lines changed: 38 additions and 10 deletions
Original file line numberDiff line numberDiff line change
@@ -28,34 +28,51 @@
2828
benchmarks_nodejs = ["110.dynamic-html", "120.uploader", "210.thumbnailer"]
2929

3030
architectures_aws = ["x64", "arm64"]
31+
deployments_aws = ["package", "container"]
32+
3133
architectures_gcp = ["x64"]
34+
deployments_gcp = ["package"]
35+
3236
architectures_azure = ["x64"]
37+
deployments_azure = ["package"]
38+
3339
architectures_openwhisk = ["x64"]
40+
deployments_openwhisk = ["container"]
3441

3542
# user-defined config passed during initialization
3643
cloud_config: Optional[dict] = None
3744

3845

3946
class TestSequenceMeta(type):
40-
def __init__(cls, name, bases, attrs, benchmarks, architectures, deployment_name, triggers):
47+
def __init__(
48+
cls, name, bases, attrs, benchmarks, architectures,
49+
deployments, deployment_name, triggers
50+
):
4151
type.__init__(cls, name, bases, attrs)
4252
cls.deployment_name = deployment_name
4353
cls.triggers = triggers
4454

45-
def __new__(mcs, name, bases, dict, benchmarks, architectures, deployment_name, triggers):
46-
def gen_test(benchmark_name, architecture):
55+
def __new__(
56+
mcs, name, bases, dict, benchmarks, architectures,
57+
deployments, deployment_name, triggers
58+
):
59+
def gen_test(benchmark_name, architecture, deployment_type):
4760
def test(self):
4861

49-
log_name = f"Regression-{deployment_name}-{benchmark_name}"
62+
log_name = f"Regression-{deployment_name}-{benchmark_name}-{deployment_type}"
5063
logger = logging.getLogger(log_name)
5164
logger.setLevel(logging.INFO)
5265
logging_wrapper = ColoredWrapper(log_name, logger)
5366

5467
self.experiment_config["architecture"] = architecture
68+
self.experiment_config["container_deployment"] = deployment_type == "container"
5569

5670
deployment_client = self.get_deployment(benchmark_name, architecture)
71+
deployment_client.disable_rich_output()
72+
5773
logging_wrapper.info(
58-
f"Begin regression test of {benchmark_name} on {deployment_client.name()}."
74+
f"Begin regression test of {benchmark_name} on {deployment_client.name()}. "
75+
f"Architecture {architecture}, deployment type: {deployment_type}."
5976
)
6077

6178
experiment_config = self.client.get_experiment_config(self.experiment_config)
@@ -109,9 +126,12 @@ def test(self):
109126

110127
for architecture in architectures:
111128

112-
# for trigger in triggers:
113-
test_name = f"test_{deployment_name}_{benchmark}_{architecture}"
114-
dict[test_name] = gen_test(benchmark, architecture)
129+
for deployment_type in deployments:
130+
131+
# for trigger in triggers:
132+
test_name = f"test_{deployment_name}_{benchmark}"
133+
test_name += f"_{architecture}_{deployment_type}"
134+
dict[test_name] = gen_test(benchmark, architecture, deployment_type)
115135

116136
dict["lock"] = threading.Lock()
117137
dict["cfg"] = None
@@ -123,6 +143,7 @@ class AWSTestSequencePython(
123143
metaclass=TestSequenceMeta,
124144
benchmarks=benchmarks_python,
125145
architectures=architectures_aws,
146+
deployments=deployments_aws,
126147
deployment_name="aws",
127148
triggers=[Trigger.TriggerType.LIBRARY, Trigger.TriggerType.HTTP],
128149
):
@@ -151,6 +172,7 @@ class AWSTestSequenceNodejs(
151172
metaclass=TestSequenceMeta,
152173
benchmarks=benchmarks_nodejs,
153174
architectures=architectures_aws,
175+
deployments=deployments_aws,
154176
deployment_name="aws",
155177
triggers=[Trigger.TriggerType.LIBRARY, Trigger.TriggerType.HTTP],
156178
):
@@ -174,6 +196,7 @@ class AzureTestSequencePython(
174196
metaclass=TestSequenceMeta,
175197
benchmarks=benchmarks_python,
176198
architectures=architectures_azure,
199+
deployments=deployments_azure,
177200
deployment_name="azure",
178201
triggers=[Trigger.TriggerType.HTTP],
179202
):
@@ -214,6 +237,7 @@ class AzureTestSequenceNodejs(
214237
metaclass=TestSequenceMeta,
215238
benchmarks=benchmarks_nodejs,
216239
architectures=architectures_azure,
240+
deployments=deployments_azure,
217241
deployment_name="azure",
218242
triggers=[Trigger.TriggerType.HTTP],
219243
):
@@ -250,6 +274,7 @@ class GCPTestSequencePython(
250274
metaclass=TestSequenceMeta,
251275
benchmarks=benchmarks_python,
252276
architectures=architectures_gcp,
277+
deployments=deployments_gcp,
253278
deployment_name="gcp",
254279
triggers=[Trigger.TriggerType.HTTP],
255280
):
@@ -273,6 +298,7 @@ class GCPTestSequenceNodejs(
273298
metaclass=TestSequenceMeta,
274299
benchmarks=benchmarks_nodejs,
275300
architectures=architectures_gcp,
301+
deployments=deployments_gcp,
276302
deployment_name="gcp",
277303
triggers=[Trigger.TriggerType.HTTP],
278304
):
@@ -296,6 +322,7 @@ class OpenWhiskTestSequencePython(
296322
metaclass=TestSequenceMeta,
297323
benchmarks=benchmarks_python,
298324
architectures=architectures_openwhisk,
325+
deployments=deployments_openwhisk,
299326
deployment_name="openwhisk",
300327
triggers=[Trigger.TriggerType.HTTP],
301328
):
@@ -319,6 +346,7 @@ class OpenWhiskTestSequenceNodejs(
319346
metaclass=TestSequenceMeta,
320347
benchmarks=benchmarks_nodejs,
321348
architectures=architectures_openwhisk,
349+
deployments=deployments_openwhisk,
322350
deployment_name="openwhisk",
323351
triggers=[Trigger.TriggerType.HTTP],
324352
):
@@ -351,8 +379,8 @@ def __init__(self):
351379
def status(self, *args, **kwargs):
352380
self.all_correct = self.all_correct and (kwargs["test_status"] in ["inprogress", "success"])
353381

354-
bench, arch = kwargs["test_id"].split("_")[-2:None]
355-
test_name = f"{bench}, {arch}"
382+
bench, arch, deployment_type = kwargs["test_id"].split("_")[-3:None]
383+
test_name = f"{bench}, {arch}, {deployment_type}"
356384
if not kwargs["test_status"]:
357385
test_id = kwargs["test_id"]
358386
if test_id not in self.output:

0 commit comments

Comments (0)