diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 0a50534ff..acac68266 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -48,6 +48,7 @@ jobs: - services_test.py - signet_test.py - scenarios_test.py + - namespace_admin_test.py steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 diff --git a/.gitignore b/.gitignore index c42f6ba7f..f4b5d0076 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ warnet.egg-info .env dist/ build/ +**/kubeconfigs/ diff --git a/docs/admin.md b/docs/admin.md new file mode 100644 index 000000000..b888de3d8 --- /dev/null +++ b/docs/admin.md @@ -0,0 +1,69 @@ +# Admin + +## Connect to your cluster + +Ensure you are connected to your cluster because Warnet will use your current configuration to generate configurations for your users. + +```shell +$ warnet status +``` + +Observe that the output of the command matches your cluster. + +## Create an *admin* directory + +```shell +$ mkdir admin +$ cd admin +$ warnet admin init +``` + +Observe that there are now two folders within the *admin* directory: *namespaces* and *networks* + +## The *namespaces* directory +This directory contains a Helm chart named *two_namespaces_two_users*. + +Modify this chart based on the number of teams and users you have. + +Deploy the *two_namespaces_two_users* chart. + +```shell +$ warnet deploy namespaces/two_namespaces_two_users +``` + +Observe that this creates service accounts and namespaces in the cluster: + +```shell +$ kubectl get ns +$ kubectl get sa -A +``` + +### Creating Warnet invites +A Warnet invite is a Kubernetes config file. + +Create invites for each of your users. + +```shell +$ warnet admin create-kubeconfigs +``` + +Observe the *kubeconfigs* directory. It holds invites for each user. + +### Using Warnet invites +Users can connect to your wargame using their invite. 
+ +```shell +$ warnet auth alice-wargames-red-team-kubeconfig +``` + +### Set up a network for your users +Before letting the users into your cluster, make sure to create a network of tanks for them to view. + + +```shell +$ warnet deploy networks/mynet --to-all-users +``` + +Observe that the *wargames-red-team* namespace now has tanks in it. + +**TODO**: What's the logging approach here? diff --git a/resources/charts/namespaces/values.yaml b/resources/charts/namespaces/values.yaml index 61f946879..e4e95400d 100644 --- a/resources/charts/namespaces/values.yaml +++ b/resources/charts/namespaces/values.yaml @@ -7,16 +7,16 @@ roles: - name: pod-viewer rules: - apiGroups: [""] - resources: ["pods"] + resources: ["pods", "services"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] verbs: ["get"] - apiGroups: [""] resources: ["configmaps", "secrets"] - verbs: ["get"] + verbs: ["get", "list"] - apiGroups: [""] - resources: ["persistentvolumeclaims"] + resources: ["persistentvolumeclaims", "namespaces"] verbs: ["get", "list"] - apiGroups: [""] resources: ["events"] @@ -24,16 +24,16 @@ roles: - name: pod-manager rules: - apiGroups: [""] - resources: ["pods"] + resources: ["pods", "services"] verbs: ["get", "list", "watch", "create", "delete", "update"] - apiGroups: [""] resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] verbs: ["get", "create"] - apiGroups: [""] resources: ["configmaps", "secrets"] - verbs: ["get", "create"] + verbs: ["get", "list", "create"] - apiGroups: [""] - resources: ["persistentvolumeclaims"] + resources: ["persistentvolumeclaims", "namespaces"] verbs: ["get", "list"] - apiGroups: [""] resources: ["events"] diff --git a/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml b/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml index 91ac2fc67..75cc8e42c 100644 --- 
a/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml +++ b/resources/namespaces/two_namespaces_two_users/namespace-defaults.yaml @@ -3,14 +3,16 @@ users: roles: - pod-viewer - pod-manager -roles: - - name: pod-viewer - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] - - name: pod-manager - rules: - - apiGroups: [""] - resources: ["pods", "configmaps"] - verbs: ["get", "list", "watch", "create", "update", "delete"] +# the pod-viewer and pod-manager roles are the default +# roles defined in values.yaml for the namespaces charts +# +# if you need a different set of roles for a particular namespaces +# deployment, you can override values.yaml by providing your own +# role definitions below +# +# roles: +# - name: my-custom-role +# rules: +# - apiGroups: "" +# resources: "" +# verbs: "" diff --git a/resources/namespaces/two_namespaces_two_users/namespaces.yaml b/resources/namespaces/two_namespaces_two_users/namespaces.yaml index 4172657b8..542456ef6 100644 --- a/resources/namespaces/two_namespaces_two_users/namespaces.yaml +++ b/resources/namespaces/two_namespaces_two_users/namespaces.yaml @@ -1,5 +1,5 @@ namespaces: - - name: warnet-red-team + - name: wargames-red-team users: - name: alice roles: @@ -8,42 +8,7 @@ namespaces: roles: - pod-viewer - pod-manager - roles: - - name: pod-viewer - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] - verbs: ["get"] - - apiGroups: [""] - resources: ["configmaps", "secrets"] - verbs: ["get"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get"] - - name: pod-manager - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch", "create", "delete", "update"] - - apiGroups: [""] - resources: ["pods/log", "pods/exec", "pods/attach", 
"pods/portforward"] - verbs: ["get", "create"] - - apiGroups: [""] - resources: ["configmaps", "secrets"] - verbs: ["get", "create"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get"] - - name: warnet-blue-team + - name: wargames-blue-team users: - name: mallory roles: @@ -52,38 +17,3 @@ namespaces: roles: - pod-viewer - pod-manager - roles: - - name: pod-viewer - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] - verbs: ["get"] - - apiGroups: [""] - resources: ["configmaps", "secrets"] - verbs: ["get"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get"] - - name: pod-manager - rules: - - apiGroups: [""] - resources: ["pods"] - verbs: ["get", "list", "watch", "create", "delete", "update"] - - apiGroups: [""] - resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] - verbs: ["get", "create"] - - apiGroups: [""] - resources: ["configmaps", "secrets"] - verbs: ["get", "create"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list"] - - apiGroups: [""] - resources: ["events"] - verbs: ["get"] diff --git a/src/warnet/admin.py b/src/warnet/admin.py index f194e16bd..233a220e9 100644 --- a/src/warnet/admin.py +++ b/src/warnet/admin.py @@ -1,12 +1,22 @@ import os +import sys from pathlib import Path import click +import yaml from rich import print as richprint -from .constants import NETWORK_DIR +from .constants import KUBECONFIG, NETWORK_DIR, WARGAMES_NAMESPACE_PREFIX +from .k8s import ( + K8sError, + get_cluster_of_current_context, + get_namespaces_by_type, + get_service_accounts_in_namespace, + open_kubeconfig, +) from .namespaces import copy_namespaces_defaults, namespaces from .network import 
copy_network_defaults +from .process import run_command @click.group(name="admin", hidden=True) @@ -33,3 +43,94 @@ def init(): f"[green]Copied network and namespace example files to {Path(current_dir) / NETWORK_DIR.name}[/green]" ) richprint(f"[green]Created warnet project structure in {current_dir}[/green]") + + +@admin.command() +@click.option( + "--kubeconfig-dir", + default="kubeconfigs", + help="Directory to store kubeconfig files (default: kubeconfigs)", +) +@click.option( + "--token-duration", + default=172800, + type=int, + help="Duration of the token in seconds (default: 48 hours)", +) +def create_kubeconfigs(kubeconfig_dir, token_duration): + """Create kubeconfig files for ServiceAccounts""" + kubeconfig_dir = os.path.expanduser(kubeconfig_dir) + + try: + kubeconfig_data = open_kubeconfig(KUBECONFIG) + except K8sError as e: + click.secho(e, fg="yellow") + click.secho(f"Could not open auth_config: {KUBECONFIG}", fg="red") + sys.exit(1) + + cluster = get_cluster_of_current_context(kubeconfig_data) + + os.makedirs(kubeconfig_dir, exist_ok=True) + + # Get all namespaces that start with prefix + # This assumes when deploying multiple namespaces for the purpose of team games, all namespaces start with a prefix, + # e.g., tabconf-wargames-*. Currently, this is a bit brittle, but we can improve on this in the future + # by automatically applying a TEAM_PREFIX when creating the get_warnet_namespaces + # TODO: choose a prefix convention and have it managed by the helm charts instead of requiring the + # admin user to pipe through the correct string in multiple places. 
Another would be to use + # labels instead of namespace naming conventions + warnet_namespaces = get_namespaces_by_type(WARGAMES_NAMESPACE_PREFIX) + + for v1namespace in warnet_namespaces: + namespace = v1namespace.metadata.name + click.echo(f"Processing namespace: {namespace}") + service_accounts = get_service_accounts_in_namespace(namespace) + + for sa in service_accounts: + # Create a token for the ServiceAccount with specified duration + command = f"kubectl create token {sa} -n {namespace} --duration={token_duration}s" + try: + token = run_command(command) + except Exception as e: + click.echo( + f"Failed to create token for ServiceAccount {sa} in namespace {namespace}. Error: {str(e)}. Skipping..." + ) + continue + + # Create a kubeconfig file for the user + kubeconfig_file = os.path.join(kubeconfig_dir, f"{sa}-{namespace}-kubeconfig") + + # TODO: move yaml out of python code to resources/manifests/ + # + # might not be worth it since we are just reading the yaml to then create a bunch of values and its not + # actually used to deploy anything into the cluster + # Then benefit would be making this code a bit cleaner and easy to follow, fwiw + kubeconfig_dict = { + "apiVersion": "v1", + "kind": "Config", + "clusters": [cluster], + "users": [{"name": sa, "user": {"token": token}}], + "contexts": [ + { + "name": f"{sa}-{namespace}", + "context": {"cluster": cluster["name"], "namespace": namespace, "user": sa}, + } + ], + "current-context": f"{sa}-{namespace}", + } + + # Write to a YAML file + with open(kubeconfig_file, "w") as f: + yaml.dump(kubeconfig_dict, f, default_flow_style=False) + + click.echo(f" Created kubeconfig file for {sa}: {kubeconfig_file}") + + click.echo("---") + click.echo( + f"All kubeconfig files have been created in the '{kubeconfig_dir}' directory with a duration of {token_duration} seconds." 
+ ) + click.echo("Distribute these files to the respective users.") + click.echo( + "Users can then use by running `warnet auth ` or with kubectl by specifying the --kubeconfig flag or by setting the KUBECONFIG environment variable." + ) + click.echo(f"Note: The tokens will expire after {token_duration} seconds.") diff --git a/src/warnet/bitcoin.py b/src/warnet/bitcoin.py index b0f5d0c66..9d0c54f50 100644 --- a/src/warnet/bitcoin.py +++ b/src/warnet/bitcoin.py @@ -1,9 +1,9 @@ import os import re -import subprocess import sys from datetime import datetime from io import BytesIO +from typing import Optional import click from test_framework.messages import ser_uint256 @@ -11,7 +11,7 @@ from urllib3.exceptions import MaxRetryError from .constants import BITCOINCORE_CONTAINER -from .k8s import get_default_namespace, get_mission, pod_log +from .k8s import get_default_namespace_or, get_mission, pod_log from .process import run_command @@ -24,23 +24,23 @@ def bitcoin(): @click.argument("tank", type=str) @click.argument("method", type=str) @click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments -def rpc(tank: str, method: str, params: str): +@click.option("--namespace", default=None, show_default=True) +def rpc(tank: str, method: str, params: str, namespace: Optional[str]): """ Call bitcoin-cli [params] on """ try: - result = _rpc(tank, method, params) + result = _rpc(tank, method, params, namespace) except Exception as e: print(f"{e}") sys.exit(1) print(result) -def _rpc(tank: str, method: str, params: str): +def _rpc(tank: str, method: str, params: str, namespace: Optional[str] = None): # bitcoin-cli should be able to read bitcoin.conf inside the container # so no extra args like port, chain, username or password are needed - namespace = get_default_namespace() - + namespace = get_default_namespace_or(namespace) if params: cmd = f"kubectl -n {namespace} exec {tank} --container {BITCOINCORE_CONTAINER} -- bitcoin-cli {method} {' 
'.join(map(str, params))}" else: @@ -50,11 +50,13 @@ def _rpc(tank: str, method: str, params: str): @bitcoin.command() @click.argument("tank", type=str, required=True) -def debug_log(tank: str): +@click.option("--namespace", default=None, show_default=True) +def debug_log(tank: str, namespace: Optional[str]): """ Fetch the Bitcoin Core debug log from """ - cmd = f"kubectl logs {tank}" + namespace = get_default_namespace_or(namespace) + cmd = f"kubectl logs {tank} --namespace {namespace}" try: print(run_command(cmd)) except Exception as e: @@ -77,8 +79,12 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): sys.exit(1) matching_logs = [] + longest_namespace_len = 0 for tank in tanks: + if len(tank.metadata.namespace) > longest_namespace_len: + longest_namespace_len = len(tank.metadata.namespace) + pod_name = tank.metadata.name logs = pod_log(pod_name, BITCOINCORE_CONTAINER) @@ -87,7 +93,7 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): for line in logs: log_entry = line.decode("utf-8").rstrip() if re.search(pattern, log_entry): - matching_logs.append((log_entry, pod_name)) + matching_logs.append((log_entry, tank.metadata.namespace, pod_name)) except Exception as e: print(e) except KeyboardInterrupt: @@ -98,7 +104,7 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): matching_logs.sort(key=lambda x: x[0]) # Print matching logs - for log_entry, pod_name in matching_logs: + for log_entry, namespace, pod_name in matching_logs: try: # Split the log entry into Kubernetes timestamp, Bitcoin timestamp, and the rest of the log k8s_timestamp, rest = log_entry.split(" ", 1) @@ -106,9 +112,13 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): # Format the output based on the show_k8s_timestamps option if show_k8s_timestamps: - print(f"{pod_name}: {k8s_timestamp} {bitcoin_timestamp} {log_message}") + print( + f"{pod_name} {namespace:<{longest_namespace_len}} {k8s_timestamp} 
{bitcoin_timestamp} {log_message}" + ) else: - print(f"{pod_name}: {bitcoin_timestamp} {log_message}") + print( + f"{pod_name} {namespace:<{longest_namespace_len}} {bitcoin_timestamp} {log_message}" + ) except ValueError: # If we can't parse the timestamps, just print the original log entry print(f"{pod_name}: {log_entry}") @@ -123,13 +133,41 @@ def grep_logs(pattern: str, show_k8s_timestamps: bool, no_sort: bool): def messages(tank_a: str, tank_b: str, chain: str): """ Fetch messages sent between and in [chain] + + Optionally, include a namespace like so: tank-name.namespace """ + + def parse_name_and_namespace(tank: str) -> tuple[str, Optional[str]]: + tank_split = tank.split(".") + try: + namespace = tank_split[1] + except IndexError: + namespace = None + return tank_split[0], namespace + + tank_a_split = tank_a.split(".") + tank_b_split = tank_b.split(".") + if len(tank_a_split) > 2 or len(tank_b_split) > 2: + click.secho("Accepted formats: tank-name OR tank-name.namespace") + click.secho(f"Foramts found: {tank_a} {tank_b}") + sys.exit(1) + + tank_a, namespace_a = parse_name_and_namespace(tank_a) + tank_b, namespace_b = parse_name_and_namespace(tank_b) + try: + namespace_a = get_default_namespace_or(namespace_a) + namespace_b = get_default_namespace_or(namespace_b) + # Get the messages - messages = get_messages(tank_a, tank_b, chain) + messages = get_messages( + tank_a, tank_b, chain, namespace_a=namespace_a, namespace_b=namespace_b + ) if not messages: - print(f"No messages found between {tank_a} and {tank_b}") + print( + f"No messages found between {tank_a} ({namespace_a}) and {tank_b} ({namespace_b})" + ) return # Process and print messages @@ -154,7 +192,7 @@ def messages(tank_a: str, tank_b: str, chain: str): print(f"Error fetching messages between nodes {tank_a} and {tank_b}: {e}") -def get_messages(tank_a: str, tank_b: str, chain: str): +def get_messages(tank_a: str, tank_b: str, chain: str, namespace_a: str, namespace_b: str): """ Fetch messages from 
the message capture files """ @@ -162,15 +200,17 @@ def get_messages(tank_a: str, tank_b: str, chain: str): base_dir = f"/root/.bitcoin/{subdir}message_capture" # Get the IP of node_b - cmd = f"kubectl get pod {tank_b} -o jsonpath='{{.status.podIP}}'" + cmd = f"kubectl get pod {tank_b} -o jsonpath='{{.status.podIP}}' --namespace {namespace_b}" tank_b_ip = run_command(cmd).strip() # Get the service IP of node_b - cmd = f"kubectl get service {tank_b} -o jsonpath='{{.spec.clusterIP}}'" + cmd = ( + f"kubectl get service {tank_b} -o jsonpath='{{.spec.clusterIP}}' --namespace {namespace_b}" + ) tank_b_service_ip = run_command(cmd).strip() # List directories in the message capture folder - cmd = f"kubectl exec {tank_a} -- ls {base_dir}" + cmd = f"kubectl exec {tank_a} --namespace {namespace_a} -- ls {base_dir}" dirs = run_command(cmd).splitlines() @@ -181,7 +221,8 @@ def get_messages(tank_a: str, tank_b: str, chain: str): for file, outbound in [["msgs_recv.dat", False], ["msgs_sent.dat", True]]: file_path = f"{base_dir}/{dir_name}/{file}" # Fetch the file contents from the container - cmd = f"kubectl exec {tank_a} -- cat {file_path}" + cmd = f"kubectl exec {tank_a} --namespace {namespace_a} -- cat {file_path}" + import subprocess blob = subprocess.run( cmd, shell=True, capture_output=True, executable="bash" diff --git a/src/warnet/constants.py b/src/warnet/constants.py index 3bf666007..25b583352 100644 --- a/src/warnet/constants.py +++ b/src/warnet/constants.py @@ -11,10 +11,15 @@ tag for index, tag in enumerate(reversed(SUPPORTED_TAGS)) for _ in range(index + 1) ] -DEFAULT_NAMESPACE = "warnet" +DEFAULT_NAMESPACE = "default" LOGGING_NAMESPACE = "warnet-logging" INGRESS_NAMESPACE = "ingress" -HELM_COMMAND = "helm upgrade --install --create-namespace" +WARGAMES_NAMESPACE_PREFIX = "wargames-" +KUBE_INTERNAL_NAMESPACES = ["kube-node-lease", "kube-public", "kube-system", "kubernetes-dashboard"] +HELM_COMMAND = "helm upgrade --install" + +TANK_MISSION = "tank" 
+COMMANDER_MISSION = "commander" BITCOINCORE_CONTAINER = "bitcoincore" COMMANDER_CONTAINER = "commander" @@ -98,10 +103,10 @@ "helm repo add prometheus-community https://prometheus-community.github.io/helm-charts", "helm repo update", f"helm upgrade --install --namespace warnet-logging --create-namespace --values {MANIFESTS_DIR}/loki_values.yaml loki grafana/loki --version 5.47.2", - "helm upgrade --install --namespace warnet-logging promtail grafana/promtail", - "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --set grafana.enabled=false", - f"helm upgrade --install grafana-dashboards {CHARTS_DIR}/grafana-dashboards --namespace warnet-logging", - f"helm upgrade --install --namespace warnet-logging loki-grafana grafana/grafana --values {MANIFESTS_DIR}/grafana_values.yaml", + "helm upgrade --install --namespace warnet-logging promtail grafana/promtail --create-namespace", + "helm upgrade --install --namespace warnet-logging prometheus prometheus-community/kube-prometheus-stack --namespace warnet-logging --create-namespace --set grafana.enabled=false", + f"helm upgrade --install grafana-dashboards {CHARTS_DIR}/grafana-dashboards --namespace warnet-logging --create-namespace", + f"helm upgrade --install --namespace warnet-logging --create-namespace loki-grafana grafana/grafana --values {MANIFESTS_DIR}/grafana_values.yaml", ] diff --git a/src/warnet/control.py b/src/warnet/control.py index 43c895cfb..cd8d51cdf 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -7,10 +7,12 @@ import zipapp from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path +from typing import Optional import click import inquirer from inquirer.themes import GreenPassion +from kubernetes.client.models import V1Pod from rich import print from rich.console import Console from rich.prompt import Confirm, Prompt @@ -20,12 +22,16 @@ BITCOINCORE_CONTAINER, COMMANDER_CHART, 
COMMANDER_CONTAINER, - LOGGING_NAMESPACE, + COMMANDER_MISSION, + TANK_MISSION, ) from .k8s import ( + can_delete_pods, delete_pod, get_default_namespace, + get_default_namespace_or, get_mission, + get_namespaces, get_pod, get_pods, pod_log, @@ -113,12 +119,15 @@ def stop_all_scenarios(scenarios): console.print("[bold green]All scenarios have been stopped.[/bold green]") +@click.option( + "--force", + is_flag=True, + default=False, + help="Skip confirmations", +) @click.command() -def down(): +def down(force): """Bring down a running warnet quickly""" - console.print("[bold yellow]Bringing down the warnet...[/bold yellow]") - - namespaces = [get_default_namespace(), LOGGING_NAMESPACE] def uninstall_release(namespace, release_name): cmd = f"helm uninstall {release_name} --namespace {namespace} --wait=false" @@ -130,21 +139,61 @@ def delete_pod(pod_name, namespace): subprocess.Popen(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) return f"Initiated deletion of pod: {pod_name} in namespace {namespace}" + if not can_delete_pods(): + click.secho("You do not have permission to bring down the network.", fg="red") + return + + namespaces = get_namespaces() + release_list: list[dict[str, str]] = [] + for v1namespace in namespaces: + namespace = v1namespace.metadata.name + command = f"helm list --namespace {namespace} -o json" + result = run_command(command) + if result: + releases = json.loads(result) + for release in releases: + release_list.append({"namespace": namespace, "name": release["name"]}) + + if not force: + affected_namespaces = set([entry["namespace"] for entry in release_list]) + namespace_listing = "\n ".join(affected_namespaces) + confirmed = "confirmed" + click.secho("Preparing to bring down the running Warnet...", fg="yellow") + click.secho("The listed namespaces will be affected:", fg="yellow") + click.secho(f" {namespace_listing}", fg="blue") + + proj_answers = inquirer.prompt( + [ + inquirer.Confirm( + confirmed, + 
message=click.style( + "Do you want to bring down the running Warnet?", fg="yellow", bold=False + ), + default=False, + ), + ] + ) + if not proj_answers: + click.secho("Operation cancelled by user.", fg="yellow") + sys.exit(0) + if proj_answers[confirmed]: + click.secho("Bringing down the warnet...", fg="yellow") + else: + click.secho("Operation cancelled by user", fg="yellow") + sys.exit(0) + with ThreadPoolExecutor(max_workers=10) as executor: futures = [] # Uninstall Helm releases - for namespace in namespaces: - command = f"helm list --namespace {namespace} -o json" - result = run_command(command) - if result: - releases = json.loads(result) - for release in releases: - futures.append(executor.submit(uninstall_release, namespace, release["name"])) + for release in release_list: + futures.append( + executor.submit(uninstall_release, release["namespace"], release["name"]) + ) # Delete remaining pods pods = get_pods() - for pod in pods.items: + for pod in pods: futures.append(executor.submit(delete_pod, pod.metadata.name, pod.metadata.namespace)) # Wait for all tasks to complete and print results @@ -182,11 +231,20 @@ def get_active_network(namespace): "--source_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True), required=False ) @click.argument("additional_args", nargs=-1, type=click.UNPROCESSED) -def run(scenario_file: str, debug: bool, source_dir, additional_args: tuple[str]): +@click.option("--namespace", default=None, show_default=True) +def run( + scenario_file: str, + debug: bool, + source_dir, + additional_args: tuple[str], + namespace: Optional[str], +): """ Run a scenario from a file. 
Pass `-- --help` to get individual scenario help """ + namespace = get_default_namespace_or(namespace) + scenario_path = Path(scenario_file).resolve() scenario_dir = scenario_path.parent if not source_dir else Path(source_dir).resolve() scenario_name = scenario_path.stem @@ -196,7 +254,6 @@ def run(scenario_file: str, debug: bool, source_dir, additional_args: tuple[str] # Collect tank data for warnet.json name = f"commander-{scenario_name.replace('_', '')}-{int(time.time())}" - namespace = get_default_namespace() tankpods = get_mission("tank") tanks = [ { @@ -284,41 +341,51 @@ def filter(path): print(f"Error: {e.stderr}") # upload scenario files and network data to the init container - wait_for_init(name) + wait_for_init(name, namespace=namespace) if write_file_to_container( - name, "init", "/shared/warnet.json", warnet_data - ) and write_file_to_container(name, "init", "/shared/archive.pyz", archive_data): + name, "init", "/shared/warnet.json", warnet_data, namespace=namespace + ) and write_file_to_container( + name, "init", "/shared/archive.pyz", archive_data, namespace=namespace + ): print(f"Successfully uploaded scenario data to commander: {scenario_name}") if debug: print("Waiting for commander pod to start...") - wait_for_pod(name) - _logs(pod_name=name, follow=True) + wait_for_pod(name, namespace=namespace) + _logs(pod_name=name, follow=True, namespace=namespace) print("Deleting pod...") - delete_pod(name) + delete_pod(name, namespace=namespace) @click.command() @click.argument("pod_name", type=str, default="") @click.option("--follow", "-f", is_flag=True, default=False, help="Follow logs") -def logs(pod_name: str, follow: bool): +@click.option("--namespace", type=str, default="default", show_default=True) +def logs(pod_name: str, follow: bool, namespace: str): """Show the logs of a pod""" - return _logs(pod_name, follow) + return _logs(pod_name, follow, namespace) -def _logs(pod_name: str, follow: bool): - namespace = get_default_namespace() +def 
_logs(pod_name: str, follow: bool, namespace: Optional[str] = None): + namespace = get_default_namespace_or(namespace) + + def format_pods(pods: list[V1Pod]) -> list[str]: + return [f"{pod.metadata.name}: {pod.metadata.namespace}" for pod in pods] if pod_name == "": try: - pods = get_pods() - pod_list = [item.metadata.name for item in pods.items] + pod_list = [] + formatted_commanders = format_pods(get_mission(COMMANDER_MISSION)) + formatted_tanks = format_pods(get_mission(TANK_MISSION)) + pod_list.extend(formatted_commanders) + pod_list.extend(formatted_tanks) + except Exception as e: - print(f"Could not fetch any pods in namespace {namespace}: {e}") + print(f"Could not fetch any pods in namespace ({namespace}): {e}") return if not pod_list: - print(f"Could not fetch any pods in namespace {namespace}") + print(f"Could not fetch any pods in namespace ({namespace})") return q = [ @@ -330,7 +397,7 @@ def _logs(pod_name: str, follow: bool): ] selected = inquirer.prompt(q, theme=GreenPassion()) if selected: - pod_name = selected["pod"] + pod_name, pod_namespace = selected["pod"].split(": ") else: return # cancelled by user diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index c175dd6d9..35648e72f 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -2,6 +2,7 @@ import sys import tempfile from pathlib import Path +from typing import Optional import click import yaml @@ -19,8 +20,15 @@ NAMESPACES_CHART_LOCATION, NAMESPACES_FILE, NETWORK_FILE, + WARGAMES_NAMESPACE_PREFIX, +) +from .k8s import ( + get_default_namespace, + get_default_namespace_or, + get_namespaces_by_type, + wait_for_ingress_controller, + wait_for_pod_ready, ) -from .k8s import get_default_namespace, wait_for_ingress_controller, wait_for_pod_ready from .process import stream_command @@ -42,13 +50,31 @@ def validate_directory(ctx, param, value): callback=validate_directory, ) @click.option("--debug", is_flag=True) -def deploy(directory, debug): +@click.option("--namespace", type=str, 
help="Specify a namespace in which to deploy the network") +@click.option("--to-all-users", is_flag=True, help="Deploy network to all user namespaces") +def deploy(directory, debug, namespace, to_all_users): + """Deploy a warnet with topology loaded from """ + if to_all_users: + namespaces = get_namespaces_by_type(WARGAMES_NAMESPACE_PREFIX) + for namespace in namespaces: + _deploy(directory, debug, namespace.metadata.name, False) + else: + _deploy(directory, debug, namespace, to_all_users) + + +def _deploy(directory, debug, namespace, to_all_users): """Deploy a warnet with topology loaded from """ directory = Path(directory) + if to_all_users: + namespaces = get_namespaces_by_type(WARGAMES_NAMESPACE_PREFIX) + for namespace in namespaces: + deploy(directory, debug, namespace.metadata.name, False) + return + if (directory / NETWORK_FILE).exists(): dl = deploy_logging_stack(directory, debug) - deploy_network(directory, debug) + deploy_network(directory, debug, namespace=namespace) df = deploy_fork_observer(directory, debug) if dl | df: deploy_ingress(debug) @@ -147,7 +173,7 @@ def deploy_fork_observer(directory: Path, debug: bool) -> bool: default_namespace = get_default_namespace() namespace = LOGGING_NAMESPACE - cmd = f"{HELM_COMMAND} 'fork-observer' {FORK_OBSERVER_CHART} --namespace {namespace}" + cmd = f"{HELM_COMMAND} 'fork-observer' {FORK_OBSERVER_CHART} --namespace {namespace} --create-namespace" if debug: cmd += " --debug" @@ -189,15 +215,15 @@ def deploy_fork_observer(directory: Path, debug: bool) -> bool: return True -def deploy_network(directory: Path, debug: bool = False): +def deploy_network(directory: Path, debug: bool = False, namespace: Optional[str] = None): network_file_path = directory / NETWORK_FILE defaults_file_path = directory / DEFAULTS_FILE + namespace = get_default_namespace_or(namespace) + with network_file_path.open() as f: network_file = yaml.safe_load(f) - namespace = get_default_namespace() - for node in network_file["nodes"]: 
click.echo(f"Deploying node: {node.get('name')}") try: @@ -237,16 +263,17 @@ def deploy_namespaces(directory: Path): names = [n.get("name") for n in namespaces_file["namespaces"]] for n in names: - if not n.startswith("warnet-"): - click.echo( - f"Failed to create namespace: {n}. Namespaces must start with a 'warnet-' prefix." + if not n.startswith(WARGAMES_NAMESPACE_PREFIX): + click.secho( + f"Failed to create namespace: {n}. Namespaces must start with a '{WARGAMES_NAMESPACE_PREFIX}' prefix.", + fg="red", ) return for namespace in namespaces_file["namespaces"]: click.echo(f"Deploying namespace: {namespace.get('name')}") try: - temp_override_file_path = Path() + temp_override_file_path = "" namespace_name = namespace.get("name") namespace_config_override = {k: v for k, v in namespace.items() if k != "name"} @@ -267,7 +294,7 @@ def deploy_namespaces(directory: Path): click.echo(f"Error: {e}") return finally: - if temp_override_file_path.exists(): + if temp_override_file_path: temp_override_file_path.unlink() diff --git a/src/warnet/k8s.py b/src/warnet/k8s.py index 589169e24..9354eb903 100644 --- a/src/warnet/k8s.py +++ b/src/warnet/k8s.py @@ -8,7 +8,8 @@ import yaml from kubernetes import client, config, watch -from kubernetes.client.models import CoreV1Event, V1Pod, V1PodList +from kubernetes.client import CoreV1Api +from kubernetes.client.models import V1Namespace, V1Pod, V1PodList from kubernetes.client.rest import ApiException from kubernetes.dynamic import DynamicClient from kubernetes.stream import stream @@ -17,13 +18,18 @@ CADDY_INGRESS_NAME, DEFAULT_NAMESPACE, INGRESS_NAMESPACE, + KUBE_INTERNAL_NAMESPACES, KUBECONFIG, LOGGING_NAMESPACE, ) from .process import run_command, stream_command -def get_static_client() -> CoreV1Event: +class K8sError(Exception): + pass + + +def get_static_client() -> CoreV1Api: config.load_kube_config(config_file=KUBECONFIG) return client.CoreV1Api() @@ -33,35 +39,41 @@ def get_dynamic_client() -> DynamicClient: return 
DynamicClient(client.ApiClient()) -def get_pods() -> V1PodList: +def get_pods() -> list[V1Pod]: sclient = get_static_client() - try: - pod_list: V1PodList = sclient.list_namespaced_pod(get_default_namespace()) - except Exception as e: - raise e - return pod_list + pods: list[V1Pod] = [] + namespaces = get_namespaces() + for ns in namespaces: + namespace = ns.metadata.name + try: + pod_list: V1PodList = sclient.list_namespaced_pod(namespace) + for pod in pod_list.items: + pods.append(pod) + except Exception as e: + raise e + return pods def get_pod(name: str, namespace: Optional[str] = None) -> V1Pod: + namespace = get_default_namespace_or(namespace) sclient = get_static_client() - if not namespace: - namespace = get_default_namespace() return sclient.read_namespaced_pod(name=name, namespace=namespace) -def get_mission(mission: str) -> list[V1PodList]: +def get_mission(mission: str) -> list[V1Pod]: pods = get_pods() - crew = [] - for pod in pods.items: + crew: list[V1Pod] = [] + for pod in pods: if "mission" in pod.metadata.labels and pod.metadata.labels["mission"] == mission: crew.append(pod) return crew -def get_pod_exit_status(pod_name): +def get_pod_exit_status(pod_name, namespace: Optional[str] = None): + namespace = get_default_namespace_or(namespace) try: sclient = get_static_client() - pod = sclient.read_namespaced_pod(name=pod_name, namespace=get_default_namespace()) + pod = sclient.read_namespaced_pod(name=pod_name, namespace=namespace) for container_status in pod.status.container_statuses: if container_status.state.terminated: return container_status.state.terminated.exit_code @@ -71,9 +83,10 @@ def get_pod_exit_status(pod_name): return None -def get_edges() -> any: +def get_edges(namespace: Optional[str] = None) -> any: + namespace = get_default_namespace_or(namespace) sclient = get_static_client() - configmap = sclient.read_namespaced_config_map(name="edges", namespace="warnet") + configmap = sclient.read_namespaced_config_map(name="edges", 
namespace=namespace) return json.loads(configmap.data["data"]) @@ -125,8 +138,9 @@ def delete_namespace(namespace: str) -> bool: return run_command(command) -def delete_pod(pod_name: str) -> bool: - command = f"kubectl -n {get_default_namespace()} delete pod {pod_name}" +def delete_pod(pod_name: str, namespace: Optional[str] = None) -> bool: + namespace = get_default_namespace_or(namespace) + command = f"kubectl -n {namespace} delete pod {pod_name}" return stream_command(command) @@ -145,10 +159,18 @@ def get_default_namespace() -> str: return kubectl_namespace if kubectl_namespace else DEFAULT_NAMESPACE +def get_default_namespace_or(namespace: Optional[str]) -> str: + return namespace if namespace else get_default_namespace() + + def snapshot_bitcoin_datadir( - pod_name: str, chain: str, local_path: str = "./", filters: list[str] = None + pod_name: str, + chain: str, + local_path: str = "./", + filters: list[str] = None, + namespace: Optional[str] = None, ) -> None: - namespace = get_default_namespace() + namespace = get_default_namespace_or(namespace) sclient = get_static_client() try: @@ -272,21 +294,23 @@ def wait_for_pod_ready(name, namespace, timeout=300): return False -def wait_for_init(pod_name, timeout=300): +def wait_for_init(pod_name, timeout=300, namespace: Optional[str] = None): + namespace = get_default_namespace_or(namespace) sclient = get_static_client() - namespace = get_default_namespace() w = watch.Watch() for event in w.stream( sclient.list_namespaced_pod, namespace=namespace, timeout_seconds=timeout ): pod = event["object"] if pod.metadata.name == pod_name: + if not pod.status.init_container_statuses: + continue for init_container_status in pod.status.init_container_statuses: if init_container_status.state.running: - print(f"initContainer in pod {pod_name} is ready") + print(f"initContainer in pod {pod_name} ({namespace}) is ready") w.stop() return True - print(f"Timeout waiting for initContainer in {pod_name} to be ready.") + print(f"Timeout 
waiting for initContainer in {pod_name} ({namespace})to be ready.") return False @@ -312,12 +336,14 @@ def get_ingress_ip_or_host(): return None -def pod_log(pod_name, container_name=None, follow=False): +def pod_log(pod_name, container_name=None, follow=False, namespace: Optional[str] = None): + namespace = get_default_namespace_or(namespace) sclient = get_static_client() + try: return sclient.read_namespaced_pod_log( name=pod_name, - namespace=get_default_namespace(), + namespace=namespace, container=container_name, follow=follow, _preload_content=False, @@ -326,20 +352,23 @@ def pod_log(pod_name, container_name=None, follow=False): raise Exception(json.loads(e.body.decode("utf-8"))["message"]) from None -def wait_for_pod(pod_name, timeout_seconds=10): +def wait_for_pod(pod_name, timeout_seconds=10, namespace: Optional[str] = None): + namespace = get_default_namespace_or(namespace) sclient = get_static_client() while timeout_seconds > 0: - pod = sclient.read_namespaced_pod_status(name=pod_name, namespace=get_default_namespace()) + pod = sclient.read_namespaced_pod_status(name=pod_name, namespace=namespace) if pod.status.phase != "Pending": return sleep(1) timeout_seconds -= 1 -def write_file_to_container(pod_name, container_name, dst_path, data): +def write_file_to_container( + pod_name, container_name, dst_path, data, namespace: Optional[str] = None +): + namespace = get_default_namespace_or(namespace) sclient = get_static_client() - namespace = get_default_namespace() - exec_command = ["sh", "-c", f"cat > {dst_path}"] + exec_command = ["sh", "-c", f"cat > {dst_path}.tmp && sync"] try: res = stream( sclient.connect_get_namespaced_pod_exec, @@ -355,7 +384,156 @@ def write_file_to_container(pod_name, container_name, dst_path, data): ) res.write_stdin(data) res.close() + rename_command = ["sh", "-c", f"mv {dst_path}.tmp {dst_path}"] + stream( + sclient.connect_get_namespaced_pod_exec, + pod_name, + namespace, + command=rename_command, + container=container_name, + 
stdin=False, + stderr=True, + stdout=True, + tty=False, + ) print(f"Successfully copied data to {pod_name}({container_name}):{dst_path}") return True except Exception as e: print(f"Failed to copy data to {pod_name}({container_name}):{dst_path}:\n{e}") + + +def get_kubeconfig_value(jsonpath): + command = f"kubectl config view --minify --raw -o jsonpath={jsonpath}" + return run_command(command) + + +def get_cluster_of_current_context(kubeconfig_data: dict) -> dict: + # Get the current context name + current_context_name = kubeconfig_data.get("current-context") + + if not current_context_name: + raise K8sError("No current context found in kubeconfig.") + + # Find the context entry for the current context + context_entry = next( + ( + context + for context in kubeconfig_data.get("contexts", []) + if context["name"] == current_context_name + ), + None, + ) + + if not context_entry: + raise K8sError(f"Context '{current_context_name}' not found in kubeconfig.") + + # Get the cluster name from the context entry + cluster_name = context_entry.get("context", {}).get("cluster") + + if not cluster_name: + raise K8sError(f"Cluster not specified in context '{current_context_name}'.") + + # Find the cluster entry associated with the cluster name + cluster_entry = next( + ( + cluster + for cluster in kubeconfig_data.get("clusters", []) + if cluster["name"] == cluster_name + ), + None, + ) + + if not cluster_entry: + raise K8sError(f"Cluster '{cluster_name}' not found in kubeconfig.") + + return cluster_entry + + +def get_namespaces() -> list[V1Namespace]: + sclient = get_static_client() + try: + return [ + ns + for ns in sclient.list_namespace().items + if ns.metadata.name not in KUBE_INTERNAL_NAMESPACES + ] + + except ApiException as e: + if e.status == 403: + ns = sclient.read_namespace(name=get_default_namespace()) + return [ns] + else: + return [] + + +def get_namespaces_by_type(namespace_type: str) -> list[V1Namespace]: + """ + Get all namespaces beginning with `prefix`. 
Returns an empty list if no namespaces with the specified prefix are found. + """ + namespaces = get_namespaces() + return [ns for ns in namespaces if ns.metadata.name.startswith(namespace_type)] + + +def get_service_accounts_in_namespace(namespace): + """ + Get all service accounts in a namespace. Returns an empty list if no service accounts are found in the specified namespace. + """ + command = f"kubectl get serviceaccounts -n {namespace} -o jsonpath={{.items[*].metadata.name}}" + # skip the default service account created by k8s + service_accounts = run_command(command).split() + return [sa for sa in service_accounts if sa != "default"] + + +def can_delete_pods(namespace: Optional[str] = None) -> bool: + namespace = get_default_namespace_or(namespace) + + get_static_client() + auth_api = client.AuthorizationV1Api() + + # Define the SelfSubjectAccessReview request for deleting pods + access_review = client.V1SelfSubjectAccessReview( + spec=client.V1SelfSubjectAccessReviewSpec( + resource_attributes=client.V1ResourceAttributes( + namespace=namespace, + verb="delete", # Action: 'delete' + resource="pods", # Resource: 'pods' + ) + ) + ) + + try: + # Perform the SelfSubjectAccessReview check + review_response = auth_api.create_self_subject_access_review(body=access_review) + + # Check the result and return + if review_response.status.allowed: + print(f"Service account can delete pods in namespace '{namespace}'.") + return True + else: + print(f"Service account CANNOT delete pods in namespace '{namespace}'.") + return False + + except ApiException as e: + print(f"An error occurred: {e}") + return False + + +def open_kubeconfig(kubeconfig_path: str) -> dict: + try: + with open(kubeconfig_path) as file: + return yaml.safe_load(file) + except FileNotFoundError as e: + raise K8sError(f"Kubeconfig file {kubeconfig_path} not found.") from e + except yaml.YAMLError as e: + raise K8sError(f"Error parsing kubeconfig: {e}") from e + + +def write_kubeconfig(kube_config: dict, 
kubeconfig_path: str) -> None: + dir_name = os.path.dirname(kubeconfig_path) + try: + with tempfile.NamedTemporaryFile("w", dir=dir_name, delete=False) as temp_file: + yaml.safe_dump(kube_config, temp_file) + os.replace(temp_file.name, kubeconfig_path) + except Exception as e: + os.remove(temp_file.name) + raise K8sError(f"Error writing kubeconfig: {kubeconfig_path}") from e diff --git a/src/warnet/namespaces.py b/src/warnet/namespaces.py index 45bcb7af5..12357525b 100644 --- a/src/warnet/namespaces.py +++ b/src/warnet/namespaces.py @@ -8,6 +8,7 @@ DEFAULTS_NAMESPACE_FILE, NAMESPACES_DIR, NAMESPACES_FILE, + WARGAMES_NAMESPACE_PREFIX, ) from .process import run_command, stream_command @@ -32,16 +33,15 @@ def namespaces(): """Namespaces commands""" -@click.argument( - "namespaces_dir", type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path) -) @namespaces.command() def list(): - """List all namespaces with 'warnet-' prefix""" + """List all namespaces with 'wargames-' prefix""" cmd = "kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'" res = run_command(cmd) all_namespaces = res.split() - warnet_namespaces = [ns for ns in all_namespaces if ns.startswith("warnet-")] + warnet_namespaces = [ + ns for ns in all_namespaces if ns.startswith(f"{WARGAMES_NAMESPACE_PREFIX}") + ] if warnet_namespaces: print("Warnet namespaces:") @@ -55,14 +55,16 @@ def list(): @click.option("--all", "destroy_all", is_flag=True, help="Destroy all warnet- prefixed namespaces") @click.argument("namespace", required=False) def destroy(destroy_all: bool, namespace: str): - """Destroy a specific namespace or all warnet- prefixed namespaces""" + """Destroy a specific namespace or all 'wargames-' prefixed namespaces""" if destroy_all: cmd = "kubectl get namespaces -o jsonpath='{.items[*].metadata.name}'" res = run_command(cmd) # Get the list of namespaces all_namespaces = res.split() - warnet_namespaces = [ns for ns in all_namespaces if ns.startswith("warnet-")] + 
warnet_namespaces = [ + ns for ns in all_namespaces if ns.startswith(f"{WARGAMES_NAMESPACE_PREFIX}") + ] if not warnet_namespaces: print("No warnet namespaces found to destroy.") @@ -75,8 +77,8 @@ def destroy(destroy_all: bool, namespace: str): else: print(f"Destroyed namespace: {ns}") elif namespace: - if not namespace.startswith("warnet-"): - print("Error: Can only destroy namespaces with 'warnet-' prefix") + if not namespace.startswith(f"{WARGAMES_NAMESPACE_PREFIX}"): + print(f"Error: Can only destroy namespaces with '{WARGAMES_NAMESPACE_PREFIX}' prefix") return destroy_cmd = f"kubectl delete namespace {namespace}" diff --git a/src/warnet/network.py b/src/warnet/network.py index 401ab5106..a894cafc9 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -58,7 +58,9 @@ def _connected(end="\n"): for tank in tanks: # Get actual try: - peerinfo = json.loads(_rpc(tank.metadata.name, "getpeerinfo", "")) + peerinfo = json.loads( + _rpc(tank.metadata.name, "getpeerinfo", "", namespace=tank.metadata.namespace) + ) actual = 0 for peer in peerinfo: if is_connection_manual(peer): diff --git a/src/warnet/status.py b/src/warnet/status.py index ebbd245d4..df62ed2df 100644 --- a/src/warnet/status.py +++ b/src/warnet/status.py @@ -45,10 +45,11 @@ def status(): table.add_column("Component", style="cyan") table.add_column("Name", style="green") table.add_column("Status", style="yellow") + table.add_column("Namespace", style="green") # Add tanks to the table for tank in tanks: - table.add_row("Tank", tank["name"], tank["status"]) + table.add_row("Tank", tank["name"], tank["status"], tank["namespace"]) # Add a separator if there are both tanks and scenarios if tanks and scenarios: @@ -58,7 +59,7 @@ def status(): active = 0 if scenarios: for scenario in scenarios: - table.add_row("Scenario", scenario["name"], scenario["status"]) + table.add_row("Scenario", scenario["name"], scenario["status"], scenario["namespace"]) if scenario["status"] == "running" or scenario["status"] 
== "pending": active += 1 else: @@ -86,9 +87,23 @@ def status(): def _get_tank_status(): tanks = get_mission("tank") - return [{"name": tank.metadata.name, "status": tank.status.phase.lower()} for tank in tanks] + return [ + { + "name": tank.metadata.name, + "status": tank.status.phase.lower(), + "namespace": tank.metadata.namespace, + } + for tank in tanks + ] def _get_deployed_scenarios(): commanders = get_mission("commander") - return [{"name": c.metadata.name, "status": c.status.phase.lower()} for c in commanders] + return [ + { + "name": c.metadata.name, + "status": c.status.phase.lower(), + "namespace": c.metadata.namespace, + } + for c in commanders + ] diff --git a/src/warnet/users.py b/src/warnet/users.py index c85e53585..d061f08a9 100644 --- a/src/warnet/users.py +++ b/src/warnet/users.py @@ -1,70 +1,123 @@ +import difflib +import json import os -import subprocess import sys import click -import yaml + +from warnet.constants import KUBECONFIG +from warnet.k8s import K8sError, open_kubeconfig, write_kubeconfig @click.command() -@click.argument("kube_config", type=str) -def auth(kube_config: str) -> None: - """ - Authenticate with a warnet cluster using a kube config file - """ +@click.argument("auth_config", type=str) +def auth(auth_config): + """Authenticate with a Warnet cluster using a kubernetes config file""" try: - current_kubeconfig = os.environ.get("KUBECONFIG", os.path.expanduser("~/.kube/config")) - combined_kubeconfig = ( - f"{current_kubeconfig}:{kube_config}" if current_kubeconfig else kube_config - ) - os.environ["KUBECONFIG"] = combined_kubeconfig - with open(kube_config) as file: - content = yaml.safe_load(file) - user = content["users"][0] - user_name = user["name"] - user_token = user["user"]["token"] - current_context = content["current-context"] - flatten_cmd = "kubectl config view --flatten" - result_flatten = subprocess.run( - flatten_cmd, shell=True, check=True, capture_output=True, text=True - ) - except 
subprocess.CalledProcessError as e: - click.secho("Error occurred while executing kubectl config view --flatten:", fg="red") - click.secho(e.stderr, fg="red") + auth_config = open_kubeconfig(auth_config) + except K8sError as e: + click.secho(e, fg="yellow") + click.secho(f"Could not open auth_config: {auth_config}", fg="red") sys.exit(1) - if result_flatten.returncode == 0: - with open(current_kubeconfig, "w") as file: - file.write(result_flatten.stdout) - click.secho(f"Authorization file written to: {current_kubeconfig}", fg="green") - else: - click.secho("Could not create authorization file", fg="red") - click.secho(result_flatten.stderr, fg="red") - sys.exit(result_flatten.returncode) + is_first_config = False + if not os.path.exists(KUBECONFIG): + try: + write_kubeconfig(auth_config, KUBECONFIG) + is_first_config = True + except K8sError as e: + click.secho(e, fg="yellow") + click.secho(f"Could not write KUBECONFIG: {KUBECONFIG}", fg="red") + sys.exit(1) try: - update_cmd = f"kubectl config set-credentials {user_name} --token {user_token}" - result_update = subprocess.run( - update_cmd, shell=True, check=True, capture_output=True, text=True - ) - if result_update.returncode != 0: - click.secho("Could not update authorization file", fg="red") - click.secho(result_flatten.stderr, fg="red") - sys.exit(result_flatten.returncode) - except subprocess.CalledProcessError as e: - click.secho("Error occurred while executing kubectl config view --flatten:", fg="red") - click.secho(e.stderr, fg="red") + base_config = open_kubeconfig(KUBECONFIG) + except K8sError as e: + click.secho(e, fg="yellow") + click.secho(f"Could not open KUBECONFIG: {KUBECONFIG}", fg="red") sys.exit(1) - with open(current_kubeconfig) as file: - contents = yaml.safe_load(file) + if not is_first_config: + for category in ["clusters", "users", "contexts"]: + if category in auth_config: + merge_entries(category, base_config, auth_config) - with open(current_kubeconfig, "w") as file: - 
contents["current-context"] = current_context - yaml.safe_dump(contents, file) + new_current_context = auth_config.get("current-context") + base_config["current-context"] = new_current_context - with open(current_kubeconfig) as file: - contents = yaml.safe_load(file) + # Check if the new current context has an explicit namespace + context_entry = next( + (ctx for ctx in base_config["contexts"] if ctx["name"] == new_current_context), None + ) + if context_entry and "namespace" not in context_entry["context"]: click.secho( - f"\nwarnet's current context is now set to: {contents['current-context']}", fg="green" + f"Warning: The context '{new_current_context}' does not have an explicit namespace.", + fg="yellow", ) + + try: + write_kubeconfig(base_config, KUBECONFIG) + click.secho(f"Updated kubeconfig with authorization data: {KUBECONFIG}", fg="green") + except K8sError as e: + click.secho(e, fg="yellow") + click.secho(f"Could not write KUBECONFIG: {KUBECONFIG}", fg="red") + sys.exit(1) + + try: + base_config = open_kubeconfig(KUBECONFIG) + click.secho( + f"Warnet's current context is now set to: {base_config['current-context']}", fg="green" + ) + except K8sError as e: + click.secho(f"Error reading from {KUBECONFIG}: {e}", fg="red") + sys.exit(1) + + +def merge_entries(category, base_config, auth_config): + name = "name" + base_list = base_config.setdefault(category, []) + auth_list = auth_config[category] + base_entry_names = {entry[name] for entry in base_list} # Extract existing names + for auth_entry in auth_list: + if auth_entry[name] in base_entry_names: + existing_entry = next( + base_entry for base_entry in base_list if base_entry[name] == auth_entry[name] + ) + if existing_entry != auth_entry: + # Show diff between existing and new entry + existing_entry_str = json.dumps(existing_entry, indent=2, sort_keys=True) + auth_entry_str = json.dumps(auth_entry, indent=2, sort_keys=True) + diff = difflib.unified_diff( + existing_entry_str.splitlines(), + 
auth_entry_str.splitlines(), + fromfile="Existing Entry", + tofile="New Entry", + lineterm="", + ) + click.echo("Differences between existing and new entry:\n") + click.echo("\n".join(diff)) + + if click.confirm( + f"The '{category}' section key '{auth_entry[name]}' already exists and differs. Overwrite?", + default=False, + ): + # Find and replace the existing entry + base_list[:] = [ + base_entry if base_entry[name] != auth_entry[name] else auth_entry + for base_entry in base_list + ] + click.secho( + f"Overwrote '{category}' section key '{auth_entry[name]}'", fg="yellow" + ) + else: + click.secho( + f"Skipped '{category}' section key '{auth_entry[name]}'", fg="yellow" + ) + else: + click.secho( + f"Entry for '{category}' section key '{auth_entry[name]}' is identical. No changes made.", + fg="blue", + ) + else: + base_list.append(auth_entry) + click.secho(f"Added new '{category}' section key '{auth_entry[name]}'", fg="green") diff --git a/test/data/admin/namespaces/two_namespaces_two_users/namespace-defaults.yaml b/test/data/admin/namespaces/two_namespaces_two_users/namespace-defaults.yaml new file mode 100644 index 000000000..75cc8e42c --- /dev/null +++ b/test/data/admin/namespaces/two_namespaces_two_users/namespace-defaults.yaml @@ -0,0 +1,18 @@ +users: + - name: warnet-user + roles: + - pod-viewer + - pod-manager +# the pod-viewer and pod-manager roles are the default +# roles defined in values.yaml for the namespaces charts +# +# if you need a different set of roles for a particular namespaces +# deployment, you can override values.yaml by providing your own +# role definitions below +# +# roles: +# - name: my-custom-role +# rules: +# - apiGroups: "" +# resources: "" +# verbs: "" diff --git a/test/data/admin/namespaces/two_namespaces_two_users/namespaces.yaml b/test/data/admin/namespaces/two_namespaces_two_users/namespaces.yaml new file mode 100644 index 000000000..413d3bcb7 --- /dev/null +++ 
b/test/data/admin/namespaces/two_namespaces_two_users/namespaces.yaml @@ -0,0 +1,19 @@ +namespaces: + - name: wargames-red-team-warnettest + users: + - name: alice-warnettest + roles: + - pod-viewer + - name: bob-warnettest + roles: + - pod-viewer + - pod-manager + - name: wargames-blue-team-warnettest + users: + - name: mallory-warnettest + roles: + - pod-viewer + - name: carol-warnettest + roles: + - pod-viewer + - pod-manager diff --git a/test/data/admin/networks/6_node_bitcoin/network.yaml b/test/data/admin/networks/6_node_bitcoin/network.yaml new file mode 100644 index 000000000..fed0ef640 --- /dev/null +++ b/test/data/admin/networks/6_node_bitcoin/network.yaml @@ -0,0 +1,34 @@ +nodes: + - name: tank-0001 + image: + tag: "26.0" + connect: + - tank-0002.wargames-red-team-warnettest.svc.cluster.local + - tank-0003.wargames-blue-team-warnettest.svc.cluster.local + - name: tank-0002 + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + connect: + - tank-0003.wargames-red-team-warnettest.svc.cluster.local + - tank-0004.wargames-blue-team-warnettest.svc.cluster.local + - name: tank-0003 + connect: + - tank-0004.wargames-red-team-warnettest.svc.cluster.local + - tank-0005.wargames-blue-team-warnettest.svc.cluster.local + - name: tank-0004 + connect: + - tank-0005.wargames-red-team-warnettest.svc.cluster.local + - tank-0006.wargames-blue-team-warnettest.svc.cluster.local + - name: tank-0005 + connect: + - tank-0006.wargames-red-team-warnettest.svc.cluster.local + - name: tank-0006 +fork_observer: + enabled: false +caddy: + enabled: false diff --git a/test/data/admin/networks/6_node_bitcoin/node-defaults.yaml b/test/data/admin/networks/6_node_bitcoin/node-defaults.yaml new file mode 100644 index 000000000..325405e88 --- /dev/null +++ b/test/data/admin/networks/6_node_bitcoin/node-defaults.yaml @@ -0,0 +1,26 @@ +chain: regtest + +collectLogs: false +metricsExport: false + +resources: {} + # We usually recommend not to specify 
default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "27.0" + +config: | + dns=1 + debug=rpc diff --git a/test/namespace_admin_test.py b/test/namespace_admin_test.py new file mode 100755 index 000000000..c92040e1f --- /dev/null +++ b/test/namespace_admin_test.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python3 + +import os +from pathlib import Path +from typing import Callable, Optional + +from test_base import TestBase + +from warnet.constants import KUBECONFIG, WARGAMES_NAMESPACE_PREFIX +from warnet.k8s import ( + K8sError, + get_kubeconfig_value, + get_static_client, + open_kubeconfig, + write_kubeconfig, +) +from warnet.process import run_command + + +class NamespaceAdminTest(TestBase): + def __init__(self): + super().__init__() + self.namespace_dir = ( + Path(os.path.dirname(__file__)) + / "data" + / "admin" + / "namespaces" + / "two_namespaces_two_users" + ) + self.network_dir = ( + Path(os.path.dirname(__file__)) / "data" / "admin" / "networks" / "6_node_bitcoin" + ) + + def run_test(self): + try: + os.chdir(self.tmpdir) + self.log.info(f"Running test in: {self.tmpdir}") + self.establish_initial_context() + self.establish_names() + self.setup_namespaces() + self.setup_service_accounts() + self.deploy_network_in_team_namespaces() + self.authenticate_and_become_bob() + self.return_to_intial_context() + finally: + try: + self.cleanup_kubeconfig() + except K8sError as e: + self.log.info(f"KUBECONFIG cleanup error: {e}") + self.cleanup() + + def 
establish_initial_context(self): + self.initial_context = get_kubeconfig_value("{.current-context}") + self.log.info(f"Initial context: {self.initial_context}") + + def establish_names(self): + self.bob_user = "bob-warnettest" + self.bob_auth_file = "bob-warnettest-wargames-red-team-warnettest-kubeconfig" + self.bob_context = "bob-warnettest-wargames-red-team-warnettest" + + self.blue_namespace = "wargames-blue-team-warnettest" + self.red_namespace = "wargames-red-team-warnettest" + self.blue_users = ["carol-warnettest", "default", "mallory-warnettest"] + self.red_users = ["alice-warnettest", self.bob_user, "default"] + + def return_to_intial_context(self): + cmd = f"kubectl config use-context {self.initial_context}" + self.log.info(run_command(cmd)) + self.wait_for_predicate(self.this_is_the_current_context(self.initial_context)) + + def this_is_the_current_context(self, context: str) -> Callable[[], bool]: + cmd = "kubectl config current-context" + current_context = run_command(cmd).strip() + self.log.info(f"Current context: {current_context} {context == current_context}") + return lambda: current_context == context + + def setup_namespaces(self): + self.log.info("Setting up the namespaces") + self.log.info(self.warnet(f"deploy {self.namespace_dir}")) + self.wait_for_predicate(self.two_namespaces_are_validated) + self.log.info("Namespace setup complete") + + def setup_service_accounts(self): + self.log.info("Creating service accounts...") + self.log.info(self.warnet("admin create-kubeconfigs")) + self.wait_for_predicate(self.service_accounts_are_validated) + self.log.info("Service accounts have been set up and validated") + + def deploy_network_in_team_namespaces(self): + self.log.info("Deploy networks to team namespaces") + self.log.info(self.warnet(f"deploy {self.network_dir} --to-all-users")) + self.wait_for_all_tanks_status() + self.log.info("Waiting for all edges") + self.wait_for_all_edges() + + def authenticate_and_become_bob(self): + 
self.log.info("Authenticating and becoming bob...") + assert get_kubeconfig_value("{.current-context}") == self.initial_context + self.warnet(f"auth kubeconfigs/{self.bob_auth_file}") + assert get_kubeconfig_value("{.current-context}") == self.bob_context + + def service_accounts_are_validated(self) -> bool: + self.log.info("Checking service accounts") + sclient = get_static_client() + namespaces = sclient.list_namespace().items + + filtered_namespaces = [ + ns.metadata.name + for ns in namespaces + if ns.metadata.name.startswith(WARGAMES_NAMESPACE_PREFIX) + ] + assert len(filtered_namespaces) != 0 + + maybe_service_accounts = {} + + for namespace in filtered_namespaces: + service_accounts = sclient.list_namespaced_service_account(namespace=namespace).items + for sa in service_accounts: + maybe_service_accounts.setdefault(namespace, []).append(sa.metadata.name) + + expected = { + self.blue_namespace: self.blue_users, + self.red_namespace: self.red_users, + } + + return maybe_service_accounts == expected + + def get_namespaces(self) -> Optional[list[str]]: + self.log.info("Querying the namespaces...") + resp = self.warnet("admin namespaces list") + if resp == "No warnet namespaces found.": + return None + namespaces = [] + for line in resp.splitlines(): + if line.startswith("- "): + namespaces.append(line.lstrip("- ")) + self.log.info(f"Namespaces: {namespaces}") + return namespaces + + def two_namespaces_are_validated(self) -> bool: + maybe_namespaces = self.get_namespaces() + if maybe_namespaces is None: + return False + if self.blue_namespace not in maybe_namespaces: + return False + return self.red_namespace in maybe_namespaces + + def cleanup_kubeconfig(self): + try: + kubeconfig_data = open_kubeconfig(KUBECONFIG) + except K8sError as e: + raise K8sError(f"Could not open KUBECONFIG: {KUBECONFIG}") from e + + kubeconfig_data = remove_user(kubeconfig_data, self.bob_user) + kubeconfig_data = remove_context(kubeconfig_data, self.bob_context) + + try: + 
write_kubeconfig(kubeconfig_data, KUBECONFIG) + except Exception as e: + raise K8sError(f"Could not write to KUBECONFIG: {KUBECONFIG}") from e + + +def remove_user(kubeconfig_data: dict, username: str) -> dict: + kubeconfig_data["users"] = [ + user for user in kubeconfig_data["users"] if user["name"] != username + ] + return kubeconfig_data + + +def remove_context(kubeconfig_data: dict, context_name: str) -> dict: + kubeconfig_data["contexts"] = [ + context for context in kubeconfig_data["contexts"] if context["name"] != context_name + ] + return kubeconfig_data + + +if __name__ == "__main__": + test = NamespaceAdminTest() + test.run_test() diff --git a/test/test_base.py b/test/test_base.py index 855e47c51..2b024da64 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -45,7 +45,7 @@ def cleanup(self, signum=None, frame=None): try: self.log.info("Stopping network") if self.network: - self.warnet("down") + self.warnet("down --force") self.wait_for_all_tanks_status(target="stopped", timeout=60, interval=1) except Exception as e: self.log.error(f"Error bringing network down: {e}") @@ -131,7 +131,7 @@ def check_scenarios(): if len(scns) == 0: return True for s in scns: - exit_status = get_pod_exit_status(s["name"]) + exit_status = get_pod_exit_status(s["name"], s["namespace"]) self.log.debug(f"Scenario {s['name']} exited with code {exit_status}") if exit_status != 0: return False