diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index acac68266..107023725 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -44,11 +44,13 @@ jobs: - dag_connection_test.py - graph_test.py - logging_test.py + - ln_basic_test.py - rpc_test.py - services_test.py - signet_test.py - scenarios_test.py - namespace_admin_test.py + - simln_test.py steps: - uses: actions/checkout@v4 - uses: azure/setup-helm@v4.2.0 diff --git a/resources/charts/bitcoincore/Chart.yaml b/resources/charts/bitcoincore/Chart.yaml index f99064472..4feb6e32e 100644 --- a/resources/charts/bitcoincore/Chart.yaml +++ b/resources/charts/bitcoincore/Chart.yaml @@ -2,6 +2,11 @@ apiVersion: v2 name: bitcoincore description: A Helm chart for Bitcoin Core +dependencies: + - name: lnd + version: 0.1.0 + condition: ln.lnd + # A chart can be either an 'application' or a 'library' chart. # # Application charts are a collection of templates that can be packaged into versioned archives diff --git a/resources/charts/bitcoincore/charts/lnd/Chart.yaml b/resources/charts/bitcoincore/charts/lnd/Chart.yaml new file mode 100644 index 000000000..b77eb714a --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/Chart.yaml @@ -0,0 +1,25 @@ +apiVersion: v2 +name: lnd + +description: A Helm chart for LND + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: 0.1.0 diff --git a/resources/charts/bitcoincore/charts/lnd/templates/_helpers.tpl b/resources/charts/bitcoincore/charts/lnd/templates/_helpers.tpl new file mode 100644 index 000000000..de7c0c156 --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/templates/_helpers.tpl @@ -0,0 +1,78 @@ +{{/* +Expand the name of the PARENT chart. +*/}} +{{- define "bitcoincore.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified PARENT app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "bitcoincore.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + + +{{/* +Expand the name of the chart. 
+*/}} +{{- define "lnd.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}-ln +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "lnd.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s" .Release.Name | trunc 63 | trimSuffix "-" }}-ln +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "lnd.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "lnd.labels" -}} +helm.sh/chart: {{ include "lnd.chart" . }} +{{ include "lnd.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "lnd.selectorLabels" -}} +app.kubernetes.io/name: {{ include "lnd.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "lnd.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "lnd.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/resources/charts/bitcoincore/charts/lnd/templates/configmap.yaml b/resources/charts/bitcoincore/charts/lnd/templates/configmap.yaml new file mode 100644 index 000000000..65cd54cd6 --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/templates/configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "lnd.fullname" . }} + labels: + {{- include "lnd.labels" . | nindent 4 }} +data: + lnd.conf: | + {{- .Values.baseConfig | nindent 4 }} + {{- .Values.defaultConfig | nindent 4 }} + {{- .Values.config | nindent 4 }} + bitcoin.{{ .Values.global.chain }}=1 + bitcoind.rpcpass={{ .Values.global.rpcpassword }} + bitcoind.rpchost={{ include "bitcoincore.fullname" . }}:{{ index .Values.global .Values.global.chain "RPCPort" }} + bitcoind.zmqpubrawblock=tcp://{{ include "bitcoincore.fullname" . }}:{{ .Values.global.ZMQBlockPort }} + bitcoind.zmqpubrawtx=tcp://{{ include "bitcoincore.fullname" . }}:{{ .Values.global.ZMQTxPort }} + alias={{ include "lnd.fullname" . }} + externalhosts={{ include "lnd.fullname" . }} + tlsextradomain={{ include "lnd.fullname" . 
}} + tls.cert: | + -----BEGIN CERTIFICATE----- + MIIB8TCCAZagAwIBAgIUJDsR6mmY+TaO9pCfjtotlbOkzJMwCgYIKoZIzj0EAwIw + MjEfMB0GA1UECgwWbG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2Fy + bmV0MB4XDTI0MTExMTE2NTM1MFoXDTM0MTEwOTE2NTM1MFowMjEfMB0GA1UECgwW + bG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2FybmV0MFkwEwYHKoZI + zj0CAQYIKoZIzj0DAQcDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLP + tp0fxE7hmteSt6gjQriy90fP8j9OJXBNAjt915kLY4zVvqOBiTCBhjAOBgNVHQ8B + Af8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAd + BgNVHQ4EFgQU5d8QMrwhLgTkDjWA+eXZGz+dybUwLwYDVR0RBCgwJoIJbG9jYWxo + b3N0ggEqhwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMAoGCCqGSM49BAMCA0kAMEYC + IQDPofN0fEl5gTwCYhk3nZbjMqJhZ8BsSJ6K8XRhxr7zbwIhAPsgQCFOqUWg632O + NEO53OQ6CIqnpxSskjsFNH4ZBQOE + -----END CERTIFICATE----- + tls.key: | + -----BEGIN EC PRIVATE KEY----- + MHcCAQEEIIcFtWTLQv5JaRRxdkPKkO98OrvgeztbZ7h8Ev/4UbE4oAoGCCqGSM49 + AwEHoUQDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLPtp0fxE7hmteS + t6gjQriy90fP8j9OJXBNAjt915kLY4zVvg== + -----END EC PRIVATE KEY----- + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "lnd.fullname" . }}-channels + labels: + channels: "true" + {{- include "lnd.labels" . | nindent 4 }} +data: + source: {{ include "lnd.fullname" . }} + channels: | + {{ .Values.channels | toJson }} diff --git a/resources/charts/bitcoincore/charts/lnd/templates/pod.yaml b/resources/charts/bitcoincore/charts/lnd/templates/pod.yaml new file mode 100644 index 000000000..e3b9782d7 --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/templates/pod.yaml @@ -0,0 +1,82 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "lnd.fullname" . }} + labels: + {{- include "lnd.labels" . | nindent 4 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + app: {{ include "lnd.fullname" . }} + {{- if .Values.collectLogs }} + collect_logs: "true" + {{- end }} + chain: {{ .Values.global.chain }} +spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 4 }} + {{- end }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 4 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 8 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: rpc + containerPort: {{ .Values.RPCPort }} + protocol: TCP + - name: p2p + containerPort: {{ .Values.P2PPort }} + protocol: TCP + - name: rest + containerPort: {{ .Values.RestPort }} + protocol: TCP + livenessProbe: + {{- toYaml .Values.livenessProbe | nindent 8 }} + readinessProbe: + {{- toYaml .Values.readinessProbe | nindent 8 }} + startupProbe: + {{- toYaml .Values.startupProbe | nindent 8 }} + resources: + {{- toYaml .Values.resources | nindent 8 }} + volumeMounts: + {{- with .Values.volumeMounts }} + {{- toYaml . | nindent 8 }} + {{- end }} + - mountPath: /root/.lnd/lnd.conf + name: config + subPath: lnd.conf + - mountPath: /root/.lnd/tls.key + name: config + subPath: tls.key + - mountPath: /root/.lnd/tls.cert + name: config + subPath: tls.cert + {{- if .Values.circuitBreaker }} + - name: circuitbreaker + image: pinheadmz/circuitbreaker:278737d + imagePullPolicy: IfNotPresent + {{- end}} + volumes: + {{- with .Values.volumes }} + {{- toYaml . | nindent 4 }} + {{- end }} + - configMap: + name: {{ include "lnd.fullname" . }} + name: config + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 4 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 4 }} + {{- end }} diff --git a/resources/charts/bitcoincore/charts/lnd/templates/service.yaml b/resources/charts/bitcoincore/charts/lnd/templates/service.yaml new file mode 100644 index 000000000..51826ee9b --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "lnd.fullname" . }} + labels: + {{- include "lnd.labels" . | nindent 4 }} + app: {{ include "lnd.fullname" . }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.RPCPort }} + targetPort: rpc + protocol: TCP + name: rpc + - port: {{ .Values.P2PPort }} + targetPort: p2p + protocol: TCP + name: p2p + - port: {{ .Values.RestPort }} + targetPort: rest + protocol: TCP + name: rest + selector: + {{- include "lnd.selectorLabels" . | nindent 4 }} diff --git a/resources/charts/bitcoincore/charts/lnd/values.yaml b/resources/charts/bitcoincore/charts/lnd/values.yaml new file mode 100644 index 000000000..d56e65bf4 --- /dev/null +++ b/resources/charts/bitcoincore/charts/lnd/values.yaml @@ -0,0 +1,134 @@ +# Default values for lnd. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +namespace: warnet + +image: + repository: lightninglabs/lnd + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "v0.18.3-beta" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +podLabels: + app: "warnet" + mission: "lightning" + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + +RPCPort: 10009 +P2PPort: 9735 +RestPort: 8080 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +livenessProbe: + exec: + command: + - pidof + - lnd + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 +readinessProbe: + failureThreshold: 1 + periodSeconds: 1 + successThreshold: 1 + tcpSocket: + port: 10009 + timeoutSeconds: 1 +startupProbe: + failureThreshold: 10 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 60 + exec: + command: + - /bin/sh + - -c + - | + PHRASE=`curl --silent --insecure https://localhost:8080/v1/genseed | grep -o '\[[^]]*\]'` + curl --insecure https://localhost:8080/v1/initwallet --data "{\"macaroon_root_key\":\"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=\", \"wallet_password\":\"AAAAAAAAAAA=\", \"cipher_seed_mnemonic\": $PHRASE}" + +# Additional volumes on the output Deployment definition. 
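The startupProbe above does more than gate readiness: it initializes each lnd wallet over lnd's own REST API, fetching a seed from /v1/genseed and then calling /v1/initwallet with a fixed wallet password and macaroon root key, which is what makes the admin macaroon deterministic across tanks (the same hex that the commander and simln pieces of this change hard-code). A minimal Python sketch of those two calls, assuming it runs where the probe runs (inside the pod, against the REST port 8080 configured above); the payload fields and values are copied from the probe script, and the small rest() helper is only for illustration:

import http.client
import json
import ssl

# Trust lnd's self-signed certificate, like the probe's `curl --insecure`.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

def rest(method, path, body=None):
    # One connection per call keeps the sketch simple.
    conn = http.client.HTTPSConnection("localhost", 8080, context=ctx)
    conn.request(method, path, body=body, headers={"Content-Type": "application/json"})
    return json.loads(conn.getresponse().read())

# 1) Ask lnd for a seed phrase (the probe extracts the same JSON list with grep).
seed = rest("GET", "/v1/genseed")["cipher_seed_mnemonic"]

# 2) Initialize the wallet with the fixed password and macaroon root key from the
#    probe, yielding the deterministic admin macaroon.
print(rest("POST", "/v1/initwallet", body=json.dumps({
    "macaroon_root_key": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=",
    "wallet_password": "AAAAAAAAAAA=",
    "cipher_seed_mnemonic": seed,
})))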
+volumes: [] +# - name: foo +# secret: +# secretName: mysecret +# optional: false + +# Additional volumeMounts on the output Deployment definition. +volumeMounts: [] +# - name: foo +# mountPath: "/etc/foo" +# readOnly: true + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +baseConfig: | + norest=false + restlisten=0.0.0.0:8080 + debuglevel=debug + accept-keysend=true + bitcoin.active=true + bitcoin.node=bitcoind + maxpendingchannels=64 + trickledelay=1 + rpclisten=0.0.0.0:10009 + bitcoind.rpcuser=user + protocol.wumbo-channels=1 + # zmq* and bitcoind.rpcpass are set in configmap.yaml + +config: "" + +defaultConfig: "" + +channels: [] diff --git a/resources/charts/bitcoincore/templates/_helpers.tpl b/resources/charts/bitcoincore/templates/_helpers.tpl index 26258b5de..81ab85a37 100644 --- a/resources/charts/bitcoincore/templates/_helpers.tpl +++ b/resources/charts/bitcoincore/templates/_helpers.tpl @@ -65,6 +65,6 @@ Always add for custom semver, check version for valid semver {{- $custom := contains "-" .Values.image.tag -}} {{- $newer := semverCompare ">=0.17.0" .Values.image.tag -}} {{- if or $newer $custom -}} -[{{ .Values.chain }}] +[{{ .Values.global.chain }}] {{- end -}} {{- end -}} diff --git a/resources/charts/bitcoincore/templates/configmap.yaml b/resources/charts/bitcoincore/templates/configmap.yaml index 36c5ab389..cc1e580f2 100644 --- a/resources/charts/bitcoincore/templates/configmap.yaml +++ b/resources/charts/bitcoincore/templates/configmap.yaml @@ -6,14 +6,14 @@ metadata: {{- include "bitcoincore.labels" . | nindent 4 }} data: bitcoin.conf: | - {{ .Values.chain }}=1 + {{ .Values.global.chain }}=1 {{ template "bitcoincore.check_semver" . }} {{- .Values.baseConfig | nindent 4 }} - rpcport={{ index .Values .Values.chain "RPCPort" }} - rpcpassword={{ .Values.rpcpassword }} - zmqpubrawblock=tcp://0.0.0.0:{{ .Values.ZMQBlockPort }} - zmqpubrawtx=tcp://0.0.0.0:{{ .Values.ZMQTxPort }} + rpcport={{ index .Values.global .Values.global.chain "RPCPort" }} + rpcpassword={{ .Values.global.rpcpassword }} + zmqpubrawblock=tcp://0.0.0.0:{{ .Values.global.ZMQBlockPort }} + zmqpubrawtx=tcp://0.0.0.0:{{ .Values.global.ZMQTxPort }} {{- .Values.defaultConfig | nindent 4 }} {{- .Values.config | nindent 4 }} {{- range .Values.addnode }} diff --git a/resources/charts/bitcoincore/templates/pod.yaml b/resources/charts/bitcoincore/templates/pod.yaml index d7076e6e9..56cd61958 100644 --- a/resources/charts/bitcoincore/templates/pod.yaml +++ b/resources/charts/bitcoincore/templates/pod.yaml @@ -7,9 +7,11 @@ metadata: {{- with .Values.podLabels }} {{- toYaml . | nindent 4 }} {{- end }} - chain: {{ .Values.chain }} - RPCPort: "{{ index .Values .Values.chain "RPCPort" }}" - rpcpassword: {{ .Values.rpcpassword }} + chain: {{ .Values.global.chain }} + RPCPort: "{{ index .Values.global .Values.global.chain "RPCPort" }}" + ZMQTxPort: "{{ .Values.global.ZMQTxPort }}" + ZMQBlockPort: "{{ .Values.global.ZMQBlockPort }}" + rpcpassword: {{ .Values.global.rpcpassword }} app: {{ include "bitcoincore.fullname" . 
}} {{- if .Values.collectLogs }} collect_logs: "true" @@ -32,8 +34,8 @@ spec: args: - | apk add --no-cache curl - mkdir -p /root/.bitcoin/{{ .Values.chain }} - curl -L {{ .Values.loadSnapshot.url }} | tar -xz -C /root/.bitcoin/{{ .Values.chain }} + mkdir -p /root/.bitcoin/{{ .Values.global.chain }} + curl -L {{ .Values.loadSnapshot.url }} | tar -xz -C /root/.bitcoin/{{ .Values.global.chain }} volumeMounts: - name: data mountPath: /root/.bitcoin @@ -46,23 +48,23 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: rpc - containerPort: {{ index .Values .Values.chain "RPCPort" }} + containerPort: {{ index .Values.global .Values.global.chain "RPCPort" }} protocol: TCP - name: p2p - containerPort: {{ index .Values .Values.chain "P2PPort" }} + containerPort: {{ index .Values.global .Values.global.chain "P2PPort" }} protocol: TCP - name: zmq-tx - containerPort: {{ .Values.ZMQTxPort }} + containerPort: {{ .Values.global.ZMQTxPort }} protocol: TCP - name: zmq-block - containerPort: {{ .Values.ZMQBlockPort }} + containerPort: {{ .Values.global.ZMQBlockPort }} protocol: TCP livenessProbe: {{- toYaml .Values.livenessProbe | nindent 8 }} readinessProbe: {{- toYaml .Values.readinessProbe | nindent 8 }} tcpSocket: - port: {{ index .Values .Values.chain "RPCPort" }} + port: {{ index .Values.global .Values.global.chain "RPCPort" }} resources: {{- toYaml .Values.resources | nindent 8 }} volumeMounts: @@ -86,11 +88,11 @@ spec: - name: BITCOIN_RPC_HOST value: "127.0.0.1" - name: BITCOIN_RPC_PORT - value: "{{ index .Values .Values.chain "RPCPort" }}" + value: "{{ index .Values.global .Values.global.chain "RPCPort" }}" - name: BITCOIN_RPC_USER value: user - name: BITCOIN_RPC_PASSWORD - value: {{ .Values.rpcpassword }} + value: {{ .Values.global.rpcpassword }} {{- if .Values.metrics }} - name: METRICS value: {{ .Values.metrics }} diff --git a/resources/charts/bitcoincore/templates/service.yaml b/resources/charts/bitcoincore/templates/service.yaml index f37c384ef..8d8fa5324 100644 --- a/resources/charts/bitcoincore/templates/service.yaml +++ b/resources/charts/bitcoincore/templates/service.yaml @@ -8,19 +8,19 @@ metadata: spec: type: {{ .Values.service.type }} ports: - - port: {{ index .Values .Values.chain "RPCPort" }} + - port: {{ index .Values.global .Values.global.chain "RPCPort" }} targetPort: rpc protocol: TCP name: rpc - - port: {{ index .Values .Values.chain "P2PPort" }} + - port: {{ index .Values.global .Values.global.chain "P2PPort" }} targetPort: p2p protocol: TCP name: p2p - - port: {{ .Values.ZMQTxPort }} + - port: {{ .Values.global.ZMQTxPort }} targetPort: zmq-tx protocol: TCP name: zmq-tx - - port: {{ .Values.ZMQBlockPort }} + - port: {{ .Values.global.ZMQBlockPort }} targetPort: zmq-block protocol: TCP name: zmq-block diff --git a/resources/charts/bitcoincore/values.yaml b/resources/charts/bitcoincore/values.yaml index 6314ae32c..8c9f3215f 100644 --- a/resources/charts/bitcoincore/values.yaml +++ b/resources/charts/bitcoincore/values.yaml @@ -33,17 +33,6 @@ securityContext: {} service: type: ClusterIP -regtest: - RPCPort: 18443 - P2PPort: 18444 - -signet: - RPCPort: 38332 - P2PPort: 38333 - -ZMQTxPort: 28333 -ZMQBlockPort: 28332 - ingress: enabled: false className: "" @@ -109,12 +98,23 @@ tolerations: [] affinity: {} -chain: regtest - collectLogs: false metricsExport: false prometheusMetricsPort: 9332 +# These are values that are propogated to the sub-charts (i.e. 
lightning nodes) +global: + chain: regtest + regtest: + RPCPort: 18443 + P2PPort: 18444 + signet: + RPCPort: 38332 + P2PPort: 38333 + ZMQTxPort: 28333 + ZMQBlockPort: 28332 + rpcpassword: gn0cchi + baseConfig: | checkmempool=0 debuglogfile=debug.log @@ -130,8 +130,6 @@ baseConfig: | rest=1 # rpcport and zmq endpoints are configured by chain in configmap.yaml -rpcpassword: gn0cchi - config: "" defaultConfig: "" @@ -141,3 +139,6 @@ addnode: [] loadSnapshot: enabled: false url: "" + +ln: + lnd: false \ No newline at end of file diff --git a/resources/charts/commander/templates/pod.yaml b/resources/charts/commander/templates/pod.yaml index 1a9bb9310..0ad4583e1 100644 --- a/resources/charts/commander/templates/pod.yaml +++ b/resources/charts/commander/templates/pod.yaml @@ -23,7 +23,7 @@ spec: mountPath: /shared containers: - name: {{ .Chart.Name }} - image: python:3.12-slim + image: bitcoindevproject/commander imagePullPolicy: IfNotPresent command: ["/bin/sh", "-c"] args: @@ -35,3 +35,4 @@ spec: volumes: - name: shared-volume emptyDir: {} + serviceAccountName: {{ include "commander.fullname" . }} \ No newline at end of file diff --git a/resources/charts/commander/templates/rbac.yaml b/resources/charts/commander/templates/rbac.yaml new file mode 100644 index 000000000..7708328f3 --- /dev/null +++ b/resources/charts/commander/templates/rbac.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "commander.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ .Chart.Name }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "commander.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ .Chart.Name }} +rules: + - apiGroups: [""] + resources: ["pods", "configmaps"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "commander.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/name: {{ .Chart.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "commander.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "commander.fullname" . 
}} + namespace: {{ .Release.Namespace }} diff --git a/resources/charts/namespaces/values.yaml b/resources/charts/namespaces/values.yaml index 23ef66754..b68480705 100644 --- a/resources/charts/namespaces/values.yaml +++ b/resources/charts/namespaces/values.yaml @@ -7,13 +7,13 @@ roles: - name: pod-viewer rules: - apiGroups: [""] - resources: ["pods", "services"] + resources: ["pods", "services", "configmaps"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] verbs: ["get"] - apiGroups: [""] - resources: ["configmaps", "secrets"] + resources: ["secrets"] verbs: ["get", "list"] - apiGroups: [""] resources: ["persistentvolumeclaims", "namespaces"] @@ -33,7 +33,10 @@ roles: resources: ["pods/log", "pods/exec", "pods/attach", "pods/portforward"] verbs: ["get", "create"] - apiGroups: [""] - resources: ["configmaps", "secrets"] + resources: ["configmaps", "secrets", "serviceaccounts"] + verbs: ["get", "list", "create", "update", "watch"] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings"] verbs: ["get", "list", "create", "update"] - apiGroups: [""] resources: ["persistentvolumeclaims", "namespaces"] diff --git a/resources/images/commander/Dockerfile b/resources/images/commander/Dockerfile new file mode 100644 index 000000000..4a4744717 --- /dev/null +++ b/resources/images/commander/Dockerfile @@ -0,0 +1,5 @@ +# Use an official Python runtime as the base image +FROM python:3.12-slim + +# Python dependencies +RUN pip install --no-cache-dir kubernetes diff --git a/resources/plugins/__init__.py b/resources/plugins/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/resources/plugins/ark/ark.py b/resources/plugins/ark/ark.py new file mode 100644 index 000000000..713d61fa7 --- /dev/null +++ b/resources/plugins/ark/ark.py @@ -0,0 +1,21 @@ +def show_build_instructions(): + print("bark is an ark implementation that lives here: https://codeberg.org/ark-bitcoin/bark") + print("warnet init") + print("cd plugins/ark") + print("git clone git@codeberg.org:mpls/bark.git") + print("cd bark") + print("cargo build --workspace --release") + print("barkhead=$(git rev-parse --short HEAD)") + print("cd ../") + print("cp bark/target/release/bark dockerfiles/bark/") + print("cp bark/target/release/aspd dockerfiles/aspd/") + print("cd dockerfiles/bark") + print("docker build -t mplsgrant/bark:$barkhead .") + print("docker login") + print("docker push mplsgrant/bark:barkhead") + print("cd ../aspd") + print("docker build -t mplsgrant/aspd:$barkhead .") + print("docker push mplsgrant/aspd:$barkhead") + print( + "Don' forget to update the 'tag' section of the values.yaml file for aspd and bark with the value in $barkhead" + ) diff --git a/resources/plugins/ark/charts/aspd/.helmignore b/resources/plugins/ark/charts/aspd/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/resources/plugins/ark/charts/aspd/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/resources/plugins/ark/charts/aspd/Chart.yaml b/resources/plugins/ark/charts/aspd/Chart.yaml new file mode 100644 index 000000000..ec1dfaf3d --- /dev/null +++ b/resources/plugins/ark/charts/aspd/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: aspd +description: A Helm chart to deploy aspd +version: 0.1.0 +appVersion: "alpha-git-tag" diff --git a/resources/plugins/ark/charts/aspd/templates/NOTES.txt b/resources/plugins/ark/charts/aspd/templates/NOTES.txt new file mode 100644 index 000000000..d61b4f802 --- /dev/null +++ b/resources/plugins/ark/charts/aspd/templates/NOTES.txt @@ -0,0 +1 @@ +Thank you for installing aspd. diff --git a/resources/plugins/ark/charts/aspd/templates/_helpers.tpl b/resources/plugins/ark/charts/aspd/templates/_helpers.tpl new file mode 100644 index 000000000..a699083e5 --- /dev/null +++ b/resources/plugins/ark/charts/aspd/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "mychart.name" -}} +{{- .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "mychart.fullname" -}} +{{- printf "%s-%s" (include "mychart.name" .) .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/resources/plugins/ark/charts/aspd/templates/deployment.yaml b/resources/plugins/ark/charts/aspd/templates/deployment.yaml new file mode 100644 index 000000000..ae7432877 --- /dev/null +++ b/resources/plugins/ark/charts/aspd/templates/deployment.yaml @@ -0,0 +1,52 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-{{ .Values.name }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ .Release.Name }}-{{ .Values.name }} + template: + metadata: + labels: + app: {{ .Release.Name }}-{{ .Values.name }} + spec: + containers: + - name: {{ .Values.name }}-main + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + command: ["aspd", "start", "--datadir", "{{ .Values.datadir }}"] + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.datadir }}" + env: + - name: BITCOIND_URL + value: "{{ .Values.bitcoind.url }}" + - name: BITCOIND_COOKIE + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-bitcoind + key: cookie + initContainers: + - name: {{ .Values.name }}-setup + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + command: + [ + "aspd", "create", + "--network", "{{ .Values.network }}", + "--datadir", "{{ .Values.datadir }}", + "--bitcoind-url", "{{ .Values.bitcoind.url }}", + "--bitcoind-cookie", "$(BITCOIND_COOKIE)" + ] + env: + - name: BITCOIND_COOKIE + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-bitcoind + key: cookie + volumeMounts: + - name: data-volume + mountPath: "{{ .Values.datadir }}" + volumes: + - name: data-volume + emptyDir: {} diff --git a/resources/plugins/ark/charts/aspd/values.yaml b/resources/plugins/ark/charts/aspd/values.yaml new file mode 100644 index 000000000..0f9fa9dae --- /dev/null +++ b/resources/plugins/ark/charts/aspd/values.yaml @@ -0,0 +1,17 @@ +name: "aspd" + +image: + repository: "mplsgrant/aspd" + tag: "af39ec4" + pullPolicy: IfNotPresent + +network: regtest +datadir: /data/arkdatadir + +bitcoind: + url: http://bitcoind-url:port + cookie: bitcoind-cookie + +service: + type: ClusterIP + port: 3535 diff --git a/resources/plugins/ark/charts/bark/.helmignore b/resources/plugins/ark/charts/bark/.helmignore new file mode 100644 index 
000000000..0e8a0eb36 --- /dev/null +++ b/resources/plugins/ark/charts/bark/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/resources/plugins/ark/charts/bark/Chart.yaml b/resources/plugins/ark/charts/bark/Chart.yaml new file mode 100644 index 000000000..f190ce343 --- /dev/null +++ b/resources/plugins/ark/charts/bark/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: bark +description: A Helm chart to deploy bark +version: 0.1.0 +appVersion: alpha-git-tag diff --git a/resources/plugins/ark/charts/bark/templates/NOTES.txt b/resources/plugins/ark/charts/bark/templates/NOTES.txt new file mode 100644 index 000000000..435c9455b --- /dev/null +++ b/resources/plugins/ark/charts/bark/templates/NOTES.txt @@ -0,0 +1 @@ +Thank you for installing bark. diff --git a/resources/plugins/ark/charts/bark/templates/_helpers.tpl b/resources/plugins/ark/charts/bark/templates/_helpers.tpl new file mode 100644 index 000000000..a699083e5 --- /dev/null +++ b/resources/plugins/ark/charts/bark/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "mychart.name" -}} +{{- .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "mychart.fullname" -}} +{{- printf "%s-%s" (include "mychart.name" .) .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/resources/plugins/ark/charts/bark/templates/pod.yaml b/resources/plugins/ark/charts/bark/templates/pod.yaml new file mode 100644 index 000000000..80f99e435 --- /dev/null +++ b/resources/plugins/ark/charts/bark/templates/pod.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "mychart.fullname" . }} + labels: + app: {{ include "mychart.name" . 
}} + mission: plugin +spec: + containers: + - name: {{ .Values.name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: {{ .Values.command | toJson }} + args: {{ .Values.args | toJson }} diff --git a/resources/plugins/ark/charts/bark/values.yaml b/resources/plugins/ark/charts/bark/values.yaml new file mode 100644 index 000000000..a5f5570fe --- /dev/null +++ b/resources/plugins/ark/charts/bark/values.yaml @@ -0,0 +1,7 @@ +name: "bark" +image: + repository: "mplsgrant/bark" + tag: "af39ec4" + pullPolicy: IfNotPresent +command: ["sh", "-c"] +args: ["while true; do sleep 3600; done"] diff --git a/resources/plugins/ark/dockerfiles/aspd/Dockerfile b/resources/plugins/ark/dockerfiles/aspd/Dockerfile new file mode 100644 index 000000000..0e2486fe3 --- /dev/null +++ b/resources/plugins/ark/dockerfiles/aspd/Dockerfile @@ -0,0 +1,9 @@ +FROM debian:bookworm-slim + +RUN apt-get update && apt-get install -y jq && rm -rf /var/lib/apt/lists/* + +COPY aspd /usr/local/bin/aspd + +RUN chmod +x /usr/local/bin/aspd + +ENTRYPOINT ["/usr/local/bin/aspd"] diff --git a/resources/plugins/ark/dockerfiles/bark/Dockerfile b/resources/plugins/ark/dockerfiles/bark/Dockerfile new file mode 100644 index 000000000..8160bc3f1 --- /dev/null +++ b/resources/plugins/ark/dockerfiles/bark/Dockerfile @@ -0,0 +1,9 @@ +FROM debian:bookworm-slim + +RUN apt-get update && apt-get install -y jq && rm -rf /var/lib/apt/lists/* + +COPY bark /usr/local/bin/bark + +RUN chmod +x /usr/local/bin/bark + +ENTRYPOINT ["/usr/local/bin/bark"] diff --git a/resources/plugins/ark/plugin.yaml b/resources/plugins/ark/plugin.yaml new file mode 100644 index 000000000..844e6b7ac --- /dev/null +++ b/resources/plugins/ark/plugin.yaml @@ -0,0 +1,2 @@ +enabled: false + diff --git a/resources/plugins/simln/charts/simln/.helmignore b/resources/plugins/simln/charts/simln/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/resources/plugins/simln/charts/simln/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/resources/plugins/simln/charts/simln/Chart.yaml b/resources/plugins/simln/charts/simln/Chart.yaml new file mode 100644 index 000000000..92f904620 --- /dev/null +++ b/resources/plugins/simln/charts/simln/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +name: simln +description: A Helm chart to deploy simln +version: 0.1.0 +appVersion: "0.1.0" diff --git a/resources/plugins/simln/charts/simln/files/sim.json b/resources/plugins/simln/charts/simln/files/sim.json new file mode 100644 index 000000000..a72bd29e3 --- /dev/null +++ b/resources/plugins/simln/charts/simln/files/sim.json @@ -0,0 +1,16 @@ +{ + "nodes": [ + { + "id": "tank-0000-ln", + "address": "https://tank-0004-ln:10009", + "macaroon": "/working/admin.macaroon", + "cert": "/working/tls.cert" + }, + { + "id": "tank-0001-ln", + "address": "https://tank-0005-ln:10009", + "macaroon": "/working/admin.macaroon", + "cert": "/working/tls.cert" + } + ] +} diff --git a/resources/plugins/simln/charts/simln/templates/NOTES.txt b/resources/plugins/simln/charts/simln/templates/NOTES.txt new file mode 100644 index 000000000..2d8319bde --- /dev/null +++ b/resources/plugins/simln/charts/simln/templates/NOTES.txt @@ -0,0 +1 @@ +Thank you for installing simln. diff --git a/resources/plugins/simln/charts/simln/templates/_helpers.tpl b/resources/plugins/simln/charts/simln/templates/_helpers.tpl new file mode 100644 index 000000000..a699083e5 --- /dev/null +++ b/resources/plugins/simln/charts/simln/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "mychart.name" -}} +{{- .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "mychart.fullname" -}} +{{- printf "%s-%s" (include "mychart.name" .) .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/resources/plugins/simln/charts/simln/templates/configmap.yaml b/resources/plugins/simln/charts/simln/templates/configmap.yaml new file mode 100644 index 000000000..ecfb3428d --- /dev/null +++ b/resources/plugins/simln/charts/simln/templates/configmap.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "mychart.fullname" . 
}}-data +data: + sim.json: | + {{ .Files.Get "files/sim.json" | nindent 4 }} + tls.cert: | + -----BEGIN CERTIFICATE----- + MIIB8TCCAZagAwIBAgIUJDsR6mmY+TaO9pCfjtotlbOkzJMwCgYIKoZIzj0EAwIw + MjEfMB0GA1UECgwWbG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2Fy + bmV0MB4XDTI0MTExMTE2NTM1MFoXDTM0MTEwOTE2NTM1MFowMjEfMB0GA1UECgwW + bG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2FybmV0MFkwEwYHKoZI + zj0CAQYIKoZIzj0DAQcDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLP + tp0fxE7hmteSt6gjQriy90fP8j9OJXBNAjt915kLY4zVvqOBiTCBhjAOBgNVHQ8B + Af8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAd + BgNVHQ4EFgQU5d8QMrwhLgTkDjWA+eXZGz+dybUwLwYDVR0RBCgwJoIJbG9jYWxo + b3N0ggEqhwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMAoGCCqGSM49BAMCA0kAMEYC + IQDPofN0fEl5gTwCYhk3nZbjMqJhZ8BsSJ6K8XRhxr7zbwIhAPsgQCFOqUWg632O + NEO53OQ6CIqnpxSskjsFNH4ZBQOE + -----END CERTIFICATE----- + tls.key: | + -----BEGIN EC PRIVATE KEY----- + MHcCAQEEIIcFtWTLQv5JaRRxdkPKkO98OrvgeztbZ7h8Ev/4UbE4oAoGCCqGSM49 + AwEHoUQDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLPtp0fxE7hmteS + t6gjQriy90fP8j9OJXBNAjt915kLY4zVvg== + -----END EC PRIVATE KEY----- + admin.macaroon.hex: | + 0201036c6e6402f801030a1062beabbf2a614b112128afa0c0b4fdd61201301a160a0761646472657373120472656164120577726974651a130a04696e666f120472656164120577726974651a170a08696e766f69636573120472656164120577726974651a210a086d616361726f6f6e120867656e6572617465120472656164120577726974651a160a076d657373616765120472656164120577726974651a170a086f6666636861696e120472656164120577726974651a160a076f6e636861696e120472656164120577726974651a140a057065657273120472656164120577726974651a180a067369676e6572120867656e657261746512047265616400000620b17be53e367290871681055d0de15587f6d1cd47d1248fe2662ae27f62cfbdc6 diff --git a/resources/plugins/simln/charts/simln/templates/pod.yaml b/resources/plugins/simln/charts/simln/templates/pod.yaml new file mode 100644 index 000000000..c933769cc --- /dev/null +++ b/resources/plugins/simln/charts/simln/templates/pod.yaml @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "mychart.fullname" . }} + labels: + app: {{ include "mychart.name" . }} + mission: {{ .Values.name }} +spec: + initContainers: + - name: "init-container" + image: "busybox" + command: + - "sh" + - "-c" + args: + - > + cp /configmap/* /working; + cd /working; + cat admin.macaroon.hex | xxd -r -p > admin.macaroon + volumeMounts: + - name: {{ .Values.workingVolume.name }} + mountPath: {{ .Values.workingVolume.mountPath }} + - name: {{ .Values.configmapVolume.name }} + mountPath: {{ .Values.configmapVolume.mountPath }} + containers: + - name: {{ .Values.name }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - "sh" + - "-c" + args: + - > + cd /working; + sim-cli + volumeMounts: + - name: {{ .Values.workingVolume.name }} + mountPath: {{ .Values.workingVolume.mountPath }} + - name: {{ .Values.configmapVolume.name }} + mountPath: {{ .Values.configmapVolume.mountPath }} + volumes: + - name: {{ .Values.configmapVolume.name }} + configMap: + name: {{ include "mychart.fullname" . 
}}-data + - name: {{ .Values.workingVolume.name }} + emptyDir: {} diff --git a/resources/plugins/simln/charts/simln/values.yaml b/resources/plugins/simln/charts/simln/values.yaml new file mode 100644 index 000000000..838f7a542 --- /dev/null +++ b/resources/plugins/simln/charts/simln/values.yaml @@ -0,0 +1,14 @@ +name: "simln" +image: + repository: "mplsgrant/sim-ln" + tag: "d8c165d" + pullPolicy: IfNotPresent + +workingVolume: + name: working-volume + mountPath: /working +configmapVolume: + name: configmap-volume + mountPath: /configmap + +defaultDataDir: /app/data diff --git a/resources/plugins/simln/plugin.yaml b/resources/plugins/simln/plugin.yaml new file mode 100644 index 000000000..d4ca94189 --- /dev/null +++ b/resources/plugins/simln/plugin.yaml @@ -0,0 +1 @@ +enabled: true diff --git a/resources/plugins/simln/simln.py b/resources/plugins/simln/simln.py new file mode 100644 index 000000000..c57b76e38 --- /dev/null +++ b/resources/plugins/simln/simln.py @@ -0,0 +1,354 @@ +import json +import logging +import random +from pathlib import Path +from subprocess import run +from time import sleep + +import click +from kubernetes.stream import stream + +from warnet.k8s import ( + download, + get_default_namespace, + get_pods_with_label, + get_static_client, + wait_for_pod, +) +from warnet.plugins import _get_plugins_directory as get_plugin_directory +from warnet.process import run_command +from warnet.status import _get_tank_status as network_status + +log = logging.getLogger("simln") +log.setLevel(logging.DEBUG) +console_handler = logging.StreamHandler() +console_handler.setLevel(logging.DEBUG) +formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") +console_handler.setFormatter(formatter) +log.addHandler(console_handler) + +LIGHTNING_SELECTOR = "mission=lightning" + + +@click.group() +def simln(): + """Commands for the SimLN plugin""" + pass + + +def warnet_register_plugin(register_command): + register_command(simln) + + +class SimLNError(Exception): + pass + + +@simln.command() +def run_demo(): + """Run the SimLN Plugin demo""" + _init_network() + _fund_wallets() + _wait_for_everyone_to_have_a_host() + log.info(warnet("bitcoin rpc tank-0000 -generate 7")) + # warnet("ln open-all-channels") + manual_open_channels() + log.info(warnet("bitcoin rpc tank-0000 -generate 7")) + wait_for_gossip_sync(2) + log.info("done waiting") + pod_name = prepare_and_launch_activity() + log.info(pod_name) + wait_for_pod(pod_name, 60) + + +@simln.command() +def list_simln_podnames(): + """Get a list of simln pod names""" + print([pod.metadata.name for pod in get_pods_with_label("mission=simln")]) + + +@simln.command() +def download_results(pod_name: str): + """Download SimLN results to the current directory""" + print(download(pod_name, source_path=Path("/working/results"))) + + +def prepare_and_launch_activity() -> str: + sample_activity = _get_example_activity() + log.info(f"Activity: {sample_activity}") + pod_name = _launch_activity(sample_activity) + log.info("Sent command. Done.") + return pod_name + + +def _get_example_activity() -> list[dict]: + pods = get_pods_with_label(LIGHTNING_SELECTOR) + try: + pod_a = pods[1].metadata.name + pod_b = pods[2].metadata.name + except Exception as err: + raise SimLNError( + "Could not access the lightning nodes needed for the example.\n Try deploying some." 
+ ) from err + return [{"source": pod_a, "destination": pod_b, "interval_secs": 1, "amount_msat": 2000}] + + +@simln.command() +def get_example_activity(): + """Get an activity representing node 2 sending msat to node 3""" + print(_get_example_activity()) + + +def _launch_activity(activity: list[dict]) -> str: + """Launch a SimLN chart which includes the `activity`""" + random_digits = "".join(random.choices("0123456789", k=10)) + plugin_dir = get_plugin_directory() + _generate_nodes_file(activity, plugin_dir / Path("simln/charts/simln/files/sim.json")) + command = f"helm upgrade --install simln-{random_digits} {plugin_dir}/simln/charts/simln" + log.info(f"generate activity: {command}") + run_command(command) + return f"simln-simln-{random_digits}" + + +@simln.command() +@click.argument("activity", type=str) +def launch_activity(activity: str): + """Takes a SimLN Activity which is a JSON list of objects.""" + parsed_activity = json.loads(activity) + print(_launch_activity(parsed_activity)) + + +def _init_network(): + """Mine regtest coins and wait for ln nodes to come online.""" + log.info("Initializing network") + wait_for_all_tanks_status(target="running") + + warnet("bitcoin rpc tank-0000 createwallet miner") + warnet("bitcoin rpc tank-0000 -generate 110") + _wait_for_predicate(lambda: int(warnet("bitcoin rpc tank-0000 getblockcount")) > 100) + + def wait_for_all_ln_rpc(): + lns = get_pods_with_label(LIGHTNING_SELECTOR) + for v1_pod in lns: + ln = v1_pod.metadata.name + try: + warnet(f"ln rpc {ln} getinfo") + except Exception: + log.info(f"LN node {ln} not ready for rpc yet") + return False + return True + + _wait_for_predicate(wait_for_all_ln_rpc) + + +@simln.command() +def init_network(): + _init_network() + + +def _fund_wallets(): + """Fund each ln node with 10 regtest coins.""" + log.info("Funding wallets") + outputs = "" + lns = get_pods_with_label(LIGHTNING_SELECTOR) + for v1_pod in lns: + lnd = v1_pod.metadata.name + addr = json.loads(warnet(f"ln rpc {lnd} newaddress p2wkh"))["address"] + outputs += f',"{addr}":10' + # trim first comma + outputs = outputs[1:] + log.info(warnet("bitcoin rpc tank-0000 sendmany '' '{" + outputs + "}'")) + log.info(warnet("bitcoin rpc tank-0000 -generate 1")) + + +@simln.command() +def fund_wallets(): + """Fund each ln node with 10 regtest coins.""" + _fund_wallets() + + +def _everyone_has_a_host() -> bool: + """Find out if each ln node has a host.""" + pods = get_pods_with_label(LIGHTNING_SELECTOR) + host_havers = 0 + for pod in pods: + name = pod.metadata.name + result = warnet(f"ln host {name}") + if len(result) > 1: + host_havers += 1 + return host_havers == len(pods) and host_havers != 0 + + +@simln.command() +def wait_for_everyone_to_have_a_host(): + log.info(_wait_for_everyone_to_have_a_host()) + + +def _wait_for_everyone_to_have_a_host(): + _wait_for_predicate(_everyone_has_a_host, timeout=10 * 60) + + +def _wait_for_predicate(predicate, timeout=5 * 60, interval=5): + log.info( + f"Waiting for predicate ({predicate.__name__}) with timeout {timeout}s and interval {interval}s" + ) + while timeout > 0: + try: + if predicate(): + return + except Exception: + pass + sleep(interval) + timeout -= interval + import inspect + + raise Exception( + f"Timed out waiting for Truth from predicate: {inspect.getsource(predicate).strip()}" + ) + + +def wait_for_all_tanks_status(target: str = "running", timeout: int = 20 * 60, interval: int = 5): + """Poll the warnet server for container status. 
Block until all tanks are running""" + + def check_status(): + tanks = network_status() + stats = {"total": 0} + # "Probably" means all tanks are stopped and deleted + if len(tanks) == 0: + return True + for tank in tanks: + status = tank["status"] + stats["total"] += 1 + stats[status] = stats.get(status, 0) + 1 + log.info(f"Waiting for all tanks to reach '{target}': {stats}") + return target in stats and stats[target] == stats["total"] + + _wait_for_predicate(check_status, timeout, interval) + + +def wait_for_gossip_sync(expected: int = 2): + """Wait for any of the ln nodes to have an `expected` number of edges.""" + log.info(f"Waiting for sync (expecting {expected})...") + current = 0 + while current < expected: + current = 0 + pods = get_pods_with_label(LIGHTNING_SELECTOR) + for v1_pod in pods: + node = v1_pod.metadata.name + chs = json.loads(run_command(f"warnet ln rpc {node} describegraph"))["edges"] + log.info(f"{node}: {len(chs)} channels") + current += len(chs) + sleep(1) + log.info("Synced") + + +def warnet(cmd: str = "--help"): + """Pass a `cmd` to Warnet.""" + log.info(f"Executing warnet command: {cmd}") + command = ["warnet"] + cmd.split() + proc = run(command, capture_output=True) + if proc.stderr: + raise Exception(proc.stderr.decode().strip()) + return proc.stdout.decode() + + +def _generate_nodes_file(activity: list[dict], output_file: Path = Path("nodes.json")): + nodes = [] + + for i in get_pods_with_label(LIGHTNING_SELECTOR): + name = i.metadata.name + node = { + "id": name, + "address": f"https://{name}:10009", + "macaroon": "/working/admin.macaroon", + "cert": "/working/tls.cert", + } + nodes.append(node) + + data = {"nodes": nodes, "activity": activity} + + with open(output_file, "w") as f: + json.dump(data, f, indent=2) + + +def manual_open_channels(): + """Manually open channels between ln nodes 1, 2, and 3""" + + def wait_for_two_txs(): + _wait_for_predicate( + lambda: json.loads(warnet("bitcoin rpc tank-0000 getmempoolinfo"))["size"] == 2 + ) + + # 0 -> 1 -> 2 + pk1 = warnet("ln pubkey tank-0001-ln") + pk2 = warnet("ln pubkey tank-0002-ln") + log.info(f"pk1: {pk1}") + log.info(f"pk2: {pk2}") + + host1 = "" + host2 = "" + + while not host1 or not host2: + if not host1: + host1 = warnet("ln host tank-0001-ln") + if not host2: + host2 = warnet("ln host tank-0002-ln") + sleep(1) + + print( + warnet( + f"ln rpc tank-0000-ln openchannel --node_key {pk1} --local_amt 100000 --connect {host1}" + ) + ) + print( + warnet( + f"ln rpc tank-0001-ln openchannel --node_key {pk2} --local_amt 100000 --connect {host2}" + ) + ) + + wait_for_two_txs() + + warnet("bitcoin rpc tank-0000 -generate 10") + + +def _rpc(pod, method: str, params: tuple[str, ...]) -> str: + namespace = get_default_namespace() + + sclient = get_static_client() + if params: + cmd = [method] + cmd.extend(params) + else: + cmd = [method] + resp = stream( + sclient.connect_get_namespaced_pod_exec, + pod, + namespace, + container="simln", + command=cmd, + stderr=True, + stdin=False, + stdout=True, + tty=False, + _preload_content=False, + ) + stdout = "" + stderr = "" + while resp.is_open(): + resp.update(timeout=1) + if resp.peek_stdout(): + stdout_chunk = resp.read_stdout() + stdout += stdout_chunk + if resp.peek_stderr(): + stderr_chunk = resp.read_stderr() + stderr += stderr_chunk + return stdout + stderr + + +@simln.command(context_settings={"ignore_unknown_options": True}) +@click.argument("pod", type=str) +@click.argument("method", type=str) +@click.argument("params", type=str, nargs=-1) # this will capture 
all remaining arguments +def rpc(pod: str, method: str, params: tuple[str, ...]): + """Run commands on a pod""" + print(_rpc(pod, method, params)) diff --git a/resources/scenarios/commander.py b/resources/scenarios/commander.py index 1f7d34a80..c581c3fad 100644 --- a/resources/scenarios/commander.py +++ b/resources/scenarios/commander.py @@ -1,15 +1,20 @@ import argparse +import base64 import configparser +import http.client import json import logging import os import pathlib import random import signal +import ssl import sys import tempfile +import time from typing import Dict +from kubernetes import client, config from test_framework.authproxy import AuthServiceProxy from test_framework.p2p import NetworkThread from test_framework.test_framework import ( @@ -20,13 +25,51 @@ from test_framework.test_node import TestNode from test_framework.util import PortSeed, get_rpc_proxy -WARNET_FILE = "/shared/warnet.json" +# hard-coded deterministic lnd credentials +ADMIN_MACAROON_HEX = "0201036c6e6402f801030a1062beabbf2a614b112128afa0c0b4fdd61201301a160a0761646472657373120472656164120577726974651a130a04696e666f120472656164120577726974651a170a08696e766f69636573120472656164120577726974651a210a086d616361726f6f6e120867656e6572617465120472656164120577726974651a160a076d657373616765120472656164120577726974651a170a086f6666636861696e120472656164120577726974651a160a076f6e636861696e120472656164120577726974651a140a057065657273120472656164120577726974651a180a067369676e6572120867656e657261746512047265616400000620b17be53e367290871681055d0de15587f6d1cd47d1248fe2662ae27f62cfbdc6" +# Don't worry about lnd's self-signed certificates +INSECURE_CONTEXT = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) +INSECURE_CONTEXT.check_hostname = False +INSECURE_CONTEXT.verify_mode = ssl.CERT_NONE + +# Figure out what namespace we are in +with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace") as f: + NAMESPACE = f.read().strip() + +# Use the in-cluster k8s client to determine what pods we have access to +config.load_incluster_config() +sclient = client.CoreV1Api() +pods = sclient.list_namespaced_pod(namespace=NAMESPACE) +cmaps = sclient.list_namespaced_config_map(namespace=NAMESPACE) + +WARNET = {"tanks": [], "lightning": [], "channels": []} +for pod in pods.items: + if "mission" not in pod.metadata.labels: + continue + + if pod.metadata.labels["mission"] == "tank": + WARNET["tanks"].append( + { + "tank": pod.metadata.name, + "chain": pod.metadata.labels["chain"], + "rpc_host": pod.status.pod_ip, + "rpc_port": int(pod.metadata.labels["RPCPort"]), + "rpc_user": "user", + "rpc_password": pod.metadata.labels["rpcpassword"], + "init_peers": pod.metadata.annotations["init_peers"], + } + ) + + if pod.metadata.labels["mission"] == "lightning": + WARNET["lightning"].append(pod.metadata.name) -try: - with open(WARNET_FILE) as file: - WARNET = json.load(file) -except Exception: - WARNET = [] +for cm in cmaps.items: + if not cm.metadata.labels or "channels" not in cm.metadata.labels: + continue + channel_jsons = json.loads(cm.data["channels"]) + for channel_json in channel_jsons: + channel_json["source"] = cm.data["source"] + WARNET["channels"].append(channel_json) # Ensure that all RPC calls are made with brand new http connections @@ -39,6 +82,91 @@ def auth_proxy_request(self, method, path, postdata): AuthServiceProxy._request = auth_proxy_request +class LND: + def __init__(self, pod_name): + self.name = pod_name + self.conn = http.client.HTTPSConnection( + host=pod_name, port=8080, timeout=5, context=INSECURE_CONTEXT + ) + + def 
get(self, uri): + while True: + try: + self.conn.request( + method="GET", + url=uri, + headers={"Grpc-Metadata-macaroon": ADMIN_MACAROON_HEX, "Connection": "close"}, + ) + return self.conn.getresponse().read().decode("utf8") + except Exception: + time.sleep(1) + + def post(self, uri, data): + body = json.dumps(data) + attempt = 0 + while True: + attempt += 1 + try: + self.conn.request( + method="POST", + url=uri, + body=body, + headers={ + "Content-Type": "application/json", + "Content-Length": str(len(body)), + "Grpc-Metadata-macaroon": ADMIN_MACAROON_HEX, + "Connection": "close", + }, + ) + # Stream output, otherwise we get a timeout error + res = self.conn.getresponse() + stream = "" + while True: + try: + data = res.read(1) + if len(data) == 0: + break + else: + stream += data.decode("utf8") + except Exception: + break + return stream + except Exception: + time.sleep(1) + + def newaddress(self): + res = self.get("/v1/newaddress") + return json.loads(res) + + def walletbalance(self): + res = self.get("/v1/balance/blockchain") + return int(json.loads(res)["confirmed_balance"]) + + def uri(self): + res = self.get("/v1/getinfo") + info = json.loads(res) + if "uris" not in info or len(info["uris"]) == 0: + return None + return info["uris"][0] + + def connect(self, target_uri): + pk, host = target_uri.split("@") + res = self.post("/v1/peers", data={"addr": {"pubkey": pk, "host": host}}) + return json.loads(res) + + def channel(self, pk, local_amt, push_amt, fee_rate): + res = self.post( + "/v1/channels/stream", + data={ + "local_funding_amount": local_amt, + "push_sat": push_amt, + "node_pubkey": pk, + "sat_per_vbyte": fee_rate, + }, + ) + return json.loads(res) + + class Commander(BitcoinTestFramework): # required by subclasses of BitcoinTestFramework def set_test_params(self): @@ -55,6 +183,17 @@ def ensure_miner(node): node.createwallet("miner", descriptors=True) return node.get_wallet_rpc("miner") + @staticmethod + def hex_to_b64(hex): + return base64.b64encode(bytes.fromhex(hex)).decode() + + @staticmethod + def b64_to_hex(b64, reverse=False): + if reverse: + return base64.b64decode(b64)[::-1].hex() + else: + return base64.b64decode(b64).hex() + def handle_sigterm(self, signum, frame): print("SIGTERM received, stopping...") self.shutdown() @@ -82,8 +221,10 @@ def setup(self): # Keep a separate index of tanks by pod name self.tanks: Dict[str, TestNode] = {} + self.lns: Dict[str, LND] = {} + self.channels = WARNET["channels"] - for i, tank in enumerate(WARNET): + for i, tank in enumerate(WARNET["tanks"]): self.log.info( f"Adding TestNode #{i} from pod {tank['tank']} with IP {tank['rpc_host']}" ) @@ -107,10 +248,14 @@ def setup(self): coveragedir=self.options.coveragedir, ) node.rpc_connected = True - node.init_peers = tank["init_peers"] + node.init_peers = int(tank["init_peers"]) + self.nodes.append(node) self.tanks[tank["tank"]] = node + for ln in WARNET["lightning"]: + self.lns[ln] = LND(ln) + self.num_nodes = len(self.nodes) # Set up temp directory and start logging diff --git a/resources/scenarios/ln_init.py b/resources/scenarios/ln_init.py index 82745a123..ba54146f2 100644 --- a/resources/scenarios/ln_init.py +++ b/resources/scenarios/ln_init.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 +import threading from time import sleep from commander import Commander @@ -14,171 +15,336 @@ def add_options(self, parser): parser.usage = "warnet run /path/to/ln_init.py" def run_test(self): - self.log.info("Lock out of IBD") - miner = self.ensure_miner(self.nodes[0]) - miner_addr = 
miner.getnewaddress() - self.generatetoaddress(self.nodes[0], 1, miner_addr, sync_fun=self.no_op) + ## + # L1 P2P + ## + self.log.info("Waiting for L1 p2p network connections...") - self.log.info("Get LN nodes and wallet addresses") - ln_nodes = [] - recv_addrs = [] - for tank in self.warnet.tanks: - if tank.lnnode is not None: - recv_addrs.append(tank.lnnode.getnewaddress()) - ln_nodes.append(tank.index) + def tank_connected(self, tank): + while True: + peers = tank.getpeerinfo() + count = sum( + 1 + for peer in peers + if peer.get("connection_type") == "manual" or peer.get("addnode") is True + ) + self.log.info(f"Tank {tank.tank} connected to {count}/{tank.init_peers} peers") + if count >= tank.init_peers: + break + else: + sleep(1) - self.log.info("Fund LN wallets") + conn_threads = [ + threading.Thread(target=tank_connected, args=(self, tank)) for tank in self.nodes + ] + for thread in conn_threads: + thread.start() + + all(thread.join() is None for thread in conn_threads) + self.log.info("Network connected") + + ## + # MINER + ## + self.log.info("Setting up miner...") miner = self.ensure_miner(self.nodes[0]) miner_addr = miner.getnewaddress() - # 298 block base - self.generatetoaddress(self.nodes[0], 297, miner_addr, sync_fun=self.no_op) - # divvy up the goods - split = (miner.getbalance() - 1) // len(recv_addrs) + + def gen(n): + return self.generatetoaddress(self.nodes[0], n, miner_addr, sync_fun=self.no_op) + + self.log.info("Locking out of IBD...") + gen(1) + + ## + # WALLET ADDRESSES + ## + self.log.info("Getting LN wallet addresses...") + ln_addrs = [] + + def get_ln_addr(self, name, ln): + while True: + res = ln.newaddress() + if "address" in res: + addr = res["address"] + ln_addrs.append(addr) + self.log.info(f"Got wallet address {addr} from {name}") + break + else: + self.log.info( + f"Couldn't get wallet address from {name}:\n {res}\n wait and retry..." + ) + sleep(1) + + addr_threads = [ + threading.Thread(target=get_ln_addr, args=(self, name, ln)) + for name, ln in self.lns.items() + ] + for thread in addr_threads: + thread.start() + + all(thread.join() is None for thread in addr_threads) + self.log.info(f"Got {len(ln_addrs)} addresses from {len(self.lns)} LN nodes") + + ## + # FUNDS + ## + self.log.info("Funding LN wallets...") + # 298 block base for miner wallet + gen(297) + # divvy up the goods, except fee. 
+ # 10 UTXOs per node means 10 channel opens per node per block + split = (miner.getbalance() - 1) // len(ln_addrs) // 10 sends = {} - for addr in recv_addrs: - sends[addr] = split - miner.sendmany("", sends) + for _ in range(10): + for addr in ln_addrs: + sends[addr] = split + miner.sendmany("", sends) # confirm funds in block 299 - self.generatetoaddress(self.nodes[0], 1, miner_addr, sync_fun=self.no_op) + gen(1) self.log.info( - f"Waiting for funds to be spendable: {split} BTC each for {len(recv_addrs)} LN nodes" + f"Waiting for funds to be spendable: 10x{split} BTC UTXOs each for {len(ln_addrs)} LN nodes" ) - def funded_lnnodes(): - for tank in self.warnet.tanks: - if tank.lnnode is None: - continue - if int(tank.lnnode.get_wallet_balance()) < (split * 100000000): - return False - return True + def confirm_ln_balance(self, name, ln): + bal = 0 + while True: + bal = ln.walletbalance() + if bal >= (split * 100000000): + self.log.info(f"LN node {name} confirmed funds") + break + sleep(1) - self.wait_until(funded_lnnodes, timeout=5 * 60) + fund_threads = [ + threading.Thread(target=confirm_ln_balance, args=(self, name, ln)) + for name, ln in self.lns.items() + ] + for thread in fund_threads: + thread.start() - ln_nodes_uri = ln_nodes.copy() - while len(ln_nodes_uri) > 0: - self.log.info( - f"Waiting for all LN nodes to have URI, LN nodes remaining: {ln_nodes_uri}" - ) - for index in ln_nodes_uri: - lnnode = self.warnet.tanks[index].lnnode - if lnnode.getURI(): - ln_nodes_uri.remove(index) - sleep(5) - - self.log.info("Adding p2p connections to LN nodes") - for edge in self.warnet.graph.edges(data=True): - (src, dst, data) = edge - # Copy the L1 p2p topology (where applicable) to L2 - # so we get a more robust p2p graph for lightning - if ( - "channel_open" not in data - and self.warnet.tanks[src].lnnode - and self.warnet.tanks[dst].lnnode - ): - self.warnet.tanks[src].lnnode.connect_to_tank(dst) - - # Start confirming channel opens in block 300 - self.log.info("Opening channels, one per block") - chan_opens = [] - edges = self.warnet.graph.edges(data=True, keys=True) - edges = sorted(edges, key=lambda edge: edge[2]) - for edge in edges: - (src, dst, key, data) = edge - if "channel_open" in data: - src_node = self.warnet.get_ln_node_from_tank(src) - assert src_node is not None - assert self.warnet.get_ln_node_from_tank(dst) is not None - self.log.info(f"opening channel {src}->{dst}") - chan_pt = src_node.open_channel_to_tank(dst, data["channel_open"]) - # We can guarantee deterministic short channel IDs as long as - # the change output is greater than the channel funding output, - # which will then be output 0 - assert chan_pt[64:] == ":0" - chan_opens.append((edge, chan_pt)) - self.log.info(f" pending channel point: {chan_pt}") - self.wait_until( - lambda chan_pt=chan_pt: chan_pt[:64] in self.nodes[0].getrawmempool() - ) - self.generatetoaddress(self.nodes[0], 1, miner_addr) - assert chan_pt[:64] not in self.nodes[0].getrawmempool() - height = self.nodes[0].getblockcount() - self.log.info(f" confirmed in block {height}") + all(thread.join() is None for thread in fund_threads) + self.log.info("All LN nodes are funded") + + ## + # URIs + ## + self.log.info("Getting URIs for all LN nodes...") + ln_uris = {} + + def get_ln_uri(self, name, ln): + uri = None + while True: + uri = ln.uri() + if uri: + ln_uris[name] = uri + self.log.info(f"LN node {name} has URI {uri}") + break + sleep(1) + + uri_threads = [ + threading.Thread(target=get_ln_uri, args=(self, name, ln)) + for name, ln in 
self.lns.items() + ] + for thread in uri_threads: + thread.start() + + all(thread.join() is None for thread in uri_threads) + self.log.info("Got URIs from all LN nodes") + + ## + # P2P CONNECTIONS + ## + self.log.info("Adding p2p connections to LN nodes...") + # (source: LND, target_uri: str) tuples of LND instances + connections = [] + # Cycle graph through all LN nodes + nodes = list(self.lns.values()) + prev_node = nodes[-1] + for node in nodes: + connections.append((node, prev_node)) + prev_node = node + # Explicit connections between every pair of channel partners + for ch in self.channels: + src = self.lns[ch["source"]] + tgt = self.lns[ch["target"]] + # Avoid duplicates and reciprocals + if (src, tgt) not in connections and (tgt, src) not in connections: + connections.append((src, tgt)) + + def connect_ln(self, pair): + while True: + res = pair[0].connect(ln_uris[pair[1].name]) + if res == {}: + self.log.info(f"Connected LN nodes {pair[0].name} -> {pair[1].name}") + break + if "message" in res: + if "already connected" in res["message"]: + self.log.info( + f"Already connected LN nodes {pair[0].name} -> {pair[1].name}" + ) + break + if "process of starting" in res["message"]: + self.log.info( + f"{pair[0].name} not ready for connections yet, wait and retry..." + ) + sleep(1) + else: + self.log.info( + f"Unexpected response attempting to connect {pair[0].name} -> {pair[1].name}:\n {res}\n ABORTING" + ) + break + + p2p_threads = [ + threading.Thread(target=connect_ln, args=(self, pair)) for pair in connections + ] + for thread in p2p_threads: + thread.start() + + all(thread.join() is None for thread in p2p_threads) + self.log.info("Established all LN p2p connections") + + ## + # CHANNELS + ## + self.log.info("Opening lightning channels...") + # Sort the channels by assigned block and index + # so their channel ids are deterministic + ch_by_block = {} + for ch in self.channels: + # TODO: if "id" not in ch ... 
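+ # ch["id"] pins the funding tx to an exact block height and tx index, which is what makes the resulting short channel IDs predictable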
+ block = ch["id"]["block"] + if block not in ch_by_block: + ch_by_block[block] = [ch] + else: + ch_by_block[block].append(ch) + blocks = list(ch_by_block.keys()) + blocks = sorted(blocks) + + for target_block in blocks: + # First make sure the target block is the next block + current_height = self.nodes[0].getblockcount() + need = target_block - current_height + if need < 1: + raise Exception("Blockchain too long for deterministic channel ID") + if need > 1: + gen(need - 1) + + def open_channel(self, ch, fee_rate): + src = self.lns[ch["source"]] + tgt_uri = ln_uris[ch["target"]] + tgt_pk, _ = tgt_uri.split("@") self.log.info( - f" channel_id should be: {int.from_bytes(height.to_bytes(3, 'big') + (1).to_bytes(3, 'big') + (0).to_bytes(2, 'big'), 'big')}" + f"Sending channel open from {ch['source']} -> {ch['target']} with fee_rate={fee_rate}" ) - - # Ensure all channel opens are sufficiently confirmed - self.generatetoaddress(self.nodes[0], 10, miner_addr, sync_fun=self.no_op) - ln_nodes_gossip = ln_nodes.copy() - while len(ln_nodes_gossip) > 0: - self.log.info(f"Waiting for graph gossip sync, LN nodes remaining: {ln_nodes_gossip}") - for index in ln_nodes_gossip: - lnnode = self.warnet.tanks[index].lnnode - count_channels = len(lnnode.get_graph_channels()) - count_graph_nodes = len(lnnode.get_graph_nodes()) - if count_channels == len(chan_opens) and count_graph_nodes == len(ln_nodes): - ln_nodes_gossip.remove(index) + res = src.channel( + pk=self.hex_to_b64(tgt_pk), + local_amt=ch["local_amt"], + push_amt=ch["push_amt"], + fee_rate=fee_rate, + ) + if "result" not in res: + self.log.info( + "Unexpected channel open response:\n " + + f"From {ch['source']} -> {ch['target']} fee_rate={fee_rate}\n " + + f"{res}" + ) else: + txid = self.b64_to_hex(res["result"]["chan_pending"]["txid"], reverse=True) + ch["txid"] = txid self.log.info( - f" node {index} not synced (channels: {count_channels}/{len(chan_opens)}, nodes: {count_graph_nodes}/{len(ln_nodes)})" + f"Channel open {ch['source']} -> {ch['target']}\n " + + f"outpoint={txid}:{res['result']['chan_pending']['output_index']}\n " + + f"expected channel id: {ch['id']}" ) - sleep(5) - - self.log.info("Updating channel policies") - for edge, chan_pt in chan_opens: - (src, dst, key, data) = edge - if "target_policy" in data: - target_node = self.warnet.get_ln_node_from_tank(dst) - target_node.update_channel_policy(chan_pt, data["target_policy"]) - if "source_policy" in data: - source_node = self.warnet.get_ln_node_from_tank(src) - source_node.update_channel_policy(chan_pt, data["source_policy"]) - - while True: - self.log.info("Waiting for all channel policies to match") - score = 0 - for tank_index, me in enumerate(ln_nodes): - you = (tank_index + 1) % len(ln_nodes) - my_channels = self.warnet.tanks[me].lnnode.get_graph_channels() - your_channels = self.warnet.tanks[you].lnnode.get_graph_channels() - match = True - for _chan_index, my_chan in enumerate(my_channels): - your_chan = [ - chan - for chan in your_channels - if chan.short_chan_id == my_chan.short_chan_id - ][0] - if not your_chan: - print(f"Channel policy missing for channel: {my_chan.short_chan_id}") - match = False - break - try: - if not my_chan.channel_match(your_chan): - print( - f"Channel policy doesn't match between tanks {me} & {you}: {my_chan.short_chan_id}" - ) - match = False - break - except Exception as e: - print(f"Error comparing channel policies: {e}") - print( - f"Channel policy doesn't match between tanks {me} & {you}: {my_chan.short_chan_id}" - ) - match = False - break - 
if match: - print(f"All channel policies match between tanks {me} & {you}") - score += 1 - print(f"Score: {score} / {len(ln_nodes)}") - if score == len(ln_nodes): - break - sleep(5) + channels = sorted(ch_by_block[target_block], key=lambda ch: ch["id"]["index"]) + index = 0 + fee_rate = 5006 # s/vB, decreases by 5 per tx for up to 1000 txs per block + ch_threads = [] + for ch in channels: + index += 1 # noqa + fee_rate -= 5 + assert index == ch["id"]["index"], "Channel ID indexes are not consecutive" + assert fee_rate >= 1, "Too many TXs in block, out of fee range" + t = threading.Thread(target=open_channel, args=(self, ch, fee_rate)) + t.start() + ch_threads.append(t) - self.log.info( - f"Warnet LN ready with {len(recv_addrs)} nodes and {len(chan_opens)} channels." - ) + all(thread.join() is None for thread in ch_threads) + self.log.info(f"Waiting for {len(channels)} channel opens in mempool...") + self.wait_until( + lambda channels=channels: self.nodes[0].getmempoolinfo()["size"] >= len(channels), + timeout=500, + ) + block_hash = gen(1)[0] + self.log.info(f"Confirmed {len(channels)} channel opens in block {target_block}") + self.log.info("Checking deterministic channel IDs in block...") + block = self.nodes[0].getblock(block_hash) + block_txs = block["tx"] + block_height = block["height"] + for ch in channels: + assert ch["id"]["block"] == block_height + assert block_txs[ch["id"]["index"]] == ch["txid"] + self.log.info("👍") + + gen(5) + self.log.info(f"Confirmed {len(self.channels)} total channel opens") + + # self.log.info("Updating channel policies") + # for edge, chan_pt in chan_opens: + # (src, dst, key, data) = edge + # if "target_policy" in data: + # target_node = self.warnet.get_ln_node_from_tank(dst) + # target_node.update_channel_policy(chan_pt, data["target_policy"]) + # if "source_policy" in data: + # source_node = self.warnet.get_ln_node_from_tank(src) + # source_node.update_channel_policy(chan_pt, data["source_policy"]) + + # while True: + # self.log.info("Waiting for all channel policies to match") + # score = 0 + # for tank_index, me in enumerate(ln_nodes): + # you = (tank_index + 1) % len(ln_nodes) + # my_channels = self.warnet.tanks[me].lnnode.get_graph_channels() + # your_channels = self.warnet.tanks[you].lnnode.get_graph_channels() + # match = True + # for _chan_index, my_chan in enumerate(my_channels): + # your_chan = [ + # chan + # for chan in your_channels + # if chan.short_chan_id == my_chan.short_chan_id + # ][0] + # if not your_chan: + # print(f"Channel policy missing for channel: {my_chan.short_chan_id}") + # match = False + # break + + # try: + # if not my_chan.channel_match(your_chan): + # print( + # f"Channel policy doesn't match between tanks {me} & {you}: {my_chan.short_chan_id}" + # ) + # match = False + # break + # except Exception as e: + # print(f"Error comparing channel policies: {e}") + # print( + # f"Channel policy doesn't match between tanks {me} & {you}: {my_chan.short_chan_id}" + # ) + # match = False + # break + # if match: + # print(f"All channel policies match between tanks {me} & {you}") + # score += 1 + # print(f"Score: {score} / {len(ln_nodes)}") + # if score == len(ln_nodes): + # break + # sleep(5) + + # self.log.info( + # f"Warnet LN ready with {len(recv_addrs)} nodes and {len(chan_opens)} channels." 
+ # ) def main(): diff --git a/resources/scenarios/test_scenarios/ln_basic.py b/resources/scenarios/test_scenarios/ln_basic.py new file mode 100644 index 000000000..773ffd357 --- /dev/null +++ b/resources/scenarios/test_scenarios/ln_basic.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 + +import json + +from commander import Commander + + +class LNBasic(Commander): + def set_test_params(self): + self.num_nodes = None + + def add_options(self, parser): + parser.description = "Open a channel between two LN nodes using REST + macaroon" + parser.usage = "warnet run /path/to/ln_init.py" + + def run_test(self): + info = json.loads(self.lns["tank-0003-ln"].get("/v1/getinfo")) + uri = info["uris"][0] + pk3, host = uri.split("@") + + print( + self.lns["tank-0002-ln"].post("/v1/peers", data={"addr": {"pubkey": pk3, "host": host}}) + ) + + print( + self.lns["tank-0002-ln"].post( + "/v1/channels/stream", + data={"local_funding_amount": 100000, "node_pubkey": self.hex_to_b64(pk3)}, + ) + ) + + # Mine it ourself + self.wait_until(lambda: self.tanks["tank-0002"].getmempoolinfo()["size"] == 1) + print(self.tanks["tank-0002"].generate(5, invalid_call=False)) + + +def main(): + LNBasic().main() + + +if __name__ == "__main__": + main() diff --git a/resources/scripts/ssl/cert-gen.sh b/resources/scripts/ssl/cert-gen.sh new file mode 100755 index 000000000..c1370f884 --- /dev/null +++ b/resources/scripts/ssl/cert-gen.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# Generate the private key using the P-256 curve +openssl ecparam -name prime256v1 -genkey -noout -out tls.key + +# Generate the self-signed certificate using the configuration file +# Expires in ten years, 2034 +openssl req -x509 -new -nodes -key tls.key -days 3650 -out tls.cert -config openssl-config.cnf diff --git a/resources/scripts/ssl/openssl-config.cnf b/resources/scripts/ssl/openssl-config.cnf new file mode 100644 index 000000000..db4e4a162 --- /dev/null +++ b/resources/scripts/ssl/openssl-config.cnf @@ -0,0 +1,28 @@ +[ req ] +distinguished_name = req_distinguished_name +req_extensions = req_ext +x509_extensions = v3_ca +prompt = no + +[ req_distinguished_name ] +O = lnd autogenerated cert +CN = warnet + +[ req_ext ] +keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign +extendedKeyUsage = serverAuth +basicConstraints = critical, CA:true +subjectKeyIdentifier = hash + +[ v3_ca ] +keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign +extendedKeyUsage = serverAuth +basicConstraints = critical, CA:true +subjectKeyIdentifier = hash +subjectAltName = @alt_names + +[ alt_names ] +DNS.1 = localhost +DNS.2 = * +IP.1 = 127.0.0.1 +IP.2 = ::1 diff --git a/resources/scripts/ssl/tls.cert b/resources/scripts/ssl/tls.cert new file mode 100644 index 000000000..6cf6e306a --- /dev/null +++ b/resources/scripts/ssl/tls.cert @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB8TCCAZagAwIBAgIUJDsR6mmY+TaO9pCfjtotlbOkzJMwCgYIKoZIzj0EAwIw +MjEfMB0GA1UECgwWbG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2Fy +bmV0MB4XDTI0MTExMTE2NTM1MFoXDTM0MTEwOTE2NTM1MFowMjEfMB0GA1UECgwW +bG5kIGF1dG9nZW5lcmF0ZWQgY2VydDEPMA0GA1UEAwwGd2FybmV0MFkwEwYHKoZI +zj0CAQYIKoZIzj0DAQcDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLP +tp0fxE7hmteSt6gjQriy90fP8j9OJXBNAjt915kLY4zVvqOBiTCBhjAOBgNVHQ8B +Af8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQU5d8QMrwhLgTkDjWA+eXZGz+dybUwLwYDVR0RBCgwJoIJbG9jYWxo +b3N0ggEqhwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMAoGCCqGSM49BAMCA0kAMEYC +IQDPofN0fEl5gTwCYhk3nZbjMqJhZ8BsSJ6K8XRhxr7zbwIhAPsgQCFOqUWg632O +NEO53OQ6CIqnpxSskjsFNH4ZBQOE 
+-----END CERTIFICATE----- diff --git a/resources/scripts/ssl/tls.key b/resources/scripts/ssl/tls.key new file mode 100644 index 000000000..ca0118123 --- /dev/null +++ b/resources/scripts/ssl/tls.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIIcFtWTLQv5JaRRxdkPKkO98OrvgeztbZ7h8Ev/4UbE4oAoGCCqGSM49 +AwEHoUQDQgAEBVltIvaTlAQI/3FFatTqVflZuZdRJ0SmRMSJrFLPtp0fxE7hmteS +t6gjQriy90fP8j9OJXBNAjt915kLY4zVvg== +-----END EC PRIVATE KEY----- diff --git a/src/warnet/constants.py b/src/warnet/constants.py index 46f33a3fe..ee278cf49 100644 --- a/src/warnet/constants.py +++ b/src/warnet/constants.py @@ -20,6 +20,7 @@ TANK_MISSION = "tank" COMMANDER_MISSION = "commander" +PLUGIN_MISSION = "plugin" BITCOINCORE_CONTAINER = "bitcoincore" COMMANDER_CONTAINER = "commander" @@ -37,8 +38,15 @@ NAMESPACES_FILE = "namespaces.yaml" DEFAULTS_NAMESPACE_FILE = "namespace-defaults.yaml" +# Plugin architecture +PLUGINS_LABEL = "plugins" +PLUGIN_YAML = "plugin.yaml" +PLUGINS_DIR = RESOURCES_DIR.joinpath(PLUGINS_LABEL) +WARNET_USER_DIR_ENV_VAR = "WARNET_USER_DIR" + # Helm charts BITCOIN_CHART_LOCATION = str(CHARTS_DIR.joinpath("bitcoincore")) +LND_CHART_LOCATION = str(CHARTS_DIR.joinpath("lnd")) FORK_OBSERVER_CHART = str(CHARTS_DIR.joinpath("fork-observer")) COMMANDER_CHART = str(CHARTS_DIR.joinpath("commander")) NAMESPACES_CHART_LOCATION = CHARTS_DIR.joinpath("namespaces") diff --git a/src/warnet/control.py b/src/warnet/control.py index 83d358a4e..1105aabb3 100644 --- a/src/warnet/control.py +++ b/src/warnet/control.py @@ -23,6 +23,7 @@ COMMANDER_CHART, COMMANDER_CONTAINER, COMMANDER_MISSION, + PLUGIN_MISSION, TANK_MISSION, ) from .k8s import ( @@ -252,24 +253,7 @@ def run( if additional_args and ("--help" in additional_args or "-h" in additional_args): return subprocess.run([sys.executable, scenario_path, "--help"]) - # Collect tank data for warnet.json name = f"commander-{scenario_name.replace('_', '')}-{int(time.time())}" - tankpods = get_mission("tank") - tanks = [ - { - "tank": tank.metadata.name, - "chain": tank.metadata.labels["chain"], - "rpc_host": tank.status.pod_ip, - "rpc_port": int(tank.metadata.labels["RPCPort"]), - "rpc_user": "user", - "rpc_password": tank.metadata.labels["rpcpassword"], - "init_peers": [], - } - for tank in tankpods - ] - - # Encode tank data for warnet.json - warnet_data = json.dumps(tanks).encode() # Create in-memory buffer to store python archive instead of writing to disk archive_buffer = io.BytesIO() @@ -343,8 +327,6 @@ def filter(path): # upload scenario files and network data to the init container wait_for_init(name, namespace=namespace) if write_file_to_container( - name, "init", "/shared/warnet.json", warnet_data, namespace=namespace - ) and write_file_to_container( name, "init", "/shared/archive.pyz", archive_data, namespace=namespace ): print(f"Successfully uploaded scenario data to commander: {scenario_name}") @@ -378,8 +360,10 @@ def format_pods(pods: list[V1Pod]) -> list[str]: pod_list = [] formatted_commanders = format_pods(get_mission(COMMANDER_MISSION)) formatted_tanks = format_pods(get_mission(TANK_MISSION)) + formatted_plugins = format_pods(get_mission(PLUGIN_MISSION)) pod_list.extend(formatted_commanders) pod_list.extend(formatted_tanks) + pod_list.extend(formatted_plugins) except Exception as e: print(f"Could not fetch any pods in namespace ({namespace}): {e}") diff --git a/src/warnet/deploy.py b/src/warnet/deploy.py index d9b5a45b5..59d8d057f 100644 --- a/src/warnet/deploy.py +++ b/src/warnet/deploy.py @@ -260,6 +260,7 @@ def deploy_network(directory: 
Path, debug: bool = False, namespace: Optional[str if not stream_command(cmd): click.echo(f"Failed to run Helm command: {cmd}") return + except Exception as e: click.echo(f"Error: {e}") return diff --git a/src/warnet/graph.py b/src/warnet/graph.py index 390686486..e10caff36 100644 --- a/src/warnet/graph.py +++ b/src/warnet/graph.py @@ -1,3 +1,4 @@ +import json import os import random import sys @@ -226,3 +227,90 @@ def create(): fg="yellow", ) return False + + +@click.command() +@click.argument("graph_file_path", type=click.Path(exists=True, file_okay=True, dir_okay=False)) +@click.argument("output_path", type=click.Path(exists=False, file_okay=False, dir_okay=True)) +def import_network(graph_file_path: str, output_path: str): + """Create a network from an imported lightning network graph JSON""" + print(_import_network(graph_file_path, output_path)) + + +def _import_network(graph_file_path, output_path): + output_path = Path(output_path) + graph_file_path = Path(graph_file_path).resolve() + with open(graph_file_path) as graph_file: + graph = json.loads(graph_file.read()) + + tanks = {} + pk_to_tank = {} + tank_to_pk = {} + index = 0 + for node in graph["nodes"]: + tank = f"tank-{index:04d}" + pk_to_tank[node["pub_key"]] = tank + tank_to_pk[tank] = node["pub_key"] + tanks[tank] = {"name": tank, "ln": {"lnd": True}, "lnd": {"channels": []}} + index += 1 + print(f"Imported {index} nodes") + + sorted_edges = sorted(graph["edges"], key=lambda x: int(x["channel_id"])) + + supported_policies = [ + "base_fee_msat", + "fee_rate_ppm", + "time_lock_delta", + "min_htlc_msat", + "max_htlc_msat", + ] + + for_fuck_sake_lnd_what_is_your_fucking_problem = {"min_htlc": "min_htlc_msat"} + + def import_policy(json_policy): + for ugh in for_fuck_sake_lnd_what_is_your_fucking_problem: + if ugh in json_policy: + new_key = for_fuck_sake_lnd_what_is_your_fucking_problem[ugh] + json_policy[new_key] = json_policy[ugh] + return {key: int(json_policy[key]) for key in supported_policies if key in json_policy} + + # By default we start including channel open txs in block 300 + block = 300 + # Coinbase occupies the 0 position! 
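+ # so channel open txs take indexes 1 through 1000 in each block; past 1000 we roll over to the next block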
+ index = 1 + count = 0 + for edge in sorted_edges: + source = pk_to_tank[edge["node1_pub"]] + channel = { + "id": {"block": block, "index": index}, + "target": pk_to_tank[edge["node2_pub"]] + "-ln", + "local_amt": int(edge["capacity"]), + "push_amt": int(edge["capacity"]) // 2, + "source_policy": import_policy(edge["node1_policy"]), + "target_policy": import_policy(edge["node2_policy"]), + } + tanks[source]["lnd"]["channels"].append(channel) + index += 1 + if index > 1000: + index = 1 + block += 1 + count += 1 + + print(f"Imported {count} channels") + + network = {"nodes": []} + prev_node_name = list(tanks.keys())[-1] + for name, obj in tanks.items(): + obj["name"] = name + obj["addnode"] = [prev_node_name] + prev_node_name = name + network["nodes"].append(obj) + + output_path.mkdir(parents=True, exist_ok=True) + # This file must exist and must contain at least one line of valid yaml + with open(output_path / "node-defaults.yaml", "w") as f: + f.write(f"imported_from: {graph_file_path}\n") + # Here's the good stuff + with open(output_path / "network.yaml", "w") as f: + f.write(yaml.dump(network, sort_keys=False)) + return f"Network created in {output_path.resolve()}" diff --git a/src/warnet/k8s.py b/src/warnet/k8s.py index 9354eb903..8a1a65bce 100644 --- a/src/warnet/k8s.py +++ b/src/warnet/k8s.py @@ -1,6 +1,7 @@ import json import os import sys +import tarfile import tempfile from pathlib import Path from time import sleep @@ -60,6 +61,22 @@ def get_pod(name: str, namespace: Optional[str] = None) -> V1Pod: return sclient.read_namespaced_pod(name=name, namespace=namespace) +def get_pods_with_label(label_selector: str, namespace: Optional[str] = None) -> list[V1Pod]: + """Get a list of pods by label. + Label example: "mission=lightning" + """ + namespace = get_default_namespace_or(namespace) + v1 = get_static_client() + + try: + pods = v1.list_namespaced_pod(namespace=namespace, label_selector=label_selector) + v1_pods = [pod for pod in pods.items] + return v1_pods + except client.exceptions.ApiException as e: + print(f"Error fetching pods: {e}") + return [] + + def get_mission(mission: str) -> list[V1Pod]: pods = get_pods() crew: list[V1Pod] = [] @@ -83,11 +100,19 @@ def get_pod_exit_status(pod_name, namespace: Optional[str] = None): return None -def get_edges(namespace: Optional[str] = None) -> any: +def get_channels(namespace: Optional[str] = None) -> any: namespace = get_default_namespace_or(namespace) sclient = get_static_client() - configmap = sclient.read_namespaced_config_map(name="edges", namespace=namespace) - return json.loads(configmap.data["data"]) + config_maps = sclient.list_namespaced_config_map( + namespace=namespace, label_selector="channels=true" + ) + channels = [] + for cm in config_maps.items: + channel_jsons = json.loads(cm.data["channels"]) + for channel_json in channel_jsons: + channel_json["source"] = cm.data["source"] + channels.append(channel_json) + return channels def create_kubernetes_object( @@ -537,3 +562,50 @@ def write_kubeconfig(kube_config: dict, kubeconfig_path: str) -> None: except Exception as e: os.remove(temp_file.name) raise K8sError(f"Error writing kubeconfig: {kubeconfig_path}") from e + + +def download( + pod_name: str, + source_path: Path, + destination_path: Path = Path("."), + namespace: Optional[str] = None, +) -> Path: + """Download the item from the `source_path` to the `destination_path`""" + + namespace = get_default_namespace_or(namespace) + + v1 = get_static_client() + + target_folder = destination_path / source_path.stem + 
os.makedirs(target_folder, exist_ok=True) + + command = ["tar", "cf", "-", "-C", str(source_path.parent), str(source_path.name)] + + resp = stream( + v1.connect_get_namespaced_pod_exec, + name=pod_name, + namespace=namespace, + command=command, + stderr=True, + stdin=False, + stdout=True, + tty=False, + _preload_content=False, + ) + + tar_file = target_folder.with_suffix(".tar") + with open(tar_file, "wb") as f: + while resp.is_open(): + resp.update(timeout=1) + if resp.peek_stdout(): + f.write(resp.read_stdout().encode("utf-8")) + if resp.peek_stderr(): + print(resp.read_stderr()) + resp.close() + + with tarfile.open(tar_file, "r") as tar: + tar.extractall(path=destination_path) + + os.remove(tar_file) + + return destination_path diff --git a/src/warnet/ln.py b/src/warnet/ln.py index ade55759e..6296cf6ed 100644 --- a/src/warnet/ln.py +++ b/src/warnet/ln.py @@ -1,6 +1,14 @@ +import json +from typing import Optional + import click -from .rpc import rpc_call +from .k8s import ( + get_channels, + get_default_namespace_or, + get_pod, +) +from .process import run_command @click.group(name="ln") @@ -9,31 +17,83 @@ def ln(): @ln.command(context_settings={"ignore_unknown_options": True}) -@click.argument("node", type=int) -@click.argument("command", type=str, required=True, nargs=-1) -@click.option("--network", default="warnet", show_default=True, type=str) -def rpc(node: int, command: tuple, network: str): +@click.argument("pod", type=str) +@click.argument("method", type=str) +@click.argument("params", type=str, nargs=-1) # this will capture all remaining arguments +@click.option("--namespace", default=None, show_default=True) +def rpc(pod: str, method: str, params: str, namespace: Optional[str]): """ - Call lightning cli rpc on in [network] + Call lightning cli rpc on """ - print( - rpc_call( - "tank_lncli", - {"network": network, "node": node, "command": command}, - ) - ) + print(_rpc(pod, method, params, namespace)) -@ln.command(context_settings={"ignore_unknown_options": True}) -@click.argument("node", type=int) -@click.option("--network", default="warnet", show_default=True, type=str) -def pubkey(node: int, network: str): +def _rpc(pod_name: str, method: str, params: str = "", namespace: Optional[str] = None): + pod = get_pod(pod_name) + namespace = get_default_namespace_or(namespace) + chain = pod.metadata.labels["chain"] + cmd = f"kubectl -n {namespace} exec {pod_name} -- lncli --network {chain} {method} {' '.join(map(str, params))}" + return run_command(cmd) + + +@ln.command() +@click.argument("pod", type=str) +def pubkey( + pod: str, +): + """ + Get lightning node pub key from + """ + print(_pubkey(pod)) + + +def _pubkey(pod: str): + info = _rpc(pod, "getinfo") + return json.loads(info)["identity_pubkey"] + + +@ln.command() +@click.argument("pod", type=str) +def host( + pod: str, +): + """ + Get lightning node host from + """ + print(_host(pod)) + + +def _host(pod): + info = _rpc(pod, "getinfo") + uris = json.loads(info)["uris"] + if uris and len(uris) >= 0: + return uris[0].split("@")[1] + else: + return "" + + +@ln.command() +def open_all_channels(): """ - Get lightning node pub key on in [network] + Open all channels with source policies defined in the network.yaml + IGNORES HARD CODED CHANNEL IDs + Should only be run once or you'll end up with duplicate channels """ - print( - rpc_call( - "tank_ln_pub_key", - {"network": network, "node": node}, + channels = get_channels() + commands = [] + for ch in channels: + pk = _pubkey(ch["target"]) + host = _host(ch["target"]) + local_amt = 
ch["local_amt"] + push_amt = ch.get("push_amt", 0) + assert pk, f"{ch['target']} has no public key" + assert host, f"{ch['target']} has no host" + assert local_amt, "Channel has no local_amount" + commands.append( + ( + ch["source"], + f"openchannel --node_key {pk} --connect {host} --local_amt {local_amt} --push_amt {push_amt}", + ) ) - ) + for command in commands: + _rpc(*command) diff --git a/src/warnet/main.py b/src/warnet/main.py index 76893575c..64887c6ee 100644 --- a/src/warnet/main.py +++ b/src/warnet/main.py @@ -5,8 +5,10 @@ from .control import down, logs, run, snapshot, stop from .dashboard import dashboard from .deploy import deploy -from .graph import create, graph +from .graph import create, graph, import_network from .image import image +from .ln import ln +from .plugins import load_plugins, plugins from .project import init, new, setup from .status import status from .users import auth @@ -24,9 +26,11 @@ def cli(): cli.add_command(down) cli.add_command(dashboard) cli.add_command(graph) +cli.add_command(import_network) cli.add_command(image) cli.add_command(init) cli.add_command(logs) +cli.add_command(ln) cli.add_command(new) cli.add_command(run) cli.add_command(setup) @@ -34,6 +38,12 @@ def cli(): cli.add_command(status) cli.add_command(stop) cli.add_command(create) +cli.add_command(plugins) + + +@load_plugins +def load_early(): + pass if __name__ == "__main__": diff --git a/src/warnet/network.py b/src/warnet/network.py index a894cafc9..e6658ae8c 100644 --- a/src/warnet/network.py +++ b/src/warnet/network.py @@ -7,6 +7,7 @@ from .bitcoin import _rpc from .constants import ( NETWORK_DIR, + PLUGINS_DIR, SCENARIOS_DIR, ) from .k8s import get_mission @@ -48,6 +49,16 @@ def copy_scenario_defaults(directory: Path): ) +def copy_plugins_defaults(directory: Path): + """Create the project structure for a warnet project's scenarios""" + copy_defaults( + directory, + PLUGINS_DIR.name, + PLUGINS_DIR, + ["__pycache__", "__init__"], + ) + + def is_connection_manual(peer): # newer nodes specify a "connection_type" return bool(peer.get("connection_type") == "manual" or peer.get("addnode") is True) diff --git a/src/warnet/plugins.py b/src/warnet/plugins.py new file mode 100644 index 000000000..e91be5f16 --- /dev/null +++ b/src/warnet/plugins.py @@ -0,0 +1,198 @@ +import copy +import importlib.util +import inspect +import os +import sys +import tempfile +from pathlib import Path +from types import ModuleType +from typing import Any, Callable, Optional + +import click +import inquirer +import yaml +from inquirer.themes import GreenPassion + +from warnet.constants import ( + PLUGIN_YAML, + PLUGINS_LABEL, + WARNET_USER_DIR_ENV_VAR, +) + + +class PluginError(Exception): + pass + + +hook_registry: set[Callable[..., Any]] = set() +imported_modules: dict[str, ModuleType] = {} + + +@click.group(name=PLUGINS_LABEL) +def plugins(): + """Control plugins""" + pass + + +@plugins.command() +def ls(): + """List all available plugins and whether they are activated""" + plugin_dir = _get_plugins_directory() + if plugin_dir is None: + direct_user_to_plugin_directory_and_exit() + + for plugin, status in get_plugins_with_status(plugin_dir): + if status: + click.secho(f"{plugin.stem:<20} enabled", fg="green") + else: + click.secho(f"{plugin.stem:<20} disabled", fg="yellow") + + +@plugins.command() +@click.argument("plugin", type=str, default="") +def toggle(plugin: str): + """Toggle a plugin on or off""" + plugin_dir = _get_plugins_directory() + if plugin_dir is None: + direct_user_to_plugin_directory_and_exit() + 
+ if plugin == "": + plugin_list = get_plugins_with_status(plugin_dir) + formatted_list = [ + f"{str(name.stem):<25} ◦ enabled: {active}" for name, active in plugin_list + ] + + plugins_tag = "plugins" + try: + q = [ + inquirer.List( + name=plugins_tag, + message="Toggle a plugin, or ctrl-c to cancel", + choices=formatted_list, + ) + ] + selected = inquirer.prompt(q, theme=GreenPassion()) + plugin = selected[plugins_tag].split("◦")[0].strip() + except TypeError: + # user cancels and `selected[plugins_tag]` fails with TypeError + sys.exit(0) + + plugin_settings = read_yaml(plugin_dir / Path(plugin) / PLUGIN_YAML) + updated_settings = copy.deepcopy(plugin_settings) + updated_settings["enabled"] = not plugin_settings["enabled"] + write_yaml(updated_settings, plugin_dir / Path(plugin) / Path(PLUGIN_YAML)) + + +def load_user_modules() -> bool: + was_successful_load = False + + plugin_dir = _get_plugins_directory() + + if not plugin_dir or not plugin_dir.is_dir(): + return was_successful_load + + enabled_plugins = [plugin for plugin, enabled in get_plugins_with_status(plugin_dir) if enabled] + + if not enabled_plugins: + return was_successful_load + + # Temporarily add the directory to sys.path for imports + sys.path.insert(0, str(plugin_dir)) + + for plugin_path in enabled_plugins: + for file in plugin_path.glob("*.py"): + if file.stem not in ("__init__",): + module_name = f"{PLUGINS_LABEL}.{file.stem}" + spec = importlib.util.spec_from_file_location(module_name, file) + module = importlib.util.module_from_spec(spec) + imported_modules[module_name] = module + sys.modules[module_name] = module + spec.loader.exec_module(module) + was_successful_load = True + + # Remove the added path from sys.path + sys.path.pop(0) + return was_successful_load + + +def register_command(command): + """Register a command to the CLI.""" + from warnet.main import cli + + register = cli.commands.get(PLUGINS_LABEL) + register.add_command(command) + + +def load_plugins(fn): + load_user_modules() + for module in imported_modules.values(): + for name, func in inspect.getmembers(module, inspect.isfunction): + if name == "warnet_register_plugin": + func(register_command) + + +def _get_plugins_directory() -> Optional[Path]: + user_dir = os.getenv(WARNET_USER_DIR_ENV_VAR) + + plugin_dir = Path(user_dir) / PLUGINS_LABEL if user_dir else Path.cwd() / PLUGINS_LABEL + + if plugin_dir and plugin_dir.is_dir(): + return plugin_dir + else: + return None + + +def direct_user_to_plugin_directory_and_exit(): + click.secho("Could not determine the plugin directory location.") + click.secho( + "Solution 1: try running this command again, but this time from your initialized warnet directory." 
+ ) + click.secho( + "Solution 2: consider setting environment variable pointing to your Warnet project directory:" + ) + click.secho(f"export {WARNET_USER_DIR_ENV_VAR}=/home/user/path/to/project/", fg="yellow") + sys.exit(1) + + +def read_yaml(path: Path) -> dict: + try: + with open(path) as file: + return yaml.safe_load(file) + except FileNotFoundError as e: + raise PluginError(f"YAML file {path} not found.") from e + except yaml.YAMLError as e: + raise PluginError(f"Error parsing yaml: {e}") from e + + +def write_yaml(yaml_dict: dict, path: Path) -> None: + dir_name = os.path.dirname(path) + try: + with tempfile.NamedTemporaryFile("w", dir=dir_name, delete=False) as temp_file: + yaml.safe_dump(yaml_dict, temp_file) + os.replace(temp_file.name, path) + except Exception as e: + os.remove(temp_file.name) + raise PluginError(f"Error writing kubeconfig: {path}") from e + + +def check_if_plugin_enabled(path: Path) -> bool: + enabled = None + try: + plugin_dict = read_yaml(path / Path("plugin.yaml")) + enabled = plugin_dict.get("enabled") + except PluginError as e: + click.secho(e) + + return bool(enabled) + + +def get_plugins_with_status(plugin_dir: Optional[Path] = None) -> list[tuple[Path, bool]]: + if not plugin_dir: + plugin_dir = _get_plugins_directory() + candidates = [ + Path(os.path.join(plugin_dir, name)) + for name in os.listdir(plugin_dir) + if os.path.isdir(os.path.join(plugin_dir, name)) + ] + plugins = [plugin_dir for plugin_dir in candidates if any(plugin_dir.glob("plugin.yaml"))] + return [(plugin, check_if_plugin_enabled(plugin)) for plugin in plugins] diff --git a/src/warnet/project.py b/src/warnet/project.py index 67b063fcd..c4122d916 100644 --- a/src/warnet/project.py +++ b/src/warnet/project.py @@ -26,7 +26,7 @@ KUBECTL_DOWNLOAD_URL_STUB, ) from .graph import inquirer_create_network -from .network import copy_network_defaults, copy_scenario_defaults +from .network import copy_network_defaults, copy_plugins_defaults, copy_scenario_defaults @click.command() @@ -387,6 +387,7 @@ def create_warnet_project(directory: Path, check_empty: bool = False): try: copy_network_defaults(directory) copy_scenario_defaults(directory) + copy_plugins_defaults(directory) click.echo(f"Copied network example files to {directory}/networks") click.echo(f"Created warnet project structure in {directory}") except Exception as e: diff --git a/test/data/ln.graphml b/test/data/ln.graphml deleted file mode 100644 index efd0c359f..000000000 --- a/test/data/ln.graphml +++ /dev/null @@ -1,66 +0,0 @@ - - - - - - - - - - - - - - - - - - - simln - - 27.0 - lnd - lightninglabs/lnd:v0.17.5-beta - true - - - 27.0 - lnd - pinheadmz/circuitbreaker:278737d - true - - - 27.0 - lnd - pinheadmz/circuitbreaker:278737d - --bitcoin.timelockdelta=33 - - - 27.0 - cln - --cltv-delta=33 - - - 27.0 - - - - - - - - - - --local_amt=100000 - --base_fee_msat=2200 --fee_rate_ppm=13 --time_lock_delta=20 - - - --local_amt=100000 --push_amt=50000 - --base_fee_msat=5500 --fee_rate_ppm=3 --time_lock_delta=40 - - - amount=100000 push_msat=50000000 - feebase=5500 feeppm=3 - - - \ No newline at end of file diff --git a/test/data/ln/network.yaml b/test/data/ln/network.yaml new file mode 100644 index 000000000..d1c135242 --- /dev/null +++ b/test/data/ln/network.yaml @@ -0,0 +1,54 @@ +nodes: + - name: tank-0000 + addnode: + - tank-0001 + ln: + lnd: true + + - name: tank-0001 + addnode: + - tank-0002 + ln: + lnd: true + + - name: tank-0002 + addnode: + - tank-0000 + ln: + lnd: true + + - name: tank-0003 + addnode: + - tank-0000 + ln: + lnd: 
true + lnd: + config: | + bitcoin.timelockdelta=33 + channels: + - id: + block: 300 + index: 1 + target: tank-0004-ln + local_amt: 100000 + push_amt: 50000 + + - name: tank-0004 + addnode: + - tank-0000 + ln: + lnd: true + lnd: + channels: + - id: + block: 300 + index: 2 + target: tank-0005-ln + local_amt: 50000 + push_amt: 25000 + + - name: tank-0005 + addnode: + - tank-0000 + ln: + lnd: true \ No newline at end of file diff --git a/test/data/ln/node-defaults.yaml b/test/data/ln/node-defaults.yaml new file mode 100644 index 000000000..884ad1343 --- /dev/null +++ b/test/data/ln/node-defaults.yaml @@ -0,0 +1,8 @@ +image: + repository: bitcoindevproject/bitcoin + pullPolicy: IfNotPresent + tag: "27.0" + +lnd: + defaultConfig: | + color=#000000 \ No newline at end of file diff --git a/test/data/signet/node-defaults.yaml b/test/data/signet/node-defaults.yaml index aea980d6a..941f03881 100644 --- a/test/data/signet/node-defaults.yaml +++ b/test/data/signet/node-defaults.yaml @@ -3,7 +3,8 @@ image: pullPolicy: Always tag: "27.0" -chain: signet +global: + chain: signet spec: restartPolicy: Never diff --git a/test/hooks_test.py b/test/hooks_test.py new file mode 100755 index 000000000..70d834fe4 --- /dev/null +++ b/test/hooks_test.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +import os +from pathlib import Path + +import pexpect +from test_base import TestBase + + +class HooksTest(TestBase): + def __init__(self): + super().__init__() + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "12_node_ring" + + def run_test(self): + try: + os.chdir(self.tmpdir) + self.setup_network() + self.generate_plugin_dir() + + finally: + self.cleanup() + + def setup_network(self): + self.log.info("Setting up network") + self.log.info(self.warnet(f"deploy {self.network_dir}")) + self.wait_for_all_tanks_status(target="running") + self.wait_for_all_edges() + + def generate_plugin_dir(self): + self.log.info("Generating the plugin directroy") + self.sut = pexpect.spawn("warnet init") + self.sut.expect("Do you want to create a custom network?", timeout=10) + self.sut.sendline("n") + plugin_dir = Path(os.getcwd()) / "plugins" + assert plugin_dir.exists() + + +if __name__ == "__main__": + test = HooksTest() + test.run_test() diff --git a/test/ln_basic_test.py b/test/ln_basic_test.py new file mode 100755 index 000000000..f32d93bd8 --- /dev/null +++ b/test/ln_basic_test.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python3 + +import json +import os +from pathlib import Path +from time import sleep + +from test_base import TestBase + + +class LNBasicTest(TestBase): + def __init__(self): + super().__init__() + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "ln" + self.scen_dir = Path(os.path.dirname(__file__)).parent / "resources" / "scenarios" + self.lns = [ + "tank-0000-ln", + "tank-0001-ln", + "tank-0002-ln", + "tank-0003-ln", + "tank-0004-ln", + "tank-0005-ln", + ] + + def run_test(self): + try: + # Wait for all nodes to wake up + self.setup_network() + # Send money to all LN nodes + self.fund_wallets() + + # Manually open two channels between first three nodes + # and send a payment using warnet RPC + self.manual_open_channels() + self.wait_for_gossip_sync(self.lns[:3], 2) + self.pay_invoice(sender="tank-0000-ln", recipient="tank-0002-ln") + + # Automatically open channels from network.yaml using warnet RPC + self.automatic_open_channels() + self.wait_for_gossip_sync(self.lns[3:], 2) + # push_amt should enable payments from target to source + self.pay_invoice(sender="tank-0005-ln", 
recipient="tank-0003-ln") + + # Automatically open channels from inside a scenario commander + self.scenario_open_channels() + self.pay_invoice(sender="tank-0002-ln", recipient="tank-0003-ln") + + finally: + self.cleanup() + + def setup_network(self): + self.log.info("Setting up network") + self.log.info(self.warnet(f"deploy {self.network_dir}")) + self.wait_for_all_tanks_status(target="running") + + self.warnet("bitcoin rpc tank-0000 createwallet miner") + self.warnet("bitcoin rpc tank-0000 -generate 110") + self.wait_for_predicate( + lambda: int(self.warnet("bitcoin rpc tank-0000 getblockcount")) > 100 + ) + + def wait_for_all_ln_rpc(): + for ln in self.lns: + try: + self.warnet(f"ln rpc {ln} getinfo") + except Exception: + print(f"LN node {ln} not ready for rpc yet") + return False + return True + + self.wait_for_predicate(wait_for_all_ln_rpc) + + def fund_wallets(self): + outputs = "" + for lnd in self.lns: + addr = json.loads(self.warnet(f"ln rpc {lnd} newaddress p2wkh"))["address"] + outputs += f',"{addr}":10' + # trim first comma + outputs = outputs[1:] + + self.warnet("bitcoin rpc tank-0000 sendmany '' '{" + outputs + "}'") + self.warnet("bitcoin rpc tank-0000 -generate 1") + + def wait_for_two_txs(self): + self.wait_for_predicate( + lambda: json.loads(self.warnet("bitcoin rpc tank-0000 getmempoolinfo"))["size"] == 2 + ) + + def manual_open_channels(self): + # 0 -> 1 -> 2 + pk1 = self.warnet("ln pubkey tank-0001-ln") + pk2 = self.warnet("ln pubkey tank-0002-ln") + self.log.info(f"pk1: {pk1}") + self.log.info(f"pk2: {pk2}") + + host1 = "" + host2 = "" + + while not host1 or not host2: + if not host1: + host1 = self.warnet("ln host tank-0001-ln") + if not host2: + host2 = self.warnet("ln host tank-0002-ln") + sleep(1) + + print( + self.warnet( + f"ln rpc tank-0000-ln openchannel --node_key {pk1} --local_amt 100000 --connect {host1}" + ) + ) + print( + self.warnet( + f"ln rpc tank-0001-ln openchannel --node_key {pk2} --local_amt 100000 --connect {host2}" + ) + ) + + self.wait_for_two_txs() + + self.warnet("bitcoin rpc tank-0000 -generate 10") + + def wait_for_gossip_sync(self, nodes, expected): + while len(nodes) > 0: + for node in nodes: + chs = json.loads(self.warnet(f"ln rpc {node} describegraph"))["edges"] + if len(chs) >= expected: + nodes.remove(node) + sleep(1) + + def pay_invoice(self, sender: str, recipient: str): + init_balance = int(json.loads(self.warnet(f"ln rpc {recipient} channelbalance"))["balance"]) + inv = json.loads(self.warnet(f"ln rpc {recipient} addinvoice --amt 1000")) + print(inv) + print(self.warnet(f"ln rpc {sender} payinvoice -f {inv['payment_request']}")) + + def wait_for_success(): + return ( + int(json.loads(self.warnet(f"ln rpc {recipient} channelbalance"))["balance"]) + == init_balance + 1000 + ) + + self.wait_for_predicate(wait_for_success) + + def automatic_open_channels(self): + # 3 -> 4 -> 5 + self.warnet("ln open-all-channels") + + self.wait_for_two_txs() + + self.warnet("bitcoin rpc tank-0000 -generate 10") + + def scenario_open_channels(self): + # 2 -> 3 + # connecting all six ln nodes in the graph + scenario_file = self.scen_dir / "test_scenarios" / "ln_basic.py" + self.log.info(f"Running scenario from: {scenario_file}") + self.warnet(f"run {scenario_file} --source_dir={self.scen_dir} --debug") + + +if __name__ == "__main__": + test = LNBasicTest() + test.run_test() diff --git a/test/ln_test.py b/test/ln_test.py index 576846b6b..0a58104aa 100755 --- a/test/ln_test.py +++ b/test/ln_test.py @@ -6,53 +6,52 @@ from test_base import TestBase -from 
warnet.services import ServiceType +from warnet.process import run_command +from warnet.k8s import get_pods_with_label class LNTest(TestBase): def __init__(self): super().__init__() - self.graph_file_path = Path(os.path.dirname(__file__)) / "data" / "ln.graphml" + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "ln" def run_test(self): - self.start_server() try: self.setup_network() self.run_ln_init_scenario() self.test_channel_policies() self.test_ln_payment_0_to_2() self.test_ln_payment_2_to_0() - self.test_simln() + # self.test_simln() finally: self.cleanup() def setup_network(self): self.log.info("Setting up network") - self.log.info(self.warnet(f"network start {self.graph_file_path}")) + self.log.info(self.warnet(f"deploy {self.network_dir}")) self.wait_for_all_tanks_status(target="running") self.wait_for_all_edges() - def get_cb_forwards(self, index): - cmd = "wget -q -O - 127.0.0.1:9235/api/forwarding_history" - res = self.wait_for_rpc( - "exec_run", [index, ServiceType.CIRCUITBREAKER.value, cmd, self.network_name] - ) + def get_cb_forwards(self, pod: str): + cmd = f"kubectl exec {pod} -- wget -q -O - 127.0.0.1:9235/api/forwarding_history" + res = run_command(cmd) return json.loads(res) def run_ln_init_scenario(self): self.log.info("Running LN Init scenario") - self.warnet("bitcoin rpc 0 getblockcount") + self.warnet("bitcoin rpc tank-0000 getblockcount") self.warnet("scenarios run ln_init") self.wait_for_all_scenarios() - scenario_return_code = self.get_scenario_return_code("ln_init") - if scenario_return_code != 0: - raise Exception("LN Init scenario failed") def test_channel_policies(self): self.log.info("Ensuring node-level channel policy settings") - node2pub, node2host = json.loads(self.warnet("ln rpc 2 getinfo"))["uris"][0].split("@") - chan_id = json.loads(self.warnet("ln rpc 2 listchannels"))["channels"][0]["chan_id"] - chan = json.loads(self.warnet(f"ln rpc 2 getchaninfo {chan_id}")) + node2pub, node2host = json.loads(self.warnet("ln rpc tank-0002-ln getinfo"))["uris"][ + 0 + ].split("@") + chan_id = json.loads(self.warnet("ln rpc tank-0002-ln listchannels"))["channels"][0][ + "chan_id" + ] + chan = json.loads(self.warnet(f"ln rpc tank-0002-ln getchaninfo {chan_id}")) # node_1 or node_2 is tank 2 with its non-default --bitcoin.timelockdelta=33 if chan["node1_policy"]["time_lock_delta"] != 33: @@ -61,46 +60,60 @@ def test_channel_policies(self): ), "Expected time_lock_delta to be 33" self.log.info("Ensuring no circuit breaker forwards yet") - assert len(self.get_cb_forwards(1)["forwards"]) == 0, "Expected no circuit breaker forwards" + assert ( + len(self.get_cb_forwards("tank-0001-ln")["forwards"]) == 0 + ), "Expected no circuit breaker forwards" def test_ln_payment_0_to_2(self): self.log.info("Test LN payment from 0 -> 2") - inv = json.loads(self.warnet("ln rpc 2 addinvoice --amt=2000"))["payment_request"] - self.log.info(f"Got invoice from node 2: {inv}") + inv = json.loads(self.warnet("ln rpc tank-0002-ln addinvoice --amt=2000"))[ + "payment_request" + ] + self.log.info(f"Got invoice from node tank-0002-ln: {inv}") self.log.info("Paying invoice from node 0...") - self.log.info(self.warnet(f"ln rpc 0 payinvoice -f {inv}")) + self.log.info(self.warnet(f"ln rpc tank-0000-ln payinvoice -f {inv}")) self.wait_for_predicate(self.check_invoice_settled) self.log.info("Ensuring channel-level channel policy settings: source") - payment = json.loads(self.warnet("ln rpc 0 listpayments"))["payments"][0] + payment = json.loads(self.warnet("ln rpc tank-0000-ln 
listpayments"))["payments"][0] assert ( payment["fee_msat"] == "5506" ), f"Expected fee_msat to be 5506, got {payment['fee_msat']}" self.log.info("Ensuring circuit breaker tracked payment") - assert len(self.get_cb_forwards(1)["forwards"]) == 1, "Expected one circuit breaker forward" + assert ( + len(self.get_cb_forwards("tank-0001-ln")["forwards"]) == 1 + ), "Expected one circuit breaker forward" def test_ln_payment_2_to_0(self): self.log.info("Test LN payment from 2 -> 0") - inv = json.loads(self.warnet("ln rpc 0 addinvoice --amt=1000"))["payment_request"] + inv = json.loads(self.warnet("ln rpc tank-0000-ln addinvoice --amt=1000"))[ + "payment_request" + ] self.log.info(f"Got invoice from node 0: {inv}") self.log.info("Paying invoice from node 2...") - self.log.info(self.warnet(f"ln rpc 2 payinvoice -f {inv}")) + self.log.info(self.warnet(f"ln rpc tank-0002-ln payinvoice -f {inv}")) - self.wait_for_predicate(lambda: self.check_invoices(0) == 1) + self.wait_for_predicate(lambda: self.check_invoices("tank-0000-ln") == 1) self.log.info("Ensuring channel-level channel policy settings: target") - payment = json.loads(self.warnet("ln rpc 2 listpayments"))["payments"][0] + payment = json.loads(self.warnet("ln rpc tank-0002-ln listpayments"))["payments"][0] assert ( payment["fee_msat"] == "2213" ), f"Expected fee_msat to be 2213, got {payment['fee_msat']}" def test_simln(self): self.log.info("Engaging simln") - node2pub, _ = json.loads(self.warnet("ln rpc 2 getinfo"))["uris"][0].split("@") + pods = get_pods_with_label("mission=lightning") + node2pub = self.warnet(f"ln pubkey {pods[1].metadata.name}") activity = [ - {"source": "ln-0", "destination": node2pub, "interval_secs": 1, "amount_msat": 2000} + { + "source": pods[0].metadata.name, + "destination": node2pub, + "interval_secs": 1, + "amount_msat": 2000, + } ] self.warnet( f"network export --exclude=[1] --activity={json.dumps(activity).replace(' ', '')}" @@ -110,16 +123,16 @@ def test_simln(self): assert self.check_invoices(1) == 0, "Expected no invoices for node 1" def check_invoice_settled(self): - invs = json.loads(self.warnet("ln rpc 2 listinvoices"))["invoices"] + invs = json.loads(self.warnet("ln rpc tank-0002-ln listinvoices"))["invoices"] if len(invs) > 0 and invs[0]["state"] == "SETTLED": self.log.info("Invoice settled") return True return False - def check_invoices(self, index): - invs = json.loads(self.warnet(f"ln rpc {index} listinvoices"))["invoices"] + def check_invoices(self, pod: str): + invs = json.loads(self.warnet(f"ln rpc {pod} listinvoices"))["invoices"] settled = sum(1 for inv in invs if inv["state"] == "SETTLED") - self.log.debug(f"Node {index} has {settled} settled invoices") + self.log.debug(f"lnd {pod} has {settled} settled invoices") return settled diff --git a/test/simln_test.py b/test/simln_test.py new file mode 100755 index 000000000..1571ffa47 --- /dev/null +++ b/test/simln_test.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 +import ast +import json +import os +from pathlib import Path +from time import sleep + +import pexpect +from test_base import TestBase + +from warnet.k8s import download, get_pods_with_label, pod_log, wait_for_pod +from warnet.process import run_command + +lightning_selector = "mission=lightning" + +UP = "\033[A" +DOWN = "\033[B" +ENTER = "\n" + + +class SimLNTest(TestBase): + def __init__(self): + super().__init__() + self.network_dir = Path(os.path.dirname(__file__)) / "data" / "ln" + + def run_test(self): + try: + os.chdir(self.tmpdir) + self.setup_network() + self.run_plugin() + 
self.copy_results() + finally: + self.cleanup() + + def setup_network(self): + self.log.info("Setting up network") + self.log.info(self.warnet(f"deploy {self.network_dir}")) + self.wait_for_all_tanks_status(target="running") + + def run_plugin(self): + self.log.info("Initializing SimLN plugin...") + self.sut = pexpect.spawn("warnet init") + self.sut.expect("network", timeout=10) + self.sut.sendline("n") + self.sut.close() + + cmd = "warnet plugins simln run-demo" + self.log.info(f"Running: {cmd}") + run_command(cmd) + self.wait_for_predicate(self.found_results_remotely) + self.log.info("Ran SimLn plugin.") + + def copy_results(self) -> bool: + self.log.info("Copying results") + pod = get_pods_with_label("mission=simln")[0] + self.wait_for_gossip_sync(2) + wait_for_pod(pod.metadata.name, 60) + + log_resp = pod_log(pod.metadata.name, "simln") + self.log.info(log_resp.data.decode("utf-8")) + + download(pod.metadata.name, Path("/working/results"), Path("."), pod.metadata.namespace) + self.wait_for_predicate(self.found_results_locally) + + def wait_for_gossip_sync(self, expected: int): + self.log.info(f"Waiting for sync (expecting {expected})...") + current = 0 + while current < expected: + current = 0 + pods = get_pods_with_label(lightning_selector) + for v1_pod in pods: + node = v1_pod.metadata.name + chs = json.loads(run_command(f"warnet ln rpc {node} describegraph"))["edges"] + self.log.info(f"{node}: {len(chs)} channels") + current += len(chs) + sleep(1) + self.log.info("Synced") + + def found_results_remotely(self) -> bool: + pod_names_literal = run_command("warnet plugins simln list-simln-podnames") + pod_names = ast.literal_eval(pod_names_literal) + pod = pod_names[0] + self.log.info(f"Checking for results file in {pod}") + results_file = run_command(f"warnet plugins simln rpc {pod} ls /working/results").strip() + self.log.info(f"Results file: {results_file}") + results = run_command( + f"warnet plugins simln rpc {pod} cat /working/results/{results_file}" + ).strip() + self.log.info(results) + return results.find("Success") > 0 + + def found_results_locally(self) -> bool: + directory = "results" + self.log.info(f"Searching {directory}") + for root, _dirs, files in os.walk(Path(directory)): + for file_name in files: + file_path = os.path.join(root, file_name) + + with open(file_path) as file: + content = file.read() + if "Success" in content: + self.log.info(f"Found downloaded results in directory: {directory}.") + return True + self.log.info(f"Did not find downloaded results in directory: {directory}.") + return False + + +if __name__ == "__main__": + test = SimLNTest() + test.run_test() diff --git a/test/test_base.py b/test/test_base.py index 2b024da64..51d5935d6 100644 --- a/test/test_base.py +++ b/test/test_base.py @@ -139,13 +139,6 @@ def check_scenarios(): self.wait_for_predicate(check_scenarios) - def get_scenario_return_code(self, scenario_name): - scns = self.rpc("scenarios_list_running") - scns = [scn for scn in scns if scn["cmd"].strip() == scenario_name] - if len(scns) == 0: - raise Exception(f"Scenario {scenario_name} not found in running scenarios") - return scns[0]["return_code"] - def assert_equal(thing1, thing2, *args): if thing1 != thing2 or any(thing1 != arg for arg in args):