From f898132bc3f119a70aea6621b22ae1d369ea60f6 Mon Sep 17 00:00:00 2001
From: Amaury Chamayou
Date: Wed, 8 Jan 2025 10:39:58 +0000
Subject: [PATCH 1/4] Update cgmanifest.json for valijson upgrade to 1.0.3 (#6729) (#6736)

---
 cgmanifest.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cgmanifest.json b/cgmanifest.json
index 6a5a80d92434..e448c442b7a6 100644
--- a/cgmanifest.json
+++ b/cgmanifest.json
@@ -114,7 +114,7 @@
         "type": "git",
         "git": {
           "repositoryUrl": "https://github.com/tristanpenman/valijson",
-          "commitHash": "2dfc7499a31b84edef71189f4247919268ebc74e"
+          "commitHash": "fc9ddf14db683c9443c48ae3a6bf83e0ce3ad37c"
         }
       }
     },

From 7f720b7e4a7d749975dcf90b87dbed0be692fa4a Mon Sep 17 00:00:00 2001
From: Amaury Chamayou
Date: Wed, 8 Jan 2025 11:13:21 +0000
Subject: [PATCH 2/4] Prune unused ansible var files (#6738)

---
 .../setup_vm/roles/ccf_build/vars/clang11.yml | 36 -------------------
 .../setup_vm/roles/ccf_run/vars/clang11.yml   |  5 ---
 2 files changed, 41 deletions(-)
 delete mode 100644 getting_started/setup_vm/roles/ccf_build/vars/clang11.yml
 delete mode 100644 getting_started/setup_vm/roles/ccf_run/vars/clang11.yml

diff --git a/getting_started/setup_vm/roles/ccf_build/vars/clang11.yml b/getting_started/setup_vm/roles/ccf_build/vars/clang11.yml
deleted file mode 100644
index d8da29b7dc93..000000000000
--- a/getting_started/setup_vm/roles/ccf_build/vars/clang11.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-workspace: "/tmp/"
-clang_ver: 11
-
-debs:
-  - apt-transport-https
-  - ninja-build
-  - libuv1-dev
-  - libc++-{{ clang_ver }}-dev
-  - libc++abi-{{ clang_ver }}-dev
-  - python3.8-dev
-  - python3.8-venv
-  - llvm-{{ clang_ver }}
-  - clang-{{ clang_ver }}
-  - clang-format-11
-  - clang-tools-{{ clang_ver }}
-  - lld-{{ clang_ver }}
-  - build-essential
-  - expect
-  - git
-  - ccache
-  - kmod # modinfo for sgxinfo.sh
-  - cmake
-  - libssl-dev
-  - jq # operation scripts
-  - sudo
-  - curl # client test infra
-  - shellcheck # bash lint
-  - iptables # partition test infra
-
-# Not installed on GitHub Actions environment because of conflicting package
-docker_debs:
-  - docker-ce-cli
-
-doxygen_ver: "1.9.8"
-doxygen_bin: "doxygen-{{ doxygen_ver }}.linux.bin.tar.gz"
-doxygen_url: "https://sourceforge.net/projects/doxygen/files/rel-{{ doxygen_ver }}/{{ doxygen_bin }}/download"
diff --git a/getting_started/setup_vm/roles/ccf_run/vars/clang11.yml b/getting_started/setup_vm/roles/ccf_run/vars/clang11.yml
deleted file mode 100644
index 60c0f972aeda..000000000000
--- a/getting_started/setup_vm/roles/ccf_run/vars/clang11.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-workspace: "/tmp/"
-debs:
-  - libc++abi1-11
-  - libc++1-11
-  - libuv1

From 941f6386782d6dcea22f474ff01e69380f74cd4f Mon Sep 17 00:00:00 2001
From: Amaury Chamayou
Date: Wed, 8 Jan 2025 16:02:16 +0000
Subject: [PATCH 3/4] Add az-dcap-client back to main for now, to enable LTS-compatible base images (#6742)

---
 .azure-pipelines-release.yml                 |  6 +++---
 .azure-pipelines-templates/deploy_aci.yml    |  2 +-
 .azure_pipelines_snp.yml                     |  2 +-
 .github/workflows/bencher.yml                |  2 +-
 .github/workflows/ci-verification.yml        |  6 +++---
 .github/workflows/ci.yml                     |  4 ++--
 .github/workflows/codeql-analysis.yml        |  2 +-
 .github/workflows/long-test.yml              |  8 ++++----
 .github/workflows/long-verification.yml      |  4 ++--
 .github/workflows/release.yml                |  2 +-
 docker/ccf_ci_built                          |  2 +-
 getting_started/setup_vm/ccf-dev.yml         |  3 +++
 .../setup_vm/roles/az_dcap/tasks/install.yml | 18 ++++++++++++++++++
 13 files changed, 41 insertions(+), 20 deletions(-)
 create mode 100644 getting_started/setup_vm/roles/az_dcap/tasks/install.yml

diff --git a/.azure-pipelines-release.yml b/.azure-pipelines-release.yml
index 7e7a9d2083dd..90e4218d41b7 100644
--- a/.azure-pipelines-release.yml
+++ b/.azure-pipelines-release.yml
@@ -8,15 +8,15 @@ pr: none
 resources:
   containers:
     - container: virtual
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
       options: --publish-all --cap-add NET_ADMIN --cap-add NET_RAW --cap-add SYS_PTRACE -v /lib/modules:/lib/modules:ro

     - container: snp
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
       options: --publish-all --cap-add NET_ADMIN --cap-add NET_RAW --cap-add SYS_PTRACE -v /lib/modules:/lib/modules:ro

     - container: sgx
-      image: ghcr.io/microsoft/ccf/ci/sgx:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/sgx:build-08-01-2025-1
       options: --publish-all --cap-add NET_ADMIN --cap-add NET_RAW --device /dev/sgx_enclave:/dev/sgx_enclave --device /dev/sgx_provision:/dev/sgx_provision -v /dev/sgx:/dev/sgx -v /lib/modules:/lib/modules:ro

 variables:
diff --git a/.azure-pipelines-templates/deploy_aci.yml b/.azure-pipelines-templates/deploy_aci.yml
index ffb4a7f5d6e4..4b6c3e844c97 100644
--- a/.azure-pipelines-templates/deploy_aci.yml
+++ b/.azure-pipelines-templates/deploy_aci.yml
@@ -50,7 +50,7 @@ jobs:
         env:
           ACR_REGISTRY_RESOURCE_NAME: ccfmsrc
           ACR_REGISTRY: ccfmsrc.azurecr.io
-          BASE_IMAGE: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+          BASE_IMAGE: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1

       - script: |
           set -ex
diff --git a/.azure_pipelines_snp.yml b/.azure_pipelines_snp.yml
index 76aec28d7c6f..5d3db5770004 100644
--- a/.azure_pipelines_snp.yml
+++ b/.azure_pipelines_snp.yml
@@ -22,7 +22,7 @@ schedules:
 resources:
   containers:
     - container: virtual
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
       options: --publish-all --cap-add NET_ADMIN --cap-add NET_RAW --cap-add SYS_PTRACE -v /lib/modules:/lib/modules:ro

 jobs:
diff --git a/.github/workflows/bencher.yml b/.github/workflows/bencher.yml
index ac959e9b665c..3017074044a5 100644
--- a/.github/workflows/bencher.yml
+++ b/.github/workflows/bencher.yml
@@ -13,7 +13,7 @@ jobs:
     name: Continuous Benchmarking with Bencher
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
     steps:
       - uses: actions/checkout@v4
         with:
diff --git a/.github/workflows/ci-verification.yml b/.github/workflows/ci-verification.yml
index 1374ca471d3a..61a55b8bc293 100644
--- a/.github/workflows/ci-verification.yml
+++ b/.github/workflows/ci-verification.yml
@@ -24,7 +24,7 @@ jobs:
     name: Model Checking - Consistency
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
     defaults:
       run:
         working-directory: tla
@@ -102,7 +102,7 @@
     name: Model Checking - Consensus
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
     defaults:
       run:
         working-directory: tla
@@ -158,7 +158,7 @@
     name: Trace Validation - Consensus
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1

     steps:
       - uses: actions/checkout@v4
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index cc304c2d3809..1afb379ddd8b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -20,7 +20,7 @@ jobs:
   checks:
     name: "Format and License Checks"
     runs-on: ubuntu-latest
-    container: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+    container: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
     steps:
       - run: git config --global --add safe.directory "$GITHUB_WORKSPACE"

@@ -44,7 +44,7 @@
            options: --user root --publish-all --cap-add NET_ADMIN --cap-add NET_RAW --cap-add SYS_PTRACE -v /lib/modules:/lib/modules:ro
     runs-on: ${{ matrix.platform.nodes }}
     container:
-      image: ghcr.io/microsoft/ccf/ci/${{ matrix.platform.image }}:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/${{ matrix.platform.image }}:build-08-01-2025-1
       options: ${{ matrix.platform.options }}
     steps:
       - uses: actions/checkout@v4
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 18f5d89df7c3..6607e949ec51 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -25,7 +25,7 @@ jobs:
     # Insufficient space to run on public runner, so use custom pool
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
       options: --user root

     permissions:
diff --git a/.github/workflows/long-test.yml b/.github/workflows/long-test.yml
index 91940977a3bc..657998b7baeb 100644
--- a/.github/workflows/long-test.yml
+++ b/.github/workflows/long-test.yml
@@ -17,7 +17,7 @@
     if: ${{ contains(github.event.pull_request.labels.*.name, 'run-long-test') || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }}
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1

     steps:
       - uses: actions/checkout@v4
@@ -35,7 +35,7 @@
     if: ${{ contains(github.event.pull_request.labels.*.name, 'run-long-test') || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }}
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1

     steps:
       - uses: actions/checkout@v4
@@ -79,7 +79,7 @@
     name: TSAN
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1

     steps:
       - uses: actions/checkout@v4
@@ -117,7 +117,7 @@
     name: LTS
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1

     steps:
       - uses: actions/checkout@v4
diff --git a/.github/workflows/long-verification.yml b/.github/workflows/long-verification.yml
index a1f47406966a..58ca8599199d 100644
--- a/.github/workflows/long-verification.yml
+++ b/.github/workflows/long-verification.yml
@@ -22,7 +22,7 @@
     if: ${{ contains(github.event.pull_request.labels.*.name, 'run-long-verification') || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }}
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
     defaults:
       run:
         working-directory: tla
@@ -50,7 +50,7 @@
     if: ${{ contains(github.event.pull_request.labels.*.name, 'run-long-verification') || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }}
     runs-on: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     container:
-      image: ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
     defaults:
       run:
         working-directory: tla
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index aab772ee1e7f..2e9c3b778e30 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -75,7 +75,7 @@ jobs:
            nodes: [self-hosted, 1ES.Pool=gha-virtual-ccf-sub]
     runs-on: ${{ matrix.platform.nodes }}
     container:
-      image: ghcr.io/microsoft/ccf/ci/${{ matrix.platform.image }}:build-05-12-2024
+      image: ghcr.io/microsoft/ccf/ci/${{ matrix.platform.image }}:build-08-01-2025-1
       options: "--user root --publish-all --cap-add NET_ADMIN --cap-add NET_RAW --cap-add SYS_PTRACE -v /lib/modules:/lib/modules:ro ${{ matrix.platform.container_options }}"
     steps:
       - uses: actions/checkout@v4
diff --git a/docker/ccf_ci_built b/docker/ccf_ci_built
index 5bfb3d872175..c1ddb865c3bc 100644
--- a/docker/ccf_ci_built
+++ b/docker/ccf_ci_built
@@ -3,7 +3,7 @@
 # Also contains CCF source and build directory

 # Latest image as of this change
-ARG base=ghcr.io/microsoft/ccf/ci/default:build-05-12-2024
+ARG base=ghcr.io/microsoft/ccf/ci/default:build-08-01-2025-1
 FROM ${base}

 # SSH. Note that this could (should) be done in the base ccf_ci image instead
diff --git a/getting_started/setup_vm/ccf-dev.yml b/getting_started/setup_vm/ccf-dev.yml
index 03126eee986d..a3ed43776cc0 100644
--- a/getting_started/setup_vm/ccf-dev.yml
+++ b/getting_started/setup_vm/ccf-dev.yml
@@ -25,6 +25,9 @@
     - import_role:
         name: lldb
         tasks_from: install.yml
+    - import_role:
+        name: az_dcap
+        tasks_from: install.yml
     - import_role:
         name: autoremove
         tasks_from: install.yml
diff --git a/getting_started/setup_vm/roles/az_dcap/tasks/install.yml b/getting_started/setup_vm/roles/az_dcap/tasks/install.yml
new file mode 100644
index 000000000000..14b1d5ead421
--- /dev/null
+++ b/getting_started/setup_vm/roles/az_dcap/tasks/install.yml
@@ -0,0 +1,18 @@
+- name: Add Microsoft repository key
+  apt_key:
+    url: "https://packages.microsoft.com/keys/microsoft.asc"
+    state: present
+  become: true
+
+- name: Add Microsoft sources list
+  apt_repository:
+    repo: "deb [arch=amd64] https://packages.microsoft.com/ubuntu/{{ ansible_distribution_version }}/prod {{ ansible_distribution_release }} main"
+    state: present
+  become: true
+
+- name: Install the Azure DCAP Client
+  apt:
+    name: az-dcap-client
+    state: present
+    force: true
+  become: true

From 3390373fe767b5f246e5fd464be18fc1d6a222e3 Mon Sep 17 00:00:00 2001
From: Amaury Chamayou
Date: Wed, 8 Jan 2025 15:45:38 +0000
Subject: [PATCH 4/4] Remove unused DockerRemote (#6740)

---
 .cmake-format.py             |   1 -
 cmake/common.cmake           |  10 +-
 tests/infra/docker_remote.py | 222 -----------------------------------
 tests/infra/network.py       |   5 -
 tests/infra/node.py          |  26 +---
 tests/lts_compatibility.py   |  14 +---
 tests/requirements.txt       |   1 -
 7 files changed, 9 insertions(+), 270 deletions(-)
 delete mode 100644 tests/infra/docker_remote.py

diff --git a/.cmake-format.py b/.cmake-format.py
index afc456b82dc9..605fe7eb0cd6 100644
--- a/.cmake-format.py
+++ b/.cmake-format.py
@@ -28,7 +28,6 @@
             "CURL_CLIENT": "*",
             "CONFIGURATIONS": "*",
             "ADDITIONAL_ARGS": "*",
-            "CONTAINER_NODES": "*",
         },
     },
     "add_perf_test": {
diff --git a/cmake/common.cmake b/cmake/common.cmake
index fe3bfb6aa60f..ac50801edb92 100644
--- a/cmake/common.cmake
+++ b/cmake/common.cmake
@@ -68,7 +68,7 @@ function(add_e2e_test)
   cmake_parse_arguments(
     PARSE_ARGV 0 PARSED_ARGS ""
     "NAME;PYTHON_SCRIPT;LABEL;CURL_CLIENT;PERF_LABEL"
-    "CONSTITUTION;ADDITIONAL_ARGS;CONFIGURATIONS;CONTAINER_NODES"
+    "CONSTITUTION;ADDITIONAL_ARGS;CONFIGURATIONS"
   )

   if(NOT PARSED_ARGS_CONSTITUTION)
@@ -161,14 +161,6 @@
       PROPERTY ENVIRONMENT "CURL_CLIENT=ON"
     )
   endif()
-  if((${PARSED_ARGS_CONTAINER_NODES}) AND (LONG_TESTS))
-    # Containerised nodes are only enabled with long tests
-    set_property(
-      TEST ${PARSED_ARGS_NAME}
-      APPEND
-      PROPERTY ENVIRONMENT "CONTAINER_NODES=ON"
-    )
-  endif()

   if(DEFINED DEFAULT_ENCLAVE_TYPE)
     set_property(
diff --git a/tests/infra/docker_remote.py b/tests/infra/docker_remote.py
deleted file mode 100644
index d9700a4eeaad..000000000000
--- a/tests/infra/docker_remote.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the Apache 2.0 License.
-
-import infra.remote
-import docker
-import re
-import os
-import pathlib
-import grp
-import infra.github
-import time
-
-from loguru import logger as LOG
-
-
-def is_docker_env():
-    """Returns true if the process executing _this_ code already runs inside Docker"""
-    return os.path.isfile("/.dockerenv")
-
-
-def is_azure_devops_env():
-    return "SYSTEM_TEAMFOUNDATIONCOLLECTIONURI" in os.environ
-
-
-def map_azure_devops_docker_workspace_dir(workspace_dir):
-    return workspace_dir.replace("__w", "/mnt/vss/_work")
-
-
-# Docker image name prefix
-# To update when runtime images are pushed to ACR
-MICROSOFT_REGISTRY_NAME = "mcr.microsoft.com"
-DOCKER_IMAGE_NAME_PREFIX = "ccf/app/run"
-
-# Network name
-AZURE_DEVOPS_CONTAINER_NETWORK_ENV_VAR = "AGENT_CONTAINERNETWORK"
-DOCKER_NETWORK_NAME_LOCAL = "ccf_test_docker_network"
-
-# Identifier for all CCF test containers
-CCF_TEST_CONTAINERS_LABEL = "ccf_test"
-
-NODE_STARTUP_WRAPPER_SCRIPT = "docker_wrap.sh"
-CONTAINER_IP_REPLACE_STR = "CONTAINER_IP"
-
-
-def kernel_has_sgx_builtin():
-    with open("/proc/cpuinfo", "r", encoding="utf-8") as cpu_info:
-        f = re.compile("^flags.*sgx.*")
-        for line in cpu_info:
-            if f.match(line):
-                return True
-    return False
-
-
-class DockerRemote(infra.remote.LocalRemote):
-    def _stop_container(self, container):
-        while True:
-            try:
-                container.stop()
-                container.remove()
-                LOG.info(f"Stopped container {container.name}")
-                break
-            except docker.errors.NotFound:
-                break
-            except docker.errors.APIError:
-                # Container may already be in the process of being cleaned up
-                time.sleep(0.5)
-                continue
-
-    @staticmethod
-    def make_host(host):
-        # Bind local RPC address to 0.0.0.0, so that it be can be accessed from outside container
-        for _, rpc_interface in host.rpc_interfaces.items():
-            rpc_interface.host = "0.0.0.0"
-            rpc_interface.public_host = CONTAINER_IP_REPLACE_STR
-        return host
-
-    @staticmethod
-    def get_node_address(*args, **kwargs):
-        return CONTAINER_IP_REPLACE_STR
-
-    def __init__(
-        self,
-        *args,
-        host=None,
-        label=None,
-        local_node_id=None,
-        version=None,
-        binary_dir=".",
-        node_container_image=None,
-        **kwargs,
-    ):
-        self.docker_client = docker.DockerClient()
-        self.container_ip = None  # Assigned when container is started
-        self.host = host
-        self.binary_dir = binary_dir
-
-        # Sanitise container name, replacing illegal characters with underscores
-        self.container_name = f"{label}_{local_node_id}"
-        self.container_name = re.sub(r"[^a-zA-Z0-9_.-]", "_", self.container_name)
-
-        # Create network to connect all containers to (for n2n communication, etc.).
-        # In a Docker environment, use existing network (either the one provided by
-        # ADO or the one already created by the runner).
-        # Otherwise, create network on the fly.
-        if is_docker_env() and is_azure_devops_env():
-            network_name = os.environ[AZURE_DEVOPS_CONTAINER_NETWORK_ENV_VAR]
-        else:
-            network_name = DOCKER_NETWORK_NAME_LOCAL
-
-        try:
-            self.network = self.docker_client.networks.get(network_name)
-        except docker.errors.NotFound:
-            LOG.debug(f"Creating network {network_name}")
-            self.network = self.docker_client.networks.create(network_name)
-
-        # Stop and delete existing container(s)
-        if local_node_id == 0:
-            for c in self.docker_client.containers.list(
-                all=True, filters={"label": [CCF_TEST_CONTAINERS_LABEL, label]}
-            ):
-                self._stop_container(c)
-
-        LOG.debug(
-            f'Network {self.network.name} [{self.network.attrs["IPAM"]["Config"][0]["Gateway"]}]'
-        )
-
-        # Group and device for kernel sgx builtin support (or not)
-        if kernel_has_sgx_builtin():
-            gid = grp.getgrnam("sgx_prv").gr_gid
-            devices = (
-                ["/dev/sgx/enclave", "/dev/sgx/provision"]
-                if os.path.isdir("/dev/sgx")
-                else None
-            )
-        else:
-            gid = os.getgid()
-            devices = ["/dev/sgx"] if os.path.isdir("/dev/sgx") else None
-
-        # Mount workspace volume
-        cwd = str(pathlib.Path().resolve())
-        cwd_host = (
-            map_azure_devops_docker_workspace_dir(cwd) if is_azure_devops_env() else cwd
-        )
-
-        # Deduce container tag from node version
-        repo = infra.github.Repository()
-        if node_container_image is None:
-            node_container_image = (
-                f"{MICROSOFT_REGISTRY_NAME}/{DOCKER_IMAGE_NAME_PREFIX}:"
-            )
-            if version is not None:
-                node_container_image += version
-            else:
-                suffix = "sgx" if os.path.exists("/dev/sgx") else "virtual-clang15"
-                node_container_image += f"{infra.github.strip_release_tag_name(repo.get_latest_dev_tag())}-{suffix}"
-
-        try:
-            self.docker_client.images.get(node_container_image)
-        except docker.errors.ImageNotFound:
-            LOG.info(f"Pulling image {node_container_image}")
-            self.docker_client.images.pull(node_container_image)
-
-        super().__init__(*args, host=host, **kwargs)
-
-        self.command = (
-            f'./{NODE_STARTUP_WRAPPER_SCRIPT} "{super().get_cmd(include_dir=False)}"'
-        )
-
-        self.container = self.docker_client.containers.create(
-            node_container_image,
-            volumes={cwd_host: {"bind": cwd, "mode": "rw"}},
-            devices=devices,
-            command=self.command,
-            name=self.container_name,
-            init=True,
-            labels=[label, CCF_TEST_CONTAINERS_LABEL],
-            publish_all_ports=True,
-            user=f"{os.getuid()}:{gid}",
-            working_dir=self.root,
-            detach=True,
-            auto_remove=True,
-        )
-        self.network.connect(self.container)
-        LOG.debug(f"Created container {self.container_name} [{node_container_image}]")
-
-    def setup(self, use_links=False):
-        src_path = os.path.join(self.binary_dir, NODE_STARTUP_WRAPPER_SCRIPT)
-        super().setup(use_links=use_links)
-        super().cp(src_path, self.root)
-
-    def start(self):
-        LOG.info(self.command)
-        self.container.start()
-        self.container.reload()  # attrs are cached
-        self.container_ip = self.container.attrs["NetworkSettings"]["Networks"][
-            self.network.name
-        ]["IPAddress"]
-        for _, rpc_interface in self.host.rpc_interfaces.items():
-            rpc_interface.public_host = self.container_ip
-        self.hostname = self.container_ip
-        LOG.debug(f"Started container {self.container_name} [{self.container_ip}]")
-
-    def stop(self):
-        try:
-            self.container.stop()
-            LOG.info(f"Stopped container {self.container.name}")
-        except docker.errors.NotFound:
-            pass
-
-    def suspend(self):
-        self.container.pause()
-
-    def resume(self):
-        self.container.unpause()
-
-    def check_done(self):
-        try:
-            self.container.reload()
-            LOG.debug(self.container.attrs["State"])
-            return self.container.attrs["State"]["Status"] != "running"
-        except docker.errors.NotFound:
-            return True
diff --git a/tests/infra/network.py b/tests/infra/network.py
index aaafa0dbe9ef..3bd6241afc80 100644
--- a/tests/infra/network.py
+++ b/tests/infra/network.py
@@ -227,7 +227,6 @@ def __init__(
         version=None,
         service_load=None,
         node_data_json_file=None,
-        nodes_in_container=False,
     ):
         # Map of node id to dict of node arg to override value
        # for example, to set the election timeout to 2s for node 3:
@@ -279,7 +278,6 @@ def __init__(
         self.args = None
         self.service_certificate_valid_from = None
         self.service_certificate_validity_days = None
-        self.nodes_in_container = nodes_in_container

         # Requires admin privileges
         self.partitioner = (
@@ -324,7 +322,6 @@ def create_node(self, host, binary_dir=None, library_dir=None, **kwargs):
             library_dir or self.library_dir,
             debug,
             perf,
-            nodes_in_container=self.nodes_in_container,
             **kwargs,
         )
         self.nodes.append(node)
@@ -1703,7 +1700,6 @@ def network(
     version=None,
     service_load=None,
     node_data_json_file=None,
-    nodes_in_container=False,
 ):
     """
     Context manager for Network class.
@@ -1734,7 +1730,6 @@ def network(
         version=version,
         service_load=service_load,
         node_data_json_file=node_data_json_file,
-        nodes_in_container=nodes_in_container,
     )
     try:
         yield net
diff --git a/tests/infra/node.py b/tests/infra/node.py
index 696789eea0d5..f44dd753e2b1 100644
--- a/tests/infra/node.py
+++ b/tests/infra/node.py
@@ -5,7 +5,6 @@
 from enum import Enum, auto
 import infra.crypto
 import infra.remote
-import infra.docker_remote
 from datetime import datetime, timedelta, timezone
 import infra.net
 import infra.path
@@ -119,7 +118,6 @@ def __init__(
         node_port=0,
         version=None,
         node_data_json_file=None,
-        nodes_in_container=False,
     ):
         self.local_node_id = local_node_id
         self.binary_dir = binary_dir
@@ -147,8 +145,6 @@ def __init__(
         self.label = None
         self.verify_ca_by_default = True

-        requires_docker_remote = nodes_in_container or os.getenv("CONTAINER_NODES")
-
         if isinstance(self.host, str):
             raise ValueError("Translate host to HostSpec before you get here")

@@ -156,23 +152,13 @@ def __init__(
             # Main RPC interface determines remote implementation
             if interface_name == infra.interfaces.PRIMARY_RPC_INTERFACE:
                 if rpc_interface.protocol == "local":
-                    self.remote_impl = (
-                        infra.docker_remote.DockerRemote
-                        if requires_docker_remote
-                        else infra.remote.LocalRemote
-                    )
-                    # Node client address does not currently work with DockerRemote
-                    if not requires_docker_remote:
-                        if not self.major_version or self.major_version > 1:
-                            self.node_client_host = str(
-                                ipaddress.ip_address(BASE_NODE_CLIENT_HOST)
-                                + self.local_node_id
-                            )
-                elif rpc_interface.protocol == "ssh":
-                    if requires_docker_remote:
-                        raise ValueError(
-                            "Cannot use SSH remote with containerised nodes"
+                    self.remote_impl = infra.remote.LocalRemote
+                    if not self.major_version or self.major_version > 1:
+                        self.node_client_host = str(
+                            ipaddress.ip_address(BASE_NODE_CLIENT_HOST)
+                            + self.local_node_id
                         )
+                elif rpc_interface.protocol == "ssh":
                     self.remote_impl = infra.remote.SSHRemote
                 else:
                     assert (
diff --git a/tests/lts_compatibility.py b/tests/lts_compatibility.py
index dd6b3c8d9477..fa6c898d27cc 100644
--- a/tests/lts_compatibility.py
+++ b/tests/lts_compatibility.py
@@ -394,12 +394,8 @@ def run_code_upgrade_from(

         # Rollover JWKS so that new primary must read historical CA bundle table
         # and retrieve new keys via auto refresh
-        if not os.getenv("CONTAINER_NODES"):
-            jwt_issuer.refresh_keys()
-            jwt_issuer.wait_for_refresh(network, args)
-        else:
-            # https://github.com/microsoft/CCF/issues/2608#issuecomment-924785744
-            LOG.warning("Skipping JWT refresh as running nodes in container")
+        jwt_issuer.refresh_keys()
+        jwt_issuer.wait_for_refresh(network, args)

         test_new_service(
             network,
@@ -663,12 +659,6 @@ def add(parser):
         help='Absolute path to existing CCF release, e.g. "/opt/ccf"',
         default=None,
     )
-    parser.add_argument(
-        "--release-install-image",
-        type=str,
-        help="If --release-install-path is set, specify a docker image to run release in (only if CONTAINER_NODES envvar is set) ",
-        default=None,
-    )
     parser.add_argument("--dry-run", action="store_true")

     args = infra.e2e_args.cli_args(add)
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 9b0468d38a22..8801ec5a8396 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -7,7 +7,6 @@ docutils
 python-iptables
 py-spy
 GitPython
-docker
 better_exceptions
 pyasn1
 Jinja2