From a12943a1ba030d3ebb150ed81dcd7aa309b8864b Mon Sep 17 00:00:00 2001 From: Matthias Frei Date: Fri, 3 Nov 2023 09:34:51 +0100 Subject: [PATCH] tools: specify project name in docker-compose files (#4396) Simplify the usage of the various docker-compose configurations by including the project name in the configuration file. This has been supported for a while now in docker-compose v2. This allows dropping the `-p`/`--project-name` from all `docker-compose` incantations. Also, streamline the docker-compose files generated by the topogen scripts; remove the explicit `container_name` configurations and drop all the explicit `scion_` prefixes -- managing these prefixes is docker-compose's job. Shut up the warnings on "SCION_EXPERIMENTAL_... variable is not set. Defaulting to a blank string." by using the variable expansion syntax to explicitly default to a blank string. Also, drop the `docker compose` `--compatibility` flag. The compatibility flag affects what word separator is used in the container name and it has long been deprecated. We don't usually rely on specific container names, so this should be fine. In some exceptional cases where expecting specific container names seems more practical (containers for bazel-remote-cache and go-module-proxy) due to special casing in the CI scripts, container_name is set explicitly. 
--- .buildkite/hooks/bazel-remote.yml | 1 + .buildkite/hooks/go-module-proxy.yml | 1 + .buildkite/hooks/pre-command | 4 +- acceptance/cert_renewal/test.py | 4 +- acceptance/common/docker.py | 6 +-- acceptance/hidden_paths/test.py | 46 +++++++++----------- acceptance/sig_short_exp_time/test | 14 +++--- acceptance/topo_cs_reload/docker-compose.yml | 2 - acceptance/topo_cs_reload/reload_test.go | 18 +++----- acceptance/topo_daemon_reload/reload_test.go | 13 +++--- acceptance/trc_update/test.py | 2 +- bazel-remote.yml | 1 + demo/drkey/test.py | 9 ++-- demo/file_transfer/file_transfer.py | 5 +-- scion.sh | 4 +- tools/dc | 3 +- tools/integration/docker.go | 5 +-- tools/topology/common.py | 4 -- tools/topology/docker.py | 32 ++++++-------- tools/topology/docker_utils.py | 7 ++- tools/topology/monitoring.py | 4 +- tools/topology/sig.py | 23 ++++------ 22 files changed, 86 insertions(+), 122 deletions(-) diff --git a/.buildkite/hooks/bazel-remote.yml b/.buildkite/hooks/bazel-remote.yml index 07523b1c2a..b4fdae7b29 100644 --- a/.buildkite/hooks/bazel-remote.yml +++ b/.buildkite/hooks/bazel-remote.yml @@ -1,4 +1,5 @@ version: "2.4" +name: bazel_remote services: bazel-remote: container_name: bazel-remote-cache diff --git a/.buildkite/hooks/go-module-proxy.yml b/.buildkite/hooks/go-module-proxy.yml index 4c852825f1..566068cca6 100644 --- a/.buildkite/hooks/go-module-proxy.yml +++ b/.buildkite/hooks/go-module-proxy.yml @@ -1,5 +1,6 @@ --- version: "2.4" +name: athens services: go-module-proxy: container_name: go-module-proxy diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index ada7f07ae8..38f6307520 100755 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -43,7 +43,7 @@ echo "~~~ Starting bazel remote cache proxy" # Start bazel remote cache proxy for S3 # Note that S3 keys are injected by buildkite, see # https://buildkite.com/docs/pipelines/secrets#storing-secrets-with-the-elastic-ci-stack-for-aws -docker compose --compatibility 
-f .buildkite/hooks/bazel-remote.yml -p bazel_remote up -d +docker compose -f .buildkite/hooks/bazel-remote.yml up -d echo "~~~ Starting go module proxy" -docker compose --compatibility -f .buildkite/hooks/go-module-proxy.yml -p athens up -d +docker compose -f .buildkite/hooks/go-module-proxy.yml up -d diff --git a/acceptance/cert_renewal/test.py b/acceptance/cert_renewal/test.py index 799fa42258..2a3a9ef30d 100755 --- a/acceptance/cert_renewal/test.py +++ b/acceptance/cert_renewal/test.py @@ -72,9 +72,9 @@ def _run(self): end2end.run_fg() logger.info("==> Shutting down control servers and purging caches") - for container in self.dc.list_containers("scion_sd.*"): + for container in self.dc.list_containers("sd.*"): self.dc("rm", container) - for container in self.dc.list_containers("scion_cs.*"): + for container in self.dc.list_containers("cs.*"): self.dc.stop_container(container) for cs_config in cs_configs: files = list((pathlib.Path(self.artifacts) / diff --git a/acceptance/common/docker.py b/acceptance/common/docker.py index e4a1b7014b..0af9a69bc4 100644 --- a/acceptance/common/docker.py +++ b/acceptance/common/docker.py @@ -35,15 +35,12 @@ from plumbum import cmd SCION_DC_FILE = "gen/scion-dc.yml" -DC_PROJECT = "scion" SCION_TESTING_DOCKER_ASSERTIONS_OFF = 'SCION_TESTING_DOCKER_ASSERTIONS_OFF' class Compose(object): def __init__(self, - project: str = DC_PROJECT, compose_file: str = SCION_DC_FILE): - self.project = project self.compose_file = compose_file def __call__(self, *args, **kwargs) -> str: @@ -51,8 +48,7 @@ def __call__(self, *args, **kwargs) -> str: # Note: not using plumbum here due to complications with encodings in the captured output try: res = subprocess.run( - ["docker", "compose", "--compatibility", - "-f", self.compose_file, "-p", self.project, *args], + ["docker", "compose", "-f", self.compose_file, *args], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8") except subprocess.CalledProcessError as e: raise 
_CalledProcessErrorWithOutput(e) from None diff --git a/acceptance/hidden_paths/test.py b/acceptance/hidden_paths/test.py index cf0d3ae438..90bc3101cb 100755 --- a/acceptance/hidden_paths/test.py +++ b/acceptance/hidden_paths/test.py @@ -5,10 +5,9 @@ import http.server import threading -from plumbum import cmd - from acceptance.common import base from acceptance.common import scion +from tools.topology.scion_addr import ISD_AS class Test(base.TestTopogen): @@ -108,12 +107,6 @@ def setup_start(self): super().setup_start() - self._testers = { - "2": "tester_1-ff00_0_2", - "3": "tester_1-ff00_0_3", - "4": "tester_1-ff00_0_4", - "5": "tester_1-ff00_0_5", - } self._ases = { "2": "1-ff00:0:2", "3": "1-ff00:0:3", @@ -126,27 +119,30 @@ def _run(self): self._server.shutdown() # by now configuration must have been downloaded everywhere # Group 3 - self._showpaths_bidirectional("2", "3", 0) - self._showpaths_bidirectional("2", "5", 0) - self._showpaths_bidirectional("3", "5", 0) + self._showpaths_bidirectional("2", "3") + self._showpaths_bidirectional("2", "5") + self._showpaths_bidirectional("3", "5") # Group 4 - self._showpaths_bidirectional("2", "4", 0) - self._showpaths_bidirectional("2", "5", 0) - self._showpaths_bidirectional("4", "5", 0) + self._showpaths_bidirectional("2", "4") + self._showpaths_bidirectional("2", "5") + self._showpaths_bidirectional("4", "5") # Group 3 X 4 - self._showpaths_bidirectional("3", "4", 1) - - def _showpaths_bidirectional(self, source: str, destination: str, retcode: int): - self._showpaths_run(source, destination, retcode) - self._showpaths_run(destination, source, retcode) - - def _showpaths_run(self, source_as: str, destination_as: str, retcode: int): - print(cmd.docker("exec", "-t", self._testers[source_as], "scion", - "sp", self._ases[destination_as], - "--timeout", "2s", - retcode=retcode)) + try: + self._showpaths_bidirectional("3", "4") + except Exception as e: + print(e) + else: + raise AssertionError("Unexpected success; should 
not have paths 3 -> 4") + + def _showpaths_bidirectional(self, source: str, destination: str): + self._showpaths_run(source, destination) + self._showpaths_run(destination, source) + + def _showpaths_run(self, source_as: str, destination_as: str): + print(self.execute_tester(ISD_AS(self._ases[source_as]), + "scion", "sp", self._ases[destination_as], "--timeout", "2s")) def configuration_server(server): diff --git a/acceptance/sig_short_exp_time/test b/acceptance/sig_short_exp_time/test index 10bc25eb2a..55bd704850 100755 --- a/acceptance/sig_short_exp_time/test +++ b/acceptance/sig_short_exp_time/test @@ -55,24 +55,24 @@ run_test() {(set -e docker image load -i acceptance/sig_short_exp_time/sig1.tar docker image load -i acceptance/sig_short_exp_time/sig2.tar - docker compose --compatibility -f acceptance/sig_short_exp_time/docker-compose.yml up -d dispatcher1 dispatcher2 sig1 sig2 patha pathb + docker compose -f acceptance/sig_short_exp_time/docker-compose.yml up -d dispatcher1 dispatcher2 sig1 sig2 patha pathb # Set up forward route on network stack 1 and 2 through the sig tunnel # device. The route is a property of the network stack, and persists after # the container that added it has exited. # # If the route configuration fails, the test is not stopped. 
- docker compose --compatibility -f acceptance/sig_short_exp_time/docker-compose.yml run --name route1 --rm tester1 ip route add 242.254.200.2/32 dev sig || true - docker compose --compatibility -f acceptance/sig_short_exp_time/docker-compose.yml run --name route2 --rm tester2 ip route add 242.254.100.2/32 dev sig || true + docker compose -f acceptance/sig_short_exp_time/docker-compose.yml run --name route1 --rm tester1 ip route add 242.254.200.2/32 dev sig || true + docker compose -f acceptance/sig_short_exp_time/docker-compose.yml run --name route2 --rm tester2 ip route add 242.254.100.2/32 dev sig || true echo "Start background ping, ping every 0.2 seconds" - docker compose --compatibility -f acceptance/sig_short_exp_time/docker-compose.yml run --name tester1 -d tester1 ping -i 0.2 242.254.200.2 + docker compose -f acceptance/sig_short_exp_time/docker-compose.yml run --name tester1 -d tester1 ping -i 0.2 242.254.200.2 echo "Waiting 10 seconds for path A to expire..." sleep 10 echo "Path A expired, simulating it by shutting down path A proxy" # Traffic should have switched beforehand to path b, and no pings should be lost - docker compose --compatibility -f acceptance/sig_short_exp_time/docker-compose.yml stop patha + docker compose -f acceptance/sig_short_exp_time/docker-compose.yml stop patha sleep 1 docker kill -s SIGINT tester1 @@ -104,9 +104,9 @@ OUTPUT_DIR=$TEST_UNDECLARED_OUTPUTS_DIR mkdir -p $OUTPUT_DIR/logs for CNTR in sig1 sig2 dispatcher1 dispatcher2; do - docker compose --compatibility -f acceptance/sig_short_exp_time/docker-compose.yml logs "$CNTR" > "$OUTPUT_DIR/logs/$CNTR.log" + docker compose -f acceptance/sig_short_exp_time/docker-compose.yml logs "$CNTR" > "$OUTPUT_DIR/logs/$CNTR.log" done -docker compose --compatibility -f acceptance/sig_short_exp_time/docker-compose.yml down -v +docker compose -f acceptance/sig_short_exp_time/docker-compose.yml down -v exit $RC diff --git a/acceptance/topo_cs_reload/docker-compose.yml 
b/acceptance/topo_cs_reload/docker-compose.yml index 00591e1404..ba010d1f7e 100644 --- a/acceptance/topo_cs_reload/docker-compose.yml +++ b/acceptance/topo_cs_reload/docker-compose.yml @@ -8,7 +8,6 @@ networks: - subnet: 242.253.100.0/24 services: topo_cs_reload_dispatcher: - container_name: topo_cs_reload_dispatcher image: bazel/acceptance/topo_cs_reload:dispatcher networks: bridge1: @@ -16,7 +15,6 @@ services: volumes: - vol_topo_cs_reload_disp:/run/shm/dispatcher:rw topo_cs_reload_control_srv: - container_name: topo_cs_reload_control_srv image: bazel/acceptance/topo_cs_reload:control depends_on: - topo_cs_reload_dispatcher diff --git a/acceptance/topo_cs_reload/reload_test.go b/acceptance/topo_cs_reload/reload_test.go index 53186c6428..53f1742d91 100644 --- a/acceptance/topo_cs_reload/reload_test.go +++ b/acceptance/topo_cs_reload/reload_test.go @@ -100,27 +100,23 @@ func setupTest(t *testing.T) testState { require.NoError(t, err) topoFile, err := bazel.Runfile(*topoLocation) require.NoError(t, err) - s.mustExec(t, *genCryptoLocation, scionPKI, - "crypto.tar", topoFile, cryptoLib) + s.mustExec(t, *genCryptoLocation, scionPKI, "crypto.tar", topoFile, cryptoLib) s.mustExec(t, "tar", "-xf", "crypto.tar", "-C", tmpDir) // first load the docker images from bazel into the docker deamon, the // tars are in the same folder as this test runs in bazel. s.mustExec(t, "docker", "image", "load", "-i", "dispatcher.tar") s.mustExec(t, "docker", "image", "load", "-i", "control.tar") // now start the docker containers - s.mustExec(t, "docker", "compose", "--compatibility", "-f", "docker-compose.yml", - "up", "-d") + s.mustExec(t, "docker", "compose", "-f", "docker-compose.yml", "up", "-d") // wait a bit to make sure the containers are ready. 
time.Sleep(time.Second / 2) t.Log("Test setup done") - s.mustExec(t, "docker", "compose", "--compatibility", "-f", "docker-compose.yml", - "ps") + s.mustExec(t, "docker", "compose", "-f", "docker-compose.yml", "ps") return s } func (s testState) teardownTest(t *testing.T) { - defer s.mustExec(t, "docker", "compose", "--compatibility", - "-f", "docker-compose.yml", "down", "-v") + defer s.mustExec(t, "docker", "compose", "-f", "docker-compose.yml", "down", "-v") outdir, exists := os.LookupEnv("TEST_UNDECLARED_OUTPUTS_DIR") require.True(t, exists, "TEST_UNDECLARED_OUTPUTS_DIR must be defined") @@ -130,7 +126,7 @@ func (s testState) teardownTest(t *testing.T) { "topo_cs_reload_dispatcher": "disp.log", "topo_cs_reload_control_srv": "control.log", } { - cmd := exec.Command("docker", "compose", "--compatibility", + cmd := exec.Command("docker", "compose", "-f", "docker-compose.yml", "logs", "--no-color", service) logFileName := fmt.Sprintf("%s/logs/%s", outdir, file) logFile, err := os.Create(logFileName) @@ -149,9 +145,9 @@ func (s testState) teardownTest(t *testing.T) { func (s testState) loadTopo(t *testing.T, name string) { t.Helper() - s.mustExec(t, "docker", "compose", "--compatibility", "-f", "docker-compose.yml", + s.mustExec(t, "docker", "compose", "-f", "docker-compose.yml", "exec", "-T", "topo_cs_reload_control_srv", "mv", name, "/topology.json") - s.mustExec(t, "docker", "compose", "--compatibility", "-f", "docker-compose.yml", + s.mustExec(t, "docker", "compose", "-f", "docker-compose.yml", "kill", "-s", "SIGHUP", "topo_cs_reload_control_srv") } diff --git a/acceptance/topo_daemon_reload/reload_test.go b/acceptance/topo_daemon_reload/reload_test.go index ee9b9f44bf..56802c18b8 100644 --- a/acceptance/topo_daemon_reload/reload_test.go +++ b/acceptance/topo_daemon_reload/reload_test.go @@ -71,18 +71,17 @@ func setupTest(t *testing.T) { mustExec(t, "docker", "image", "load", "-i", "dispatcher.tar") mustExec(t, "docker", "image", "load", "-i", "daemon.tar") // 
now start the docker containers - mustExec(t, "docker", "compose", "--compatibility", "-f", "docker-compose.yml", + mustExec(t, "docker", "compose", "-f", "docker-compose.yml", "up", "-d", "topo_daemon_reload_dispatcher", "topo_daemon_reload_daemon") // wait a bit to make sure the containers are ready. time.Sleep(time.Second / 2) t.Log("Test setup done") - mustExec(t, "docker", "compose", "--compatibility", "-f", "docker-compose.yml", + mustExec(t, "docker", "compose", "-f", "docker-compose.yml", "ps") } func teardownTest(t *testing.T) { - defer mustExec(t, "docker", "compose", "--compatibility", - "-f", "docker-compose.yml", "down", "-v") + defer mustExec(t, "docker", "compose", "-f", "docker-compose.yml", "down", "-v") outdir, exists := os.LookupEnv("TEST_UNDECLARED_OUTPUTS_DIR") require.True(t, exists, "TEST_UNDECLARED_OUTPUTS_DIR must be defined") @@ -92,7 +91,7 @@ func teardownTest(t *testing.T) { "topo_daemon_reload_dispatcher": "disp.log", "topo_daemon_reload_daemon": "daemon.log", } { - cmd := exec.Command("docker", "compose", "--compatibility", + cmd := exec.Command("docker", "compose", "-f", "docker-compose.yml", "logs", "--no-color", service) logFileName := fmt.Sprintf("%s/logs/%s", outdir, file) @@ -111,9 +110,9 @@ func teardownTest(t *testing.T) { func loadTopo(t *testing.T, name string) { t.Helper() - mustExec(t, "docker", "compose", "--compatibility", "-f", "docker-compose.yml", + mustExec(t, "docker", "compose", "-f", "docker-compose.yml", "exec", "-T", "topo_daemon_reload_daemon", "mv", name, "/topology.json") - mustExec(t, "docker", "compose", "--compatibility", "-f", "docker-compose.yml", + mustExec(t, "docker", "compose", "-f", "docker-compose.yml", "kill", "-s", "SIGHUP", "topo_daemon_reload_daemon") } diff --git a/acceptance/trc_update/test.py b/acceptance/trc_update/test.py index eed759f090..2edf26a0bb 100755 --- a/acceptance/trc_update/test.py +++ b/acceptance/trc_update/test.py @@ -72,7 +72,7 @@ def _run(self): end2end["-d", "-outDir", 
artifacts].run_fg() logger.info('==> Shutting down control servers and purging caches') - cs_services = self.dc.list_containers(".*_cs.*") + cs_services = self.dc.list_containers("cs.*") for cs in cs_services: self.dc.stop_container(cs) diff --git a/bazel-remote.yml b/bazel-remote.yml index ba155b1990..b98b18c7b3 100644 --- a/bazel-remote.yml +++ b/bazel-remote.yml @@ -1,4 +1,5 @@ version: "2.4" +name: bazel_remote services: bazel-remote: container_name: bazel-remote-cache diff --git a/demo/drkey/test.py b/demo/drkey/test.py index 19f08d40ce..0a6c589670 100644 --- a/demo/drkey/test.py +++ b/demo/drkey/test.py @@ -71,7 +71,7 @@ def setup_prepare(self): # Enable delegation for tester host on the fast side (server side), i.e. # allow the tester host to directly request the secret value from which # keys can be derived locally for any host. - tester_ip = self._container_ip("scion_disp_tester_%s" % self.server_isd_as.file_fmt()) + tester_ip = self._container_ip("disp_tester_%s" % self.server_isd_as.file_fmt()) cs_config = self._conf_dir(self.server_isd_as) // "cs*-1.toml" scion.update_toml({"drkey.delegation.scmp": [tester_ip]}, cs_config) @@ -82,9 +82,8 @@ def _run(self): # install demo binary in tester containers: drkey_demo = local["realpath"](self.get_executable("drkey-demo").executable).strip() - testers = ["tester_%s" % ia.file_fmt() for ia in {self.server_isd_as, self.client_isd_as}] - for tester in testers: - local["docker"]("cp", drkey_demo, tester + ":/bin/") + for ia in {self.server_isd_as, self.client_isd_as}: + self.dc("cp", drkey_demo, "tester_%s" % ia.file_fmt() + ":/bin/") # Define DRKey protocol identifiers and derivation typ for test for test in [ @@ -134,7 +133,7 @@ def _endhost_ip(self, isd_as: ISD_AS) -> str: """ Determine the IP used for the end host (client or server) in the given ISD-AS """ # The address must be the daemon IP (as it makes requests to the control # service on behalf of the end host application). 
- return self._container_ip("scion_sd%s" % isd_as.file_fmt()) + return self._container_ip("sd%s" % isd_as.file_fmt()) def _container_ip(self, container: str) -> str: """ Determine the IP of the container """ diff --git a/demo/file_transfer/file_transfer.py b/demo/file_transfer/file_transfer.py index f4ede50682..b5547b3de2 100644 --- a/demo/file_transfer/file_transfer.py +++ b/demo/file_transfer/file_transfer.py @@ -43,7 +43,7 @@ def _set_path_count(self, path_count): with open(config_name, "w") as f: json.dump(t, f, indent=2) # Reload the config. - self.dc("kill", "-s", "SIGHUP", "scion_sig_1-ff00_0_111") + self.dc("kill", "-s", "SIGHUP", "sig_1-ff00_0_111") # Give gateway some time to start using the new path count. time.sleep(2) @@ -86,7 +86,6 @@ def setup_prepare(self): with open(scion_dc, "r") as file: dc = yaml.load(file, Loader=yaml.FullLoader) dc["services"]["tc_setup"] = { - "container_name": "tc_setup", "image": "tester:latest", "cap_add": ["NET_ADMIN"], "volumes": [{ @@ -97,7 +96,7 @@ def setup_prepare(self): "entrypoint": ["/bin/sh", "-exc", "ls -l /share; /share/tc_setup.sh scn_000 16.0mbit ;" " /share/tc_setup.sh scn_001 16.0mbit"], - "depends_on": ["scion_br1-ff00_0_111-1", "scion_br1-ff00_0_111-2"], + "depends_on": ["br1-ff00_0_111-1", "br1-ff00_0_111-2"], "network_mode": "host", } with open(scion_dc, "w") as file: diff --git a/scion.sh b/scion.sh index cf851d4f64..7343c35d0a 100755 --- a/scion.sh +++ b/scion.sh @@ -6,7 +6,7 @@ cmd_bazel-remote() { mkdir -p "$HOME/.cache/bazel/remote" uid=$(id -u) gid=$(id -g) - USER_ID="$uid" GROUP_ID="$gid" docker compose --compatibility -f bazel-remote.yml -p bazel_remote up -d + USER_ID="$uid" GROUP_ID="$gid" docker compose -f bazel-remote.yml up -d } cmd_topo-clean() { @@ -36,7 +36,7 @@ cmd_topodot() { start_scion() { echo "Running the network..." 
if is_docker_be; then - docker compose --compatibility -f gen/scion-dc.yml -p scion up -d + docker compose -f gen/scion-dc.yml up -d return 0 else run_setup diff --git a/tools/dc b/tools/dc index a2912882aa..4a2951f050 100755 --- a/tools/dc +++ b/tools/dc @@ -76,10 +76,9 @@ cmd_monitoring() { # Runs docker compose for the given project dc() { - local project="$1" local dc_file="gen/$1-dc.yml" shift - COMPOSE_FILE="$dc_file" docker compose --compatibility -p "$project" --ansi never "$@" + COMPOSE_FILE="$dc_file" docker compose --ansi never "$@" } cmd_collect_logs() { diff --git a/tools/integration/docker.go b/tools/integration/docker.go index 055c391830..0b26344a31 100644 --- a/tools/integration/docker.go +++ b/tools/integration/docker.go @@ -38,9 +38,8 @@ var ( var dockerArgs []string func initDockerArgs() { - dockerArgs = []string{"compose", "--compatibility", - "-f", GenFile("scion-dc.yml"), "-p", "scion", "exec", "-T", "-e", - fmt.Sprintf("%s=1", GoIntegrationEnv)} + dockerArgs = []string{"compose", "-f", GenFile("scion-dc.yml"), "exec", "-T", + "-e", fmt.Sprintf("%s=1", GoIntegrationEnv)} } var _ Integration = (*dockerIntegration)(nil) diff --git a/tools/topology/common.py b/tools/topology/common.py index 934725effe..7f9b74a7eb 100644 --- a/tools/topology/common.py +++ b/tools/topology/common.py @@ -182,10 +182,6 @@ def sciond_name(topo_id): return 'sd%s' % topo_id.file_fmt() -def sciond_svc_name(topo_id): - return 'scion_%s' % sciond_name(topo_id) - - def json_default(o): if isinstance(o, AddressProxy): return str(o.ip) diff --git a/tools/topology/docker.py b/tools/topology/docker.py index b0594925e4..8957070591 100644 --- a/tools/topology/docker.py +++ b/tools/topology/docker.py @@ -25,7 +25,7 @@ ArgsTopoDicts, docker_host, docker_image, - sciond_svc_name, + sciond_name, ) from topology.docker_utils import DockerUtilsGenArgs, DockerUtilsGenerator from topology.net import NetworkDescription, IPNetwork @@ -54,6 +54,7 @@ def __init__(self, args): self.args = 
args self.dc_conf = { 'version': DOCKER_COMPOSE_CONFIG_VERSION, + 'name': 'scion', 'services': {}, 'networks': {}, 'volumes': {} @@ -62,7 +63,6 @@ def __init__(self, args): self.bridges = {} self.output_base = os.environ.get('SCION_OUTPUT_BASE', os.getcwd()) self.user = '%d:%d' % (os.getuid(), os.getgid()) - self.prefix = 'scion_' def generate(self): self._create_networks() @@ -145,18 +145,17 @@ def _br_conf(self, topo_id, topo, base): image = docker_image(self.args, 'posix-router') entry = { 'image': image, - 'container_name': self.prefix + k, 'networks': {}, 'user': self.user, 'volumes': ['%s:/share/conf:ro' % base], 'environment': { 'SCION_EXPERIMENTAL_BFD_DETECT_MULT': - '${SCION_EXPERIMENTAL_BFD_DETECT_MULT}', + '${SCION_EXPERIMENTAL_BFD_DETECT_MULT:-}', 'SCION_EXPERIMENTAL_BFD_DESIRED_MIN_TX': - '${SCION_EXPERIMENTAL_BFD_DESIRED_MIN_TX}', + '${SCION_EXPERIMENTAL_BFD_DESIRED_MIN_TX:-}', 'SCION_EXPERIMENTAL_BFD_REQUIRED_MIN_RX': - '${SCION_EXPERIMENTAL_BFD_REQUIRED_MIN_RX}', + '${SCION_EXPERIMENTAL_BFD_REQUIRED_MIN_RX:-}', }, 'command': ['--config', '/share/conf/%s.toml' % k] } @@ -170,18 +169,16 @@ def _br_conf(self, topo_id, topo, base): entry['networks'][self.bridges[net['net']]] = { '%s_address' % ipv: str(net[ipv]) } - self.dc_conf['services']['scion_%s' % k] = entry + self.dc_conf['services'][k] = entry def _control_service_conf(self, topo_id, topo, base): for k in topo.get("control_service", {}).keys(): entry = { 'image': docker_image(self.args, 'control'), - 'container_name': - self.prefix + k, - 'depends_on': ['scion_disp_%s' % k], + 'depends_on': ['disp_%s' % k], 'network_mode': - 'service:scion_disp_%s' % k, + 'service:disp_%s' % k, 'user': self.user, 'volumes': [ @@ -192,7 +189,7 @@ def _control_service_conf(self, topo_id, topo, base): ], 'command': ['--config', '/share/conf/%s.toml' % k] } - self.dc_conf['services']['scion_%s' % k] = entry + self.dc_conf['services'][k] = entry def _dispatcher_conf(self, topo_id, topo, base): image = 'dispatcher' @@ 
-221,7 +218,6 @@ def _dispatcher_conf(self, topo_id, topo, base): entry['networks'][self.bridges[net['net']]] = { '%s_address' % ipv: ip } - entry['container_name'] = '%sdisp_%s' % (self.prefix, disp_id) entry['volumes'].append(self._disp_vol(disp_id)) conf = '%s:/share/conf:rw' % base entry['volumes'].append(conf) @@ -229,12 +225,12 @@ def _dispatcher_conf(self, topo_id, topo, base): '--config', '/share/conf/disp_%s.toml' % disp_id ] - self.dc_conf['services']['scion_disp_%s' % disp_id] = entry + self.dc_conf['services']['disp_%s' % disp_id] = entry self.dc_conf['volumes'][self._disp_vol(disp_id).split(':') [0]] = None def _sciond_conf(self, topo_id, base): - name = sciond_svc_name(topo_id) + name = sciond_name(topo_id) net = self.elem_networks["sd" + topo_id.file_fmt()][0] ipv = 'ipv4' if ipv not in net: @@ -245,9 +241,7 @@ def _sciond_conf(self, topo_id, base): 'extra_hosts': ['jaeger:%s' % docker_host(self.args.docker)], 'image': docker_image(self.args, 'daemon'), - 'container_name': - '%ssd%s' % (self.prefix, topo_id.file_fmt()), - 'depends_on': ['scion_disp_%s' % disp_id], + 'depends_on': ['disp_%s' % disp_id], 'user': self.user, 'volumes': [ @@ -266,7 +260,7 @@ def _sciond_conf(self, topo_id, base): self.dc_conf['services'][name] = entry def _disp_vol(self, disp_id): - return 'vol_%sdisp_%s:/run/shm/dispatcher:rw' % (self.prefix, disp_id) + return 'vol_disp_%s:/run/shm/dispatcher:rw' % disp_id def _cache_vol(self): return self.output_base + '/gen-cache:/share/cache:rw' diff --git a/tools/topology/docker_utils.py b/tools/topology/docker_utils.py index fc15124bd7..90d4041f51 100644 --- a/tools/topology/docker_utils.py +++ b/tools/topology/docker_utils.py @@ -75,19 +75,18 @@ def _test_conf(self, topo_id): name = 'tester_%s' % topo_id.file_fmt() entry = { 'image': docker_image(self.args, 'tester'), - 'container_name': 'tester_%s' % topo_id.file_fmt(), - 'depends_on': ['scion_disp_%s' % name], + 'depends_on': ['disp_%s' % name], 'privileged': True, 'entrypoint': 
'sh tester.sh', 'environment': {}, # 'user': self.user, 'volumes': [ - 'vol_scion_disp_%s:/run/shm/dispatcher:rw' % name, + 'vol_disp_%s:/run/shm/dispatcher:rw' % name, self.output_base + '/logs:' + cntr_base + '/logs:rw', self.output_base + '/gen:' + cntr_base + '/gen:rw', self.output_base + '/gen-certs:' + cntr_base + '/gen-certs:rw' ], - 'network_mode': 'service:scion_disp_%s' % name, + 'network_mode': 'service:disp_%s' % name, } net = self.args.networks[name][0] ipv = 'ipv4' diff --git a/tools/topology/monitoring.py b/tools/topology/monitoring.py index 930a7b5210..974dde5273 100644 --- a/tools/topology/monitoring.py +++ b/tools/topology/monitoring.py @@ -166,13 +166,12 @@ def _write_disp_file(self): def _write_dc_file(self): # Merged yeager and prometheus files. - name = 'monitoring' monitoring_dc = { 'version': DOCKER_COMPOSE_CONFIG_VERSION, + 'name': 'monitoring', 'services': { 'prometheus': { 'image': 'prom/prometheus:v2.47.2', - 'container_name': name+'prometheus', 'network_mode': 'host', 'volumes': [ self.output_base + '/gen:/prom-config:ro' @@ -181,7 +180,6 @@ def _write_dc_file(self): }, 'jaeger': { 'image': 'jaegertracing/all-in-one:1.22.0', - 'container_name': name+'yeager', 'user': '%s:%s' % (str(os.getuid()), str(os.getgid())), 'ports': [ '6831:6831/udp', diff --git a/tools/topology/sig.py b/tools/topology/sig.py index 9ddd2121ef..73c4fee21f 100644 --- a/tools/topology/sig.py +++ b/tools/topology/sig.py @@ -23,7 +23,7 @@ from topology.common import ( ArgsBase, json_default, - sciond_svc_name, + sciond_name, SD_API_PORT, SIG_CONFIG_NAME, translate_features, @@ -55,7 +55,6 @@ def __init__(self, args): self.dc_conf = args.dc_conf self.user = '%d:%d' % (os.getuid(), os.getgid()) self.output_base = os.environ.get('SCION_OUTPUT_BASE', os.getcwd()) - self.prefix = '' def generate(self): for topo_id, topo in self.args.topo_dicts.items(): @@ -72,8 +71,6 @@ def _dispatcher_conf(self, topo_id, base): entry = { 'image': 'dispatcher', - 'container_name': - 
'scion_%sdisp_sig_%s' % (self.prefix, topo_id.file_fmt()), 'depends_on': { 'utils_chowner': { 'condition': 'service_started' @@ -98,15 +95,14 @@ def _dispatcher_conf(self, topo_id, base): entry['networks'][self.args.bridges[net['net']]] = { '%s_address' % ipv: str(net[ipv]) } - self.dc_conf['services']['scion_disp_sig_%s' % + self.dc_conf['services']['disp_sig_%s' % topo_id.file_fmt()] = entry - vol_name = 'vol_scion_%sdisp_sig_%s' % (self.prefix, - topo_id.file_fmt()) + vol_name = 'vol_disp_sig_%s' % topo_id.file_fmt() self.dc_conf['volumes'][vol_name] = None def _sig_dc_conf(self, topo_id, base): - setup_name = 'scion_sig_setup_%s' % topo_id.file_fmt() - disp_id = 'scion_disp_sig_%s' % topo_id.file_fmt() + setup_name = 'sig_setup_%s' % topo_id.file_fmt() + disp_id = 'disp_sig_%s' % topo_id.file_fmt() self.dc_conf['services'][setup_name] = { 'image': 'tester:latest', 'depends_on': [disp_id], @@ -114,14 +110,12 @@ def _sig_dc_conf(self, topo_id, base): 'privileged': True, 'network_mode': 'service:%s' % disp_id, } - self.dc_conf['services']['scion_sig_%s' % topo_id.file_fmt()] = { + self.dc_conf['services']['sig_%s' % topo_id.file_fmt()] = { 'image': 'posix-gateway:latest', - 'container_name': - 'scion_%ssig_%s' % (self.prefix, topo_id.file_fmt()), 'depends_on': [ disp_id, - sciond_svc_name(topo_id), + sciond_name(topo_id), setup_name, ], 'environment': { @@ -196,5 +190,4 @@ def _sig_toml(self, topo_id, topo): write_file(path, toml.dumps(sig_conf)) def _disp_vol(self, topo_id): - return 'vol_scion_%sdisp_sig_%s:/run/shm/dispatcher:rw' % ( - self.prefix, topo_id.file_fmt()) + return 'vol_disp_sig_%s:/run/shm/dispatcher:rw' % topo_id.file_fmt()