From 549eb9578eb4390695e2edbf274fd8bd03370142 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Sat, 17 Oct 2020 21:48:12 +0800 Subject: [PATCH] feat: integrate with RaspiBlitz --- images/utils/launcher/__init__.py | 337 ++++++------ images/utils/launcher/auto_unlock.py | 36 -- images/utils/launcher/{shell => }/banner.txt | 0 images/utils/launcher/check_wallets.py | 533 ------------------ images/utils/launcher/close_other_utils.py | 7 +- images/utils/launcher/config/__init__.py | 2 +- images/utils/launcher/config/config.py | 137 +++-- images/utils/launcher/config/loader.py | 66 --- images/utils/launcher/config/template.py | 4 +- images/utils/launcher/errors.py | 8 + images/utils/launcher/node/__init__.py | 540 ++++++++++--------- images/utils/launcher/node/arby.py | 2 +- images/utils/launcher/node/base.py | 490 ++++++++--------- images/utils/launcher/node/bitcoind.py | 52 +- images/utils/launcher/node/boltz.py | 22 +- images/utils/launcher/node/btcd.py | 54 -- images/utils/launcher/node/connext.py | 50 +- images/utils/launcher/node/geth.py | 52 +- images/utils/launcher/node/image.py | 173 +++--- images/utils/launcher/node/lnd.py | 147 +++-- images/utils/launcher/node/proxy.py | 4 + images/utils/launcher/node/pty.py | 482 +++++++++++++++++ images/utils/launcher/node/webui.py | 4 + images/utils/launcher/node/xud.py | 528 +++++++++++++++--- images/utils/launcher/shell/__init__.py | 1 - images/utils/launcher/shell/command.py | 126 ----- images/utils/launcher/shell/history.py | 67 --- images/utils/launcher/shell/shell.py | 530 ------------------ images/utils/launcher/utils.py | 174 ++++-- images/utils/launcher/warm_up.py | 12 - images/utils/requirements.txt | 3 +- images/utils/setup.py | 4 +- images/xud/entrypoint.sh | 10 - setup.sh | 117 +++- tools/core/docker.py | 4 +- tools/core/image.py | 23 +- 36 files changed, 2250 insertions(+), 2551 deletions(-) delete mode 100644 images/utils/launcher/auto_unlock.py rename images/utils/launcher/{shell => }/banner.txt (100%) delete mode 100644 images/utils/launcher/check_wallets.py delete mode 100644 images/utils/launcher/config/loader.py delete mode 100644 images/utils/launcher/node/btcd.py create mode 100644 images/utils/launcher/node/pty.py delete mode 100644 images/utils/launcher/shell/__init__.py delete mode 100644 images/utils/launcher/shell/command.py delete mode 100644 images/utils/launcher/shell/history.py delete mode 100644 images/utils/launcher/shell/shell.py delete mode 100644 images/utils/launcher/warm_up.py diff --git a/images/utils/launcher/__init__.py b/images/utils/launcher/__init__.py index 920bcac20..fee439062 100644 --- a/images/utils/launcher/__init__.py +++ b/images/utils/launcher/__init__.py @@ -1,19 +1,17 @@ import logging import shlex import traceback -import os.path +from threading import Event -from launcher.config import Config, ConfigLoader -from launcher.shell import Shell -from launcher.node import NodeManager, NodeNotFound -from launcher.utils import ParallelExecutionError, ArgumentError - -from launcher.check_wallets import Action as CheckWalletsAction from launcher.close_other_utils import Action as CloseOtherUtilsAction -from launcher.auto_unlock import Action as AutoUnlockAction -from launcher.warm_up import Action as WarmUpAction -from launcher.errors import FatalError, ConfigError, ConfigErrorScope +from launcher.config import Config +from launcher.errors import FatalError, ConfigError, ConfigErrorScope, NoWaiting, ParallelError +from launcher.node import NodeManager, ServiceNotFound, ContainerNotFound +from 
launcher.utils import ArgumentError +import docker.errors +import os +logger = logging.getLogger(__name__) HELP = """\ Xucli shortcut commands @@ -91,165 +89,153 @@ --inbound [inbound_balance] deposit from boltz (btc/ltc) boltzcli withdraw
withdraw from boltz channel - """ - -def init_logging(): - fmt = "%(asctime)s.%(msecs)03d %(levelname)5s %(process)d --- [%(threadName)-15s] %(name)-30s: %(message)s" - datefmt = "%Y-%m-%d %H:%M:%S" - if os.path.exists("/mnt/hostfs/tmp"): - logfile = "/mnt/hostfs/tmp/xud-docker.log" - else: - logfile = "xud-docker.log" - - logging.basicConfig(format=fmt, datefmt=datefmt, level=logging.INFO, filename=logfile, filemode="w") - - level_config = { - "launcher": logging.DEBUG, - } - - for logger, level in level_config.items(): - logging.getLogger(logger).setLevel(level) - - -init_logging() +REPORT = """Please click on https://github.com/ExchangeUnion/xud/issues/new?assignees=kilrau&labels=bug&template=bug-\ +report.md&title=Short%2C+concise+description+of+the+bug, describe your issue, drag and drop the file "{network}.log" \ +which is located in "{logs_dir}" into your browser window and submit your issue.""" class XudEnv: - def __init__(self, config, shell): - self.logger = logging.getLogger("launcher.XudEnv") - + def __init__(self, config: Config): self.config = config - self.shell = shell - - self.node_manager = NodeManager(config, shell) - - def delegate_cmd_to_xucli(self, cmd): - self.node_manager.get_node("xud").cli(cmd, self.shell) - - def command_report(self): - logs_dir = f"{self.config.home_dir}/{self.config.network}/logs" - print(f"""Please click on https://github.com/ExchangeUnion/xud/issues/\ -new?assignees=kilrau&labels=bug&template=bug-report.md&title=Short%2C+concise+\ -description+of+the+bug, describe your issue, drag and drop the file "{self.config.network}\ -.log" which is located in "{logs_dir}" into your browser window and submit \ -your issue.""") - - def handle_command(self, cmd): - try: - args = shlex.split(cmd) - arg0 = args[0] + self.node_manager = NodeManager(config) + + def handle_command(self, cmd: str) -> None: + args = shlex.split(cmd) + arg0 = args[0] + args = args[1:] + + if arg0 == "help": + print(HELP) + elif arg0 == "status": + self.node_manager.status() + elif arg0 == "report": + print(REPORT.format(self.config.network, self.config.logs_dir)) + elif arg0 == "logs": + self.node_manager.cmd_logs.execute(*args) + elif arg0 == "start": + self.node_manager.cmd_start.execute(*args) + elif arg0 == "stop": + self.node_manager.cmd_stop.execute(*args) + elif arg0 == "restart": + self.node_manager.cmd_restart.execute(*args) + elif arg0 == "_create": + self.node_manager.cmd_create.execute(*args) + elif arg0 == "rm": + self.node_manager.cmd_remove.execute(*args) + elif arg0 == "down": + self.node_manager.down() + elif arg0 == "up": + self.node_manager.up() + elif arg0 == "bitcoin-cli": + bitcoind = self.node_manager.get_service("bitcoind") + bitcoind.cli(" ".join(args)) + elif arg0 == "litecoin-cli": + litecoind = self.node_manager.get_service("litecoind") + litecoind.cli(" ".join(args)) + elif arg0 == "lndbtc-lncli": + lndbtc = self.node_manager.get_service("lndbtc") + lndbtc.cli(" ".join(args)) + elif arg0 == "lndltc-lncli": + lndltc = self.node_manager.get_service("lndltc") + lndltc.cli(" ".join(args)) + elif arg0 == "geth": + geth = self.node_manager.get_service("geth") + geth.cli(" ".join(args)) + elif arg0 == "xucli": + xud = self.node_manager.get_service("xud") + xud.cli(" ".join(args)) + elif arg0 == "boltzcli": + boltz = self.node_manager.get_service("boltz") + boltz.cli(" ".join(args)) + elif arg0 == "deposit": + boltz = self.node_manager.get_service("boltz") + if len(args) == 0: + print("Missing chain") + chain = args[0] args = args[1:] - if arg0 == "status": - 
self.node_manager.status() - elif arg0 == "report": - self.command_report() - elif arg0 == "logs": - self.node_manager.logs(*args) - elif arg0 == "start": - self.node_manager.start(*args) - elif arg0 == "stop": - self.node_manager.stop(*args) - elif arg0 == "restart": - self.node_manager.restart(*args) - elif arg0 == "down": - self.node_manager.down() - elif arg0 == "up": - self.node_manager.up() - elif arg0 == "btcctl": - self.node_manager.cli("btcd", *args) - elif arg0 == "ltcctl": - self.node_manager.cli("ltcd", *args) - elif arg0 == "bitcoin-cli": - self.node_manager.cli("bitcoind", *args) - elif arg0 == "litecoin-cli": - self.node_manager.cli("litecoind", *args) - elif arg0 == "lndbtc-lncli": - self.node_manager.cli("lndbtc", *args) - elif arg0 == "lndltc-lncli": - self.node_manager.cli("lndltc", *args) - elif arg0 == "geth": - self.node_manager.cli("geth", *args) - elif arg0 == "xucli": - self.node_manager.cli("xud", *args) - elif arg0 == "boltzcli": - self.node_manager.cli("boltz", *args) - elif arg0 == "deposit": - if len(args) == 0: - print("Missing chain") - chain = args[0] - args = args[1:] - if chain == "btc": - self.node_manager.cli("boltz", "btc", "deposit", *args) - elif chain == "ltc": - self.node_manager.cli("boltz", "ltc", "deposit", *args) - else: - self.node_manager.cli("xud", "walletdeposit", chain, *args) - elif arg0 == "withdraw": - if len(args) == 0: - print("Missing chain") - chain = args[0] - args = args[1:] - if chain == "btc": - self.node_manager.cli("boltz", "btc", "withdraw", *args) - elif chain == "ltc": - self.node_manager.cli("boltz", "ltc", "withdraw", *args) - else: - self.node_manager.cli("xud", "walletwithdraw", chain, *args) - elif arg0 == "help": - print(HELP) + if chain == "btc": + boltz.cli("btc deposit " + " ".join(args)) + elif chain == "ltc": + boltz.cli("ltc deposit " + " ".join(args)) else: - self.delegate_cmd_to_xucli(cmd) - - except NodeNotFound as e: - if str(e) == "boltz" and self.config.network == "simnet": - print("Not available on simnet") - return - - print(f"Node not found: {e}") - except ArgumentError as e: - print(e.usage) - print(f"error: {e}") - - def check_wallets(self): - CheckWalletsAction(self.node_manager).execute() - - def wait_for_channels(self): - # TODO wait for channels - pass - - def auto_unlock(self): - AutoUnlockAction(self.node_manager).execute() + xud = self.node_manager.get_service("xud") + xud.cli("walletdeposit %s %s" % (chain, " ".join(args))) + elif arg0 == "withdraw": + boltz = self.node_manager.get_service("boltz") + if len(args) == 0: + print("Missing chain") + chain = args[0] + args = args[1:] + if chain == "btc": + boltz.cli("btc withdraw " + " ".join(args)) + elif chain == "ltc": + boltz.cli("ltc withdraw " + " ".join(args)) + else: + xud = self.node_manager.get_service("xud") + xud.cli("walletwithdraw %s %s" % (chain, " ".join(args))) + else: + xud = self.node_manager.get_service("xud") + xud.cli(cmd) def close_other_utils(self): - CloseOtherUtilsAction(self.config.network, self.shell).execute() - - def warm_up(self): - WarmUpAction(self.node_manager).execute() - - def pre_start(self): - self.warm_up() - self.check_wallets() + CloseOtherUtilsAction(self.config.network).execute() - if self.config.network == "simnet": - self.wait_for_channels() + def pre_shell(self): + print("\nšŸƒ Warming up...\n") - self.auto_unlock() + xud = self.node_manager.get_service("xud") + stop = Event() + try: + # FIXME pty signal only works in main thread + xud.ensure_ready(stop) + except (KeyboardInterrupt, NoWaiting): + 
stop.set() + raise self.close_other_utils() def start(self): - self.logger.info("Start %s", self.config.network) - - up_env = self.node_manager.update() - - if up_env: - self.node_manager.up() - self.pre_start() - - self.logger.info("Start shell") - self.shell.start(f"{self.config.network} > ", self.handle_command) + logger.info("Start %s", self.config.network) + + self.node_manager.update() + + self.node_manager.up() + + self.pre_shell() + + logger.info("Start shell") + banner_file = os.path.dirname(__file__) + "/banner.txt" + with open(banner_file) as f: + print(f.read(), end="", flush=True) + prompt = f"{self.config.network} > " + while True: + try: + cmd = input(prompt) + cmd = cmd.strip() + if cmd == "": + continue + if cmd == "exit": + break + try: + self.handle_command(cmd) + except KeyboardInterrupt: + pass + except ServiceNotFound as e: + print("Service not found: %s" % e) + except ContainerNotFound as e: + print("Service not running: %s" % e) + except docker.errors.APIError as e: + print(e) + except ArgumentError as e: + print(e.usage) + print(f"Error: {e}") + except: + logger.exception("[Shell] Failed to execute command: %s", cmd) + traceback.print_exc() + except KeyboardInterrupt: + print() def print_config_error_cause(e: ConfigError) -> None: @@ -262,40 +248,25 @@ def print_config_error_cause(e: ConfigError) -> None: class Launcher: - def __init__(self): - self.logger = logging.getLogger("launcher.Launcher") - self.logfile = None - def launch(self): - shell = Shell() config = None try: - config = Config(ConfigLoader()) - shell.set_network_dir(config.network_dir) # will create shell history file in network_dir - env = XudEnv(config, shell) + config = Config() + env = XudEnv(config) env.start() except KeyboardInterrupt: print() - except ConfigError as e: - if e.scope == ConfigErrorScope.COMMAND_LINE_ARGS: - print("Failed to parse command-line arguments, exiting.") - print_config_error_cause(e) - elif e.scope == ConfigErrorScope.GENERAL_CONF: - print("Failed to parse config file {}, exiting.".format(e.conf_file)) - print_config_error_cause(e) - elif e.scope == ConfigErrorScope.NETWORK_CONF: - print("Failed to parse config file {}, exiting.".format(e.conf_file)) - print_config_error_cause(e) + exit(1) + except NoWaiting: + exit(1) except FatalError as e: - if config and config.logfile: - print("{}. For more details, see {}".format(e, config.logfile)) - else: - traceback.print_exc() - except ParallelExecutionError: - pass - except Exception: # exclude system exceptions like SystemExit - self.logger.exception("Unexpected exception during launching") - traceback.print_exc() - finally: - shell.stop() - + msg = "šŸ’€ %s." 
% str(e) + if config: + msg += " For more details, see %s" % config.host_logfile + print(msg) + exit(1) + except ParallelError: + if config: + msg = "For more details, see %s" % config.host_logfile + print(msg) + exit(1) diff --git a/images/utils/launcher/auto_unlock.py b/images/utils/launcher/auto_unlock.py deleted file mode 100644 index 430680116..000000000 --- a/images/utils/launcher/auto_unlock.py +++ /dev/null @@ -1,36 +0,0 @@ -from .node import XudApiError - - -class Action: - def __init__(self, node_manager): - self.node_manager = node_manager - - @property - def shell(self): - return self.node_manager.shell - - def xud_is_locked(self, xud): - try: - info = xud.api.getinfo() - return False - except XudApiError as e: - if "xud is locked" in str(e): - return True - return False - - def xucli_unlock_wrapper(self, xud): - while True: - try: - print() - xud.cli("unlock", self.shell) - break - except KeyboardInterrupt: - break - except: - pass - - def execute(self): - xud = self.node_manager.get_node("xud") - if not self.xud_is_locked(xud): - return - self.xucli_unlock_wrapper(xud) diff --git a/images/utils/launcher/shell/banner.txt b/images/utils/launcher/banner.txt similarity index 100% rename from images/utils/launcher/shell/banner.txt rename to images/utils/launcher/banner.txt diff --git a/images/utils/launcher/check_wallets.py b/images/utils/launcher/check_wallets.py deleted file mode 100644 index 9b884d32f..000000000 --- a/images/utils/launcher/check_wallets.py +++ /dev/null @@ -1,533 +0,0 @@ -import logging -import sys -import os -import docker -import time -from concurrent.futures import ThreadPoolExecutor, wait -from docker.models.containers import Container -from datetime import datetime -import re - -from .node import NodeManager -from .node.xud import PasswordNotMatch, InvalidPassword, MnemonicNot24Words -from .utils import normalize_path, get_hostfs_file -from .errors import FatalError -from .types import LndChain, XudNetwork -from .table import ServiceTable - - -class CFHeaderState: - def __init__(self): - self.current = 0 - self.total = 0 - self.ready = False - - -class Action: - def __init__(self, node_manager: NodeManager): - self.logger = logging.getLogger("launcher.CheckWalletsAction") - self.node_manager = node_manager - self.lnd_cfheaders = {} - - @property - def shell(self): - return self.node_manager.shell - - @property - def config(self): - return self.node_manager.config - - @property - def network(self) -> XudNetwork: - return self.config.network - - def lnd_has_unlock_log_line(self, c): - pass - - def restart_lnds(self, network: XudNetwork): - """ - This is temporary solution for lnd unlock stuck problem - TODO remove it later - """ - def restart(name): - client = docker.from_env() - c = client.containers.get(name) - c.restart() - return c - - def stop(name): - client = docker.from_env() - c = client.containers.get(name) - c.stop() - return c - - def start(name): - client = docker.from_env() - c = client.containers.get(name) - c.start() - return c - - def xud_restart(): - name = f"{network}_xud_1" - - self.logger.debug("Restarting %s", name) - c = restart(name) - self.logger.debug("Restarted %s", name) - - # xud is locked, run 'xucli unlock', 'xucli create', or 'xucli restore' then try again - for i in range(10): - exit_code, output = c.exec_run("xucli getinfo") - result = output.decode() - if "xud is locked" in result: - self.logger.debug("Xud is locked") - return - time.sleep(10) - - raise RuntimeError("Restarted xud should be locked") - - def 
lnd_restart(chain): - if chain == "bitcoin": - name = f"{network}_lndbtc_1" - short_name = "lndbtc" - else: - name = f"{network}_lndltc_1" - short_name = "lndltc" - - client = docker.from_env() - c: Container = client.containers.get(name) - cmd = f"lncli -n {network} -c {chain} getinfo" - exit_code, output = c.exec_run(cmd) - self.logger.debug("[Execute] %s: exit_code=%s\n%s", cmd, exit_code, output.decode()) - - if exit_code == 0: - self.logger.debug("Skip restarting %s", name) - return - - self.logger.debug("Restarting %s", name) - c = stop(name) - t1 = datetime.now() - c = start(name) - self.logger.debug("Restarted %s", name) - - # [INF] LTND: Waiting for wallet encryption password. Use `lncli create` to create a wallet, `lncli unlock` to unlock an existing wallet, or `lncli changepassword` to change the password of an existing wallet and unlock it. - for line in c.logs(stream=True, follow=True, since=t1): - line = line.decode().strip() - self.logger.debug("<%s> %s", short_name, line) - if "Waiting for wallet encryption password" in line: - break - - self.logger.debug("Sleep 15 seconds. For God's sake may %s work normally!!!", short_name) - time.sleep(15) - - with ThreadPoolExecutor(max_workers=3, thread_name_prefix="RestartLnd") as executor: - f1 = executor.submit(lnd_restart, "bitcoin") - f2 = executor.submit(lnd_restart, "litecoin") - - try: - f1.result() - except Exception as e: - raise RuntimeError("Failed to restart lndbtc") from e - - try: - f2.result() - except Exception as e: - raise RuntimeError("Failed to restart lndltc") from e - - f3 = executor.submit(xud_restart) - - try: - f3.result() - except Exception as e: - raise RuntimeError("Failed to restart xud") from e - - @staticmethod - def _get_percentage(current, total): - if total == 0: - return "0.00%% (%d/%d)" % (current, total) - if current >= total: - return "100.00%% (%d/%d)" % (current, total) - p = current / total * 100 - if p > 0.005: - p = p - 0.005 - else: - p = 0 - return "%.2f%% (%d/%d)" % (p, current, total) - - def _print_lnd_cfheaders(self, erase_last_line=True): - services = {} - - if "bitcoin" in self.lnd_cfheaders: - lndbtc = self.lnd_cfheaders["bitcoin"] - services["lndbtc"] = "Syncing " + self._get_percentage(lndbtc.current, lndbtc.total) - - if "litecoin" in self.lnd_cfheaders: - lndltc = self.lnd_cfheaders["litecoin"] - services["lndltc"] = "Syncing " + self._get_percentage(lndltc.current, lndltc.total) - - table = ServiceTable(services) - table_str = str(table) - if erase_last_line: - print("\033[%dF" % len(table_str.splitlines()), end="", flush=True) - print(table_str) - - def lnd_ready(self, chain: LndChain) -> bool: - network = self.node_manager.config.network - client = docker.from_env() - if chain == "bitcoin": - name = f"{network}_lndbtc_1" - layer1_node = "bitcoind" - else: - name = f"{network}_lndltc_1" - layer1_node = "litecoind" - lnd: Container = client.containers.get(name) - assert lnd.status == "running" - - nodes = self.config.nodes - - # Wait for lnd synced_to_chain = true - if self.node_manager.newly_installed: - if layer1_node in nodes and nodes[layer1_node]["mode"] in ["neutrino", "light"] \ - or self.config.network == "simnet": - started_at = lnd.attrs["State"]["StartedAt"] # e.g. 
2020-06-22T17:26:01.541780733Z - started_at = started_at.split(".")[0] - t_utc = datetime.strptime(started_at, "%Y-%m-%dT%H:%M:%S") - t_local = datetime.fromtimestamp(t_utc.timestamp()) - - p0 = re.compile(r"^.*Fully caught up with cfheaders at height (\d+), waiting at tip for new blocks$") - if self.config.network == "simnet": - p1 = re.compile(r"^.*Writing cfheaders at height=(\d+) to next checkpoint$") - else: - p1 = re.compile(r"^.*Fetching set of checkpointed cfheaders filters from height=(\d+).*$") - p2 = re.compile(r"^.*Syncing to block height (\d+) from peer.*$") - - for line in lnd.logs(stream=True, follow=True, since=t_local): - line = line.decode().strip() - self.logger.debug("<%s> %s", name, line) - m = p0.match(line) - if m: - self.lnd_cfheaders[chain].current = int(m.group(1)) - self.lnd_cfheaders[chain].ready = True - self._print_lnd_cfheaders() - break - else: - m = p1.match(line) - if m: - self.lnd_cfheaders[chain].current = int(m.group(1)) - self._print_lnd_cfheaders() - else: - m = p2.match(line) - if m: - self.lnd_cfheaders[chain].total = int(m.group(1)) - self._print_lnd_cfheaders() - - cmd = f"lncli -n {network} -c {chain} getinfo" - try: - exit_code, output = lnd.exec_run(cmd) - self.logger.debug("[Execute] %s: exit_code=%s\n%s", cmd, exit_code, output.decode()) - except: - self.logger.exception("Failed to exec \"%s\" in container %s", cmd, name) - return False - - - # [lncli] open /root/.lnd/tls.cert: no such file or directory - # [lncli] unable to read macaroon path (check the network setting!): open /root/.lnd/data/chain/bitcoin/testnet/admin.macaroon: no such file or directory - # [lncli] Wallet is encrypted. Please unlock using 'lncli unlock', or set password using 'lncli create' if this is the first time starting lnd. - return exit_code == 0 or exit_code == 1 and ( - "unable to read macaroon path" in output.decode() or - "Wallet is encrypted" in output.decode() - ) - - def ensure_lnd_ready(self, chain: LndChain) -> None: - if chain == "bitcoin": - name = f"lndbtc" - else: - name = f"lndltc" - for i in range(100): - if self.lnd_ready(chain): - self.logger.debug(f"{name.capitalize()} is ready") - return - time.sleep(1) - raise RuntimeError(f"{name.capitalize()} took too long to be ready") - - def ensure_layer2_ready(self) -> None: - client = docker.from_env() - xud: Container = client.containers.get(f"{self.network}_xud_1") - cmd = "xucli getinfo -j" - - xud_ok = False - - # Error: ENOENT: no such file or directory, open '/root/.xud/tls.cert' - # xud is starting... 
try again in a few seconds - # xud is locked, run 'xucli unlock', 'xucli create', or 'xucli restore' then try again - while True: - exit_code, output = xud.exec_run(cmd) - self.logger.debug("[Execute] %s: exit_code=%s\n%s", cmd, exit_code, output.decode()) - if exit_code == 0: - xud_ok = True - break - if exit_code == 1 and "xud is locked" in output.decode(): - break - time.sleep(3) - self.logger.debug("Xud is ready") - - if xud_ok: - return - - nodes = self.config.nodes - if self.node_manager.newly_installed: - if self.network == "simnet": - if self.config.nodes["lndbtc"]["mode"] == "native": - self.lnd_cfheaders["bitcoin"] = CFHeaderState() - if self.config.nodes["lndltc"]["mode"] == "native": - self.lnd_cfheaders["litecoin"] = CFHeaderState() - self.lnd_cfheaders["litecoin"] = CFHeaderState() - if "bitcoind" in nodes and nodes["bitcoind"]["mode"] in ["neutrino", "light"]: - self.lnd_cfheaders["bitcoin"] = CFHeaderState() - if "litecoind" in nodes and nodes["litecoind"]["mode"] in ["neutrino", "light"]: - self.lnd_cfheaders["litecoin"] = CFHeaderState() - - if len(self.lnd_cfheaders) > 0: - print("Syncing light clients:") - self._print_lnd_cfheaders(erase_last_line=False) - - with ThreadPoolExecutor(max_workers=len(self.lnd_cfheaders), thread_name_prefix="LndReady") as executor: - futs = {} - for chain in self.lnd_cfheaders: - futs[executor.submit(self.ensure_lnd_ready, chain)] = chain - - done, not_done = wait(futs) - - if len(not_done) > 0: - for f in not_done: - f.cancel() - lnds = ", ".join([futs[f] for f in not_done]) - raise FatalError("Failed to wait for {} to be ready".format(lnds)) - - if self.node_manager.newly_installed: - print() - - def xucli_create_wrapper(self, xud): - counter = 0 - ok = False - while counter < 3: - try: - xud.cli("create", self.shell) - while True: - confirmed = self.shell.confirm("YOU WILL NOT BE ABLE TO DISPLAY YOUR XUD SEED AGAIN. 
Press ENTER to continue...") - if confirmed: - break - ok = True - break - except (PasswordNotMatch, InvalidPassword): - counter += 1 - continue - if not ok: - raise Exception("Failed to create wallets") - - def xucli_restore_wrapper(self, xud): - counter = 0 - ok = False - while counter < 3: - try: - if self.config.restore_dir == "/tmp/fake-backup": - command = f"restore" - else: - command = f"restore /mnt/hostfs{self.config.restore_dir}" - xud.cli(command, self.shell) - ok = True - break - except (PasswordNotMatch, InvalidPassword, MnemonicNot24Words): - counter += 1 - continue - if not ok: - raise Exception("Failed to restore wallets") - - def check_backup_dir(self, backup_dir): - assert not backup_dir.startswith("/mnt/hostfs") - - hostfs_dir = get_hostfs_file(backup_dir) - - if not os.path.exists(hostfs_dir): - return False, "not existed" - - if not os.path.isdir(hostfs_dir): - return False, "not a directory" - - if not os.access(hostfs_dir, os.R_OK): - return False, "not readable" - - if not os.access(hostfs_dir, os.W_OK): - return False, "not writable" - - return True, None - - def check_restore_dir(self, restore_dir): - return self.check_backup_dir(restore_dir) - - def check_restore_dir_files(self, restore_dir): - files = os.listdir(get_hostfs_file(restore_dir)) - contents = [] - if "xud" in files: - contents.append("xud") - if "lnd-BTC" in files: - contents.append("lndbtc") - if "lnd-LTC" in files: - contents.append("lndltc") - return contents - - def setup_backup_dir(self): - if self.config.backup_dir: - return - - backup_dir = None - - while True: - reply = self.shell.input("Enter path to backup location: ") - reply = reply.strip() - if len(reply) == 0: - continue - - backup_dir = normalize_path(reply) - - print("Checking backup location... ", end="") - sys.stdout.flush() - ok, reason = self.check_backup_dir(backup_dir) - if ok: - print("OK.") - break - else: - print(f"Failed. ", end="") - self.logger.debug(f"Failed to check backup dir {backup_dir}: {reason}") - sys.stdout.flush() - r = self.shell.no_or_yes("Retry?") - if r == "no": - self.node_manager.down() - raise FatalError("Backup directory not available") - - self.config.backup_dir = backup_dir - - def is_backup_available(self): - if self.config.backup_dir is None: - return False - - ok, reason = self.check_backup_dir(self.config.backup_dir) - - if not ok: - return False - - return True - - def setup_restore_dir(self) -> None: - """This function will try to interactively setting up restore_dir. And - store it in self._config.restore_dir - - :return: None - """ - if self.config.restore_dir: - return - - restore_dir = None - - while True: - reply = self.shell.input("Please paste the path to your XUD backup to restore your channel balance, your keys and other historical data: ") - reply = reply.strip() - if len(reply) == 0: - continue - - restore_dir = normalize_path(reply) - - print("Checking files... ", end="") - sys.stdout.flush() - ok, reason = self.check_restore_dir(restore_dir) - if ok: - contents = self.check_restore_dir_files(restore_dir) - if len(contents) > 0: - if len(contents) > 1: - contents_text = ", ".join(contents[:-1]) + " and " + contents[-1] - else: - contents_text = contents[0] - r = self.shell.yes_or_no(f"Looking good. This will restore {contents_text}. Do you wish to continue?") - if r == "yes": - break - else: - restore_dir = None - break - else: - r = self.shell.yes_or_no("No backup files found. 
Do you wish to continue WITHOUT restoring channel balance, keys and historical data?") - if r == "yes": - restore_dir = "/tmp/fake-backup" - break - else: - print(f"Path not available. ", end="") - self.logger.info(f"Failed to check restore dir {restore_dir}: {reason}") - sys.stdout.flush() - r = self.shell.yes_or_no("Do you wish to continue WITHOUT restoring channel balance, keys and historical data?") - if r == "yes": - restore_dir = "/tmp/fake-backup" - break - - self.config.restore_dir = restore_dir - - def execute(self): - xud = self.node_manager.get_node("xud") - self.ensure_layer2_ready() - if self.node_manager.newly_installed: - while True: - print("Do you want to create a new xud environment or restore an existing one?") - print("1) Create New") - print("2) Restore Existing") - reply = self.shell.input("Please choose: ") - reply = reply.strip() - if reply == "1": - try: - self.xucli_create_wrapper(xud) - break - except: - pass - elif reply == "2": - self.setup_restore_dir() - if self.config.restore_dir: - if self.config.restore_dir != "/tmp/fake-backup": - r = self.shell.yes_or_no("BEWARE: Restoring your environment will close your existing lnd channels and restore channel balance in your wallet. Do you wish to continue?") - if r == "yes": - try: - self.xucli_restore_wrapper(xud) - break - except: - pass - else: - try: - self.xucli_restore_wrapper(xud) - break - except: - pass - - self.config.restore_dir = None - - if not self.is_backup_available(): - print() - print("Please enter a path to a destination where to store a backup of your environment. It includes everything, but NOT your wallet balance which is secured by your XUD SEED. The path should be an external drive, like a USB or network drive, which is permanently available on your device since backups are written constantly.") - print() - self.config.backup_dir = None - self.setup_backup_dir() - - if self.network in ["simnet", "testnet", "mainnet"]: - print("\nClient restart required. This could take up to 3 minutes and you will be prompted to re-enter your password. Restarting...", end="") - sys.stdout.flush() - try: - self.restart_lnds(self.network) - print(" Done.") - except: - self.logger.exception("Failed to do restaring logic here") - print(" Failed.") - else: - if not self.is_backup_available(): - print("Backup location not available.") - self.config.backup_dir = None - self.setup_backup_dir() - - cmd = f"/update-backup-dir.sh '{get_hostfs_file(self.config.backup_dir)}'" - exit_code, output = xud.exec(cmd) - lines = output.decode().splitlines() - if len(lines) > 0: - print(lines[0]) diff --git a/images/utils/launcher/close_other_utils.py b/images/utils/launcher/close_other_utils.py index 63f1dcafc..041fa3aee 100644 --- a/images/utils/launcher/close_other_utils.py +++ b/images/utils/launcher/close_other_utils.py @@ -3,11 +3,12 @@ from typing import List from subprocess import check_output +from launcher.utils import yes_or_no + class Action: - def __init__(self, network, shell): + def __init__(self, network): self.network = network - self.shell = shell self.client = docker.from_env() def get_utils_containers(self): @@ -45,7 +46,7 @@ def execute(self): if n == 0: return - reply = self.shell.yes_or_no("Found {} existing xud ctl sessions. Do you want to close these?".format(n)) + reply = yes_or_no("Found {} existing xud ctl sessions. 
Do you want to close these?".format(n)) if reply == "yes": for c in result: if c.status == "running": diff --git a/images/utils/launcher/config/__init__.py b/images/utils/launcher/config/__init__.py index bbcfcb5e5..c4fbf7ea4 100644 --- a/images/utils/launcher/config/__init__.py +++ b/images/utils/launcher/config/__init__.py @@ -1 +1 @@ -from .config import Config, ArgumentParser, PortPublish, ConfigLoader +from .config import Config, ArgumentParser, PortPublish diff --git a/images/utils/launcher/config/config.py b/images/utils/launcher/config/config.py index 739f622ca..e9b83ac40 100644 --- a/images/utils/launcher/config/config.py +++ b/images/utils/launcher/config/config.py @@ -4,13 +4,13 @@ from logging.handlers import TimedRotatingFileHandler import os from typing import Optional, List +from concurrent.futures import ThreadPoolExecutor import toml from ..utils import get_hostfs_file, ArgumentParser from ..errors import ConfigError, ConfigErrorScope from .template import nodes_config, general_config, PortPublish -from .loader import ConfigLoader class Config: @@ -24,18 +24,21 @@ class Config: restore_dir: Optional[str] eth_providers: List[str] - def __init__(self, loader: ConfigLoader): - self.logger = logging.getLogger("launcher.Config") + executor: ThreadPoolExecutor - self.loader = loader + def __init__(self): + self.logger = logging.getLogger("launcher.Config") + self.executor = ThreadPoolExecutor(max_workers=10, thread_name_prefix="Pool") self.branch = "master" self.disable_update = False self.external_ip = None self.network = os.environ["NETWORK"] - self.home_dir = self.loader.ensure_home_dir(os.environ["HOST_HOME"]) - self.network_dir = os.path.join(self.home_dir, self.network) + self.network_dir = os.path.join("/root", self.network) + self.host_network_dir = os.environ["NETWORK_DIR"] + self.host_home = os.environ["HOST_HOME"] + self.host_pwd = os.environ["HOST_PWD"] self.backup_dir = None self.restore_dir = None @@ -44,8 +47,28 @@ def __init__(self, loader: ConfigLoader): self.nodes = nodes_config[self.network] self._parse_command_line_arguments() - self._parse_general_config_file() - self.network_dir = self.loader.ensure_network_dir(self.network_dir) + + + if not os.path.exists(self.logs_dir): + os.mkdir(self.logs_dir) + + + fmt = "%(asctime)s.%(msecs)03d %(levelname)5s %(process)d --- [%(threadName)-15s] %(name)-30s: %(message)s" + datefmt = "%Y-%m-%d %H:%M:%S" + logging.basicConfig(format=fmt, datefmt=datefmt, level=logging.INFO, filename=self.logfile, filemode="w") + logging.getLogger("launcher").setLevel(logging.DEBUG) + # fh = TimedRotatingFileHandler(self.logfile, when="d", interval=1, backupCount=7) + # fh.setFormatter(logging.Formatter(fmt=fmt)) + # logging.getLogger().addHandler(fh) + + if hasattr(self.args, "branch"): + self.branch = self.args.branch + + if hasattr(self.args, "disable_update"): + self.disable_update = True + + if hasattr(self.args, "external_ip"): + self.external_ip = self.args.external_ip self._parse_network_config_file() @@ -62,14 +85,6 @@ def _parse_command_line_arguments(self) -> None: except Exception as e: raise ConfigError(ConfigErrorScope.COMMAND_LINE_ARGS) from e - def _parse_general_config_file(self) -> None: - filename = "xud-docker.conf" - conf_file = os.path.join(self.home_dir, filename) - try: - self.parse_general_config() - except Exception as e: - raise ConfigError(ConfigErrorScope.GENERAL_CONF, conf_file=conf_file) from e - def _parse_network_config_file(self) -> None: filename = "{}.conf".format(self.network) conf_file = 
os.path.join(self.network_dir, filename) @@ -448,39 +463,9 @@ def parse_command_line_arguments(self): help="Expose proxy service ports to your host machine" ) - self.args = parser.parse_args() + self.args, unknown = parser.parse_known_args() self.logger.info("Parsed command-line arguments: %r", self.args) - def parse_general_config(self): - network = self.network - - parsed = toml.loads(self.loader.load_general_config(self.home_dir)) - self.logger.info("Parsed xud-docker.conf: %r", parsed) - - key = f"{network}-dir" - if key in parsed: - self.network_dir = parsed[key] - if hasattr(self.args, f"{self.network}_dir"): - self.network_dir = getattr(self.args, f"{self.network}_dir") - - logs_dir = get_hostfs_file(f"{self.network_dir}/logs") - if not os.path.exists(logs_dir): - os.makedirs(logs_dir, exist_ok=True) - logfile = f"{logs_dir}/{self.network}.log" - fh = TimedRotatingFileHandler(logfile, when="d", interval=1, backupCount=7) - fmt = "%(asctime)s %(levelname)s %(process)d --- [%(threadName)s] %(name)s: %(message)s" - fh.setFormatter(logging.Formatter(fmt=fmt)) - logging.getLogger().addHandler(fh) - - if hasattr(self.args, "branch"): - self.branch = self.args.branch - - if hasattr(self.args, "disable_update"): - self.disable_update = True - - if hasattr(self.args, "external_ip"): - self.external_ip = self.args.external_ip - def update_volume(self, volumes, container_dir, host_dir): target = [v for v in volumes if v["container"] == container_dir] if len(target) == 0: @@ -960,11 +945,19 @@ def update_proxy(self, parsed): "28889": "28889:8080", }) + @property + def conf_file(self) -> str: + filename = "{}.conf".format(self.network) + return os.path.join(self.network_dir, filename) + def parse_network_config(self): - network = self.network + try: + with open(self.conf_file) as f: + conf = f.read() + except FileNotFoundError: + conf = "" - parsed = toml.loads(self.loader.load_network_config(network, self.network_dir)) - self.logger.info("Parsed %s.conf: %r", network, parsed) + parsed = toml.loads(conf) # parse backup-dir value from # 1) data/xud/.backup-dir-value @@ -993,7 +986,11 @@ def parse_network_config(self): parts = value.split(",") parts = [p.strip() for p in parts] for p in parts: - self.nodes[p]["use_local_image"] = True + node = self.nodes[p] + node["use_local_image"] = True + image = node["image"] + parts = image.split(":") + node["image"] = parts[0] + ":" + "latest" for node in self.nodes.values(): name = node["name"] @@ -1029,35 +1026,54 @@ def expand_vars(self, value): if value is None: return None if isinstance(value, str): - if "$home_dir" in value: - value = value.replace("$home_dir", self.home_dir) - if f"${self.network}_dir" in value: - value = value.replace(f"${self.network}_dir", self.network_dir) + network_dir = os.environ["NETWORK_DIR"] if "$data_dir" in value: - value = value.replace("$data_dir", self.network_dir + "/data") + value = value.replace("$data_dir", network_dir + "/data") if "$logs_dir" in value: - value = value.replace("$logs_dir", self.logs_dir) + value = value.replace("$logs_dir", network_dir + "/logs") return value @property def logs_dir(self) -> str: return os.path.join(self.network_dir, "logs") + @property + def host_logs_dir(self) -> str: + return os.path.join(self.host_network_dir, "logs") + + @property + def data_dir(self) -> str: + return os.path.join(self.network_dir, "data") + + @property + def host_data_dir(self) -> str: + return os.path.join(self.host_network_dir, "data") + @property def logfile(self) -> str: filename = f"{self.network}.log" 
return os.path.join(self.logs_dir, filename) + @property + def host_logfile(self) -> str: + filename = f"{self.network}.log" + return os.path.join(self.host_logs_dir, filename) + @property def dumpfile(self) -> str: filename = f"config.sh" return os.path.join(self.logs_dir, filename) + @property + def host_dumpfile(self) -> str: + filename = f"config.sh" + return os.path.join(self.host_logs_dir, filename) + def dump(self) -> None: """Dump xud-docker configurations as bash key-value file in logs_dir""" prefix = "XUD_DOCKER" - with open("/mnt/hostfs" + self.dumpfile, "w") as f: + with open(self.dumpfile, "w") as f: def dump_attr(attr: str) -> None: key = f"{prefix}_{attr.upper()}" value = getattr(self, attr) @@ -1071,7 +1087,6 @@ def dump_attr(attr: str) -> None: dump_attr("disable_update") dump_attr("external_ip") dump_attr("network") - dump_attr("home_dir") dump_attr("network_dir") dump_attr("backup_dir") dump_attr("restore_dir") @@ -1143,3 +1158,7 @@ def dump_node_attr(node: str, attr: str) -> None: # dump_node_attr(node, "cex_api_key") # dump_node_attr(node, "cex_api_secret") dump_node_attr(node, "margin") + + @property + def dev_mode(self) -> bool: + return self.args.dev diff --git a/images/utils/launcher/config/loader.py b/images/utils/launcher/config/loader.py deleted file mode 100644 index 7c20dd2f3..000000000 --- a/images/utils/launcher/config/loader.py +++ /dev/null @@ -1,66 +0,0 @@ -from shutil import copyfile -import os - -from ..errors import FatalError -from ..utils import normalize_path, get_hostfs_file - - -class ConfigLoader: - def load_general_config(self, home_dir): - config_file = get_hostfs_file(f"{home_dir}/xud-docker.conf") - sample_config_file = get_hostfs_file(f"{home_dir}/sample-xud-docker.conf") - copyfile(os.path.dirname(__file__) + "/xud-docker.conf", sample_config_file) - if os.path.exists(config_file): - with open(config_file) as f: - return f.read() - return "" - - def load_network_config(self, network, network_dir): - config_file = get_hostfs_file(f"{network_dir}/{network}.conf") - sample_config_file = get_hostfs_file(f"{network_dir}/sample-{network}.conf") - copyfile(os.path.dirname(__file__) + f'/{network}.conf', sample_config_file) - if os.path.exists(config_file): - with open(config_file) as f: - return f.read() - return "" - - def load_lndenv(self, network_dir): - lndenv = get_hostfs_file(f"{network_dir}/lnd.env") - try: - with open(lndenv) as f: - return f.read() - except FileNotFoundError: - return "" - - def ensure_home_dir(self, host_home): - home_dir = host_home + "/.xud-docker" - hostfs_dir = get_hostfs_file(home_dir) - if os.path.exists(hostfs_dir): - if not os.path.isdir(hostfs_dir): - raise FatalError("{} is not a directory".format(home_dir)) - else: - if not os.access(hostfs_dir, os.R_OK): - raise FatalError("{} is not readable".format(home_dir)) - if not os.access(hostfs_dir, os.W_OK): - raise FatalError("{} is not writable".format(home_dir)) - else: - os.mkdir(hostfs_dir) - return home_dir - - def ensure_network_dir(self, network_dir): - network_dir = normalize_path(network_dir) - hostfs_dir = get_hostfs_file(network_dir) - if os.path.exists(hostfs_dir): - if not os.path.isdir(hostfs_dir): - raise FatalError("{} is not a directory".format(network_dir)) - else: - if not os.access(hostfs_dir, os.R_OK): - raise FatalError("{} is not readable".format(network_dir)) - if not os.access(hostfs_dir, os.W_OK): - raise FatalError("{} is not writable".format(network_dir)) - else: - os.makedirs(hostfs_dir) - - if not os.path.exists(hostfs_dir + "/logs"): 
- os.mkdir(hostfs_dir + "/logs") - return network_dir diff --git a/images/utils/launcher/config/template.py b/images/utils/launcher/config/template.py index ab212aceb..04e484c32 100644 --- a/images/utils/launcher/config/template.py +++ b/images/utils/launcher/config/template.py @@ -1,5 +1,5 @@ import re -from ..errors import FatalError +from launcher.errors import FatalError class PortPublish: @@ -643,7 +643,7 @@ def __str__(self): }, "xud": { "name": "xud", - "image": "exchangeunion/xud:1.2.0", + "image": "exchangeunion/xud:latest", "volumes": [ { "host": "$data_dir/xud", diff --git a/images/utils/launcher/errors.py b/images/utils/launcher/errors.py index 98f3315a8..9da1ba50b 100644 --- a/images/utils/launcher/errors.py +++ b/images/utils/launcher/errors.py @@ -17,3 +17,11 @@ def __init__(self, scope: ConfigErrorScope, conf_file: Optional[str] = None): super().__init__(scope) self.scope = scope self.conf_file = conf_file + + +class NoWaiting(Exception): + pass + + +class ParallelError(Exception): + pass diff --git a/images/utils/launcher/node/__init__.py b/images/utils/launcher/node/__init__.py index f3eddd757..834a77fe0 100644 --- a/images/utils/launcher/node/__init__.py +++ b/images/utils/launcher/node/__init__.py @@ -1,134 +1,195 @@ +from __future__ import annotations + import functools import logging -import os import sys import threading -import time +from abc import ABC, abstractmethod +from concurrent.futures import wait from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict import docker from docker.errors import NotFound from docker.types import IPAMConfig, IPAMPool +from launcher.utils import ArgumentParser, yes_or_no, parallel +from launcher.errors import FatalError +from .DockerTemplate import DockerTemplate from .arby import Arby -from .base import Node -from .boltz import Boltz +from .base import Node, ContainerNotFound from .bitcoind import Bitcoind, Litecoind -from .btcd import Btcd, Ltcd +from .boltz import Boltz from .connext import Connext from .geth import Geth from .image import Image, ImageManager -from .lnd import Lndbtc, Lndltc +from .lnd import Lnd, Lndbtc, Lndltc +from .proxy import Proxy from .webui import Webui from .xud import Xud, XudApiError -from .proxy import Proxy -from .DockerTemplate import DockerTemplate -from ..config import Config -from ..errors import FatalError -from ..shell import Shell -from ..utils import parallel_execute, get_useful_error_message, get_hostfs_file, ArgumentParser +if TYPE_CHECKING: + from launcher.config import Config + from docker.client import DockerClient -class LogsCommand: - def __init__(self, get_container, shell): - self._get_container = get_container - self._shell = shell +logger = logging.getLogger(__name__) - parser = ArgumentParser(prog="logs", description="fetch the logs of a container") - parser.add_argument("--tail", metavar='N', type=int, help="number of lines to show from the end of the logs", default=100) - parser.add_argument("container") - self._parser = parser - def execute(self, args): - args = self._parser.parse_args(args) - container = self._get_container(args.container) - for line in container.logs(tail=args.tail): - self._shell.println(line) +class Command(ABC): + def __init__(self, get_service): + self.get_service = get_service + self.parser = self.create_parser() + @abstractmethod + def create_parser(self) -> ArgumentParser: + pass -class StartCommand: - def __init__(self, get_container, shell): - self._get_container = get_container - self._shell = shell + @abstractmethod + def 
execute(self, *args) -> None: + pass - parser = ArgumentParser(prog="start") - parser.add_argument("container") - self._parser = parser - def execute(self, args): - args = self._parser.parse_args(args) - container = self._get_container(args.container) - container.start() +class LogsCommand(Command): + def create_parser(self) -> ArgumentParser: + parser = ArgumentParser(prog="logs", description="fetch the logs of a container") + parser.add_argument("--tail", metavar='N', + help="number of lines to show from the end of the logs (default \"100\")", + default="100") + parser.add_argument("--since", + help="show logs since timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)") + parser.add_argument("--until", + help="show logs before a timestamp (e.g. 2013-01-02T13:23:37) or relative (e.g. 42m for 42 minutes)") + parser.add_argument("--follow", "-f", action="store_true", + help="follow log output") + parser.add_argument("--timestamps", "-t", action="store_true", + help="show timestamps") + parser.add_argument("service") + return parser + + def execute(self, *args) -> None: + args = self.parser.parse_args(args) + name = args.service + service = self.get_service(name) + for line in service.logs(tail=args.tail, since=args.since, until=args.until, follow=args.follow, + timestamps=args.timestamps): + print(line) + + +class StartCommand(Command): + def create_parser(self) -> ArgumentParser: + parser = ArgumentParser(prog="start") + parser.add_argument("service") + return parser + def execute(self, *args) -> None: + args = self.parser.parse_args(args) + name = args.service + service = self.get_service(name) + service.start() -class StopCommand: - def __init__(self, get_container, shell): - self._get_container = get_container - self._shell = shell +class StopCommand(Command): + def create_parser(self) -> ArgumentParser: parser = ArgumentParser(prog="stop") - parser.add_argument("container") - self._parser = parser + parser.add_argument("service") + return parser - def execute(self, args): - args = self._parser.parse_args(args) - container = self._get_container(args.container) - container.stop() + def execute(self, *args) -> None: + args = self.parser.parse_args(args) + name = args.service + service = self.get_service(name) + service.stop() -class RestartCommand: - def __init__(self, get_container, shell): - self._get_container = get_container - self._shell = shell - +class RestartCommand(Command): + def create_parser(self) -> ArgumentParser: parser = ArgumentParser(prog="restart") - parser.add_argument("container") - self._parser = parser + parser.add_argument("service") + return parser + + def execute(self, *args) -> None: + args = self.parser.parse_args(args) + name = args.service + service = self.get_service(name) + service.stop() + service.start() + + +class RemoveCommand(Command): + def create_parser(self) -> ArgumentParser: + parser = ArgumentParser(prog="rm") + parser.add_argument("service") + return parser - def execute(self, args): - args = self._parser.parse_args(args) - container = self._get_container(args.container) - container.stop() - container.start() + def execute(self, *args) -> None: + args = self.parser.parse_args(args) + name = args.service + service = self.get_service(name) + service.remove() + + +class CreateCommand(Command): + def create_parser(self) -> ArgumentParser: + parser = ArgumentParser(prog="create") + parser.add_argument("service") + return parser + + def execute(self, *args) -> None: + args = self.parser.parse_args(args) + name = args.service + 
service = self.get_service(name) + service.create() @dataclass class Context: config: Config - shell: Shell client: docker.DockerClient image_manager: ImageManager - node_manager: 'NodeManager' + node_manager: NodeManager + + +class ServiceNotFound(Exception): + pass -class NodeNotFound(Exception): +class NetworkNotFound(Exception): pass class NodeManager: - def __init__(self, config, shell): - self.logger = logging.getLogger("launcher.node.NodeManager") + config: Config + client: DockerClient + + def __init__(self, config: Config): + self.logger = logger self.config = config - self.shell = shell self.client = docker.from_env() - self.image_manager = ImageManager(self.config, self.shell, self.client) + self.image_manager = ImageManager(self.config, self.client) - self.branch = self.config.branch - self.network = self.config.network - - ctx = Context(self.config, self.shell, self.client, self.image_manager, self) + ctx = Context(self.config, self.client, self.image_manager, self) self.nodes = {name: globals()[name.capitalize()](name, ctx) for name in self.config.nodes} - self.docker_network = self.create_docker_network() + # self.docker_network = self.create_docker_network() - self.cmd_logs = LogsCommand(self.get_node, self.shell) - self.cmd_start = StartCommand(self.get_node, self.shell) - self.cmd_stop = StopCommand(self.get_node, self.shell) - self.cmd_restart = RestartCommand(self.get_node, self.shell) + self.cmd_logs = LogsCommand(self.get_service) + self.cmd_start = StartCommand(self.get_service) + self.cmd_stop = StopCommand(self.get_service) + self.cmd_restart = RestartCommand(self.get_service) + self.cmd_create = CreateCommand(self.get_service) + self.cmd_remove = RemoveCommand(self.get_service) self.docker_template = DockerTemplate() + @property + def branch(self): + return self.config.branch + + @property + def network(self): + return self.config.network + @property def network_name(self): return self.network + "_default" @@ -142,187 +203,153 @@ def get_network_ipam_pool(self): elif self.network == "mainnet": return IPAMPool(subnet='10.0.3.0/24', gateway='10.0.3.1') - def create_docker_network(self): - name = self.network_name - try: - network = self.client.networks.get(name) - return network - except NotFound: - pass + def _create_docker_network(self) -> None: ipam_pool = self.get_network_ipam_pool() ipam_config = IPAMConfig(pool_configs=[ipam_pool]) - network = self.client.networks.create(name, driver="bridge", ipam=ipam_config) - return network + network = self.client.networks.create(self.network_name, driver="bridge", ipam=ipam_config) + logger.info("Created network: %r", network) - def get_node(self, name): - try: - return self.valid_nodes[name] - except KeyError: - raise NodeNotFound(name) + def _remove_docker_network(self) -> None: + network = self.docker_network + network.remove() + logger.info("Removed network: %r", network) - def check_wallets(self): - pass + @property + def docker_network(self): + try: + return self.client.networks.get(self.network_name) + except docker.errors.NotFound as e: + raise NetworkNotFound(self.network_name) from e - def wait_for_channels(self): - pass + def get_service(self, name: str) -> Node: + try: + return self.nodes[name] + except KeyError as e: + raise ServiceNotFound(name) from e @property - def valid_nodes(self): + def valid_nodes(self) -> Dict[str, Node]: return {name: node for name, node in self.nodes.items() if node.mode == "native" and not node.disabled} - @property - def enabled_nodes(self): - return {name: node for name, node in 
self.nodes.items() if not node.disabled} - def up(self): - self.docker_network = self.create_docker_network() + nodes = self.valid_nodes - nodes = self.valid_nodes.values() + logger.info("Up services: %s", ", ".join(nodes)) + + try: + _ = self.docker_network + except NetworkNotFound: + self._create_docker_network() - def print_failed(failed): - print("Failed to start these services:") - for f in failed: - print(f"- {f[0].name}: {str(f[1])}") + def linehead(node): + return "starting %s" % node.container_name - def try_again(): - answer = self.shell.yes_or_no("Try again?") - return answer == "yes" + def start(node, stop): + node.start() - parallel_execute(nodes, lambda n: n.start(), 60, print_failed, try_again) + nodes = [node for node in nodes.values() if not node.is_running] - if self.network in ["testnet", "mainnet"]: - self.check_wallets() - elif self.network == "simnet": - self.wait_for_channels() + parallel(self.config.executor, nodes, linehead, start) def down(self): nodes = self.valid_nodes - for name, container in nodes.items(): - print(f"Stopping {name}...") - container.stop() - for name, container in nodes.items(): - print(f"Removing {name}...") - container.remove() + logger.info("Down services: %s", ", ".join(nodes)) + + running_nodes = [node for node in nodes.values() if node.is_running] + + parallel(self.config.executor, running_nodes, + lambda node: "stopping %s" % node.container_name, + lambda node, stop: node.stop()) + + parallel(self.config.executor, list(nodes.values()), + lambda node: "removing %s" % node.container_name, + lambda node, stop: node.remove()) + print(f"Removing network {self.network_name}") - self.docker_network.remove() - - def _display_container_status_text(self, status): - if status == "missing": - return "create" - elif status == "outdated": - return "recreate" - elif status == "external_with_container": - return "remove" - elif status == "disabled_with_container": - return "remove" - - def update(self) -> bool: + self._remove_docker_network() + + def check_for_updates(self) -> Dict[Node, str]: + logger.info("Checking for container updates") + containers = self.nodes.values() + result = {c: None for c in containers} + + executor = self.config.executor + futs = {executor.submit(c.get_update_action): c for c in containers} + done, not_done = wait(futs, 30) + if len(not_done) > 0: + raise RuntimeError("Failed to create all containers") + for f in done: + action = f.result() + result[futs[f]] = action + return result + + def _apply_changes(self, images, containers) -> None: + + pulls = [img for img, action in images.items() if action == "PULL"] + + if len(pulls) > 0: + reply = yes_or_no( + "A new version is available. 
Would you like to upgrade (Warning: this may restart your environment and cancel all open orders)?") + if reply == "yes": + for img in pulls: + img.pull() + + b1 = len(pulls) == 0 + b2 = functools.reduce(lambda r, item: r and item == "NONE", containers.values(), True) + + if b1 and b2: + print("All up-to-date.") + + def linehead(node): + action = containers[node] + if action == "CREATE": + return "creating %s" % node.container_name + elif action == "RECREATE": + return "recreating %s" % node.container_name + elif action == "REMOVE": + return "removing %s" % node.container_name + + def update(node, stop): + action = containers[node] + if action == "CREATE": + node.create() + elif action == "RECREATE": + if node.is_running: + node.stop() + node.remove() + node.create() + elif action == "REMOVE": + if node.is_running: + node.stop() + node.remove() + + items = [] + for container, action in containers.items(): + if action != "NONE": + items.append(container) + + parallel(self.config.executor, items, linehead, update) + + def update(self) -> None: if self.config.disable_update: - self.logger.info("Disable update checking") - return True + self.logger.info("Skip update checking") + return - self.logger.info("Checking for updates") print("šŸŒ Checking for updates...") - # Checking for image updates - self.logger.info("Checking for image updates") - outdated = False - image_outdated = False images = self.image_manager.check_for_updates() + containers = self.check_for_updates() - for image in images: - status = image.status - if status in ["LOCAL_MISSING", "LOCAL_OUTDATED"]: - print("- Image %s: %s" % (image.name, image.status_message)) - outdated = True - image_outdated = True - elif status == "UNAVAILABLE": - all_unavailable_images = [x.name for x in images if x.status == "UNAVAILABLE"] - raise FatalError("Image(s) not found: %s" % ", ".join(all_unavailable_images)) - - # Checking for container updates - self.logger.info("Checking for container updates") - containers = self.nodes.values() - container_check_result = {c: None for c in containers} - - def print_failed(failed): - pass - - def try_again(): - return False - - def handle_result(container, result): - container_check_result[container] = result - - def wrapper(c): - self.logger.info("(%s) Checking for updates", c.container_name) - try: - status, details = c.check_for_updates() - self.logger.info("(%s) Checking for updates: %s", c.container_name, status) - return status, details - except Exception as e: - self.logger.exception("(%s) Checking for updates: ERRORED", c.container_name) - raise e - - parallel_execute(containers, lambda c: wrapper(c), 30, print_failed, try_again, handle_result) - - for container, result in container_check_result.items(): - status, details = result - # when mode internal -> external or others, status will be "external_with_container" - # when mode external or others -> internal, status will be "missing" because we deleted the container before - # when disabled False -> True, status will be "disabled_with_container" - # when disabled True -> False, status will be "missing" because we deleted the container before - if status in ["missing", "outdated", "external_with_container", "disabled_with_container"]: - print("- Container %s: %s" % (container.container_name, self._display_container_status_text(status))) - outdated = True - - if not outdated: - print("All up-to-date.") - return True + for image, action in images.items(): + if action != "NONE": + print("- Image %s: %s" % (image.name, action.lower())) - 
all_containers_missing = functools.reduce(lambda a, b: a and b[0] in ["missing", "external", "disabled"], container_check_result.values(), True) + for container, action in containers.items(): + if action != "NONE": + print("- Container %s: %s" % (container.container_name, action.lower())) - if all_containers_missing: - if self.newly_installed: - answer = "yes" - else: - if image_outdated: - answer = "yes" - else: - return True # FIXME unintended containers (configuration) update - else: - answer = self.shell.yes_or_no("A new version is available. Would you like to upgrade (Warning: this may restart your environment and cancel all open orders)?") - - if answer == "yes": - # Step 1. update images - self.image_manager.update_images() - - # Step 2. update containers - # 2.1) stop all running containers - for container in containers: - container.stop() - # 2.2) recreate outdated containers - for container, result in container_check_result.items(): - container.update(result) - return True - else: - return False - - def logs(self, *args): - self.cmd_logs.execute(args) - - def start(self, *args): - self.cmd_start.execute(args) - - def stop(self, *args): - self.cmd_stop.execute(args) - - def restart(self, *args): - self.cmd_restart.execute(args) - - def cli(self, name, *args): - self.get_node(name).cli(" ".join(args), self.shell) + self._apply_changes(images, containers) def _get_status_nodes(self): optional_nodes = ["arby", "boltz", "webui", "proxy"] @@ -359,10 +386,13 @@ def status(self): print(f"{border_style}ā”Œā”€%sā”€ā”¬ā”€%sā”€ā”{RESET}" % ("ā”€" * col1_width, "ā”€" * col2_width)) print( - f"{border_style}ā”‚{RESET} {title_style}%s{RESET} {border_style}ā”‚{RESET} {title_style}%s{RESET} {border_style}ā”‚{RESET}" % (col1_fmt % col1_title, col2_fmt % col2_title)) + f"{border_style}ā”‚{RESET} {title_style}%s{RESET} {border_style}ā”‚{RESET} {title_style}%s{RESET} {border_style}ā”‚{RESET}" % ( + col1_fmt % col1_title, col2_fmt % col2_title)) for name in names: print(f"{border_style}ā”œā”€%sā”€ā”¼ā”€%sā”€ā”¤{RESET}" % ("ā”€" * col1_width, "ā”€" * col2_width)) - print(f"{border_style}ā”‚{RESET} {service_style}%s{RESET} {border_style}ā”‚{RESET} {border_style}%s{RESET} {border_style}ā”‚{RESET}" % (col1_fmt % name, col2_fmt % "")) + print( + f"{border_style}ā”‚{RESET} {service_style}%s{RESET} {border_style}ā”‚{RESET} {border_style}%s{RESET} {border_style}ā”‚{RESET}" % ( + col1_fmt % name, col2_fmt % "")) print(f"{border_style}ā””ā”€%sā”€ā”“ā”€%sā”€ā”˜{RESET}" % ("ā”€" * col1_width, "ā”€" * col2_width)) lock = threading.Lock() @@ -374,28 +404,22 @@ def update_line(name, text, fetching=False): y = (n - i) * 2 x = col1_width + 2 if fetching: - print(f"\033[%dA\033[%dC{border_style}%s{RESET}\033[%dD\033[%dB" % (y, x + 3, col2_fmt % text[:col2_width], x + col2_width + 3, y), end="") + print(f"\033[%dA\033[%dC{border_style}%s{RESET}\033[%dD\033[%dB" % ( + y, x + 3, col2_fmt % text[:col2_width], x + col2_width + 3, y), end="") else: - print("\033[%dA\033[%dC%s\033[%dD\033[%dB" % (y, x + 3, col2_fmt % text[:col2_width], x + col2_width + 3, y), end="") + print("\033[%dA\033[%dC%s\033[%dD\033[%dB" % ( + y, x + 3, col2_fmt % text[:col2_width], x + col2_width + 3, y), end="") sys.stdout.flush() result = {name: None for name in names} - def update_status(node, status): + def update_status(node: Node, status: str) -> None: + assert status is not None nonlocal result with lock: result[node.name] = status update_line(node.name, status) - def status_wrapper(container, name, update_status): - status = 
container.status() - if status.startswith("could not connect"): - update_status(name, "Waiting for xud...") - time.sleep(5) - status = container.status() - - update_status(name, status) - class State: def __init__(self, result): self.counter = 0 @@ -422,20 +446,26 @@ def fetching_loop(stop_event: threading.Event): stop_fetching_animation = threading.Event() threading.Thread(target=fetching_loop, args=(stop_fetching_animation,), name="status_fetching").start() - def print_failed(failed): - for node, error in failed: - update_status(node, get_useful_error_message(error)) - - def try_again(): - return False - - def handle_result(node, result): - update_status(node, result) - - parallel_execute(nodes.values(), lambda n: n.status(), 30, print_failed, try_again, handle_result) - - stop_fetching_animation.set() + try: + executor = self.config.executor + + def wrapper(node): + try: + status = node.status() + update_status(node, status) + except Exception as e: + logger.exception("Failed to get %s status", node.name) + update_status(node, str(e)) + + futs = {executor.submit(wrapper, node): node for node in nodes.values()} + done, not_done = wait(futs, 30) + for f in not_done: + node = futs[f] + update_status(node, "timeout") + finally: + stop_fetching_animation.set() @property def newly_installed(self): - return not os.path.exists(f"{get_hostfs_file(self.config.network_dir)}/data/xud/nodekey.dat") + xud = self.get_service("xud") + return not xud.has_wallets() diff --git a/images/utils/launcher/node/arby.py b/images/utils/launcher/node/arby.py index ff2ef4388..502a9a263 100644 --- a/images/utils/launcher/node/arby.py +++ b/images/utils/launcher/node/arby.py @@ -66,7 +66,7 @@ def __init__(self, name, ctx): self.container_spec.environment.extend(environment) self._cli = "curl -s" - self.api = ArbyApi(CliBackend(self.client, self.container_name, self._logger, self._cli)) + self.api = ArbyApi(CliBackend(self.name, self.container_name, self._cli)) def status(self): status = super().status() diff --git a/images/utils/launcher/node/base.py b/images/utils/launcher/node/base.py index 6d08b0b32..68e519edc 100644 --- a/images/utils/launcher/node/base.py +++ b/images/utils/launcher/node/base.py @@ -1,17 +1,28 @@ +from __future__ import annotations + import datetime import itertools import logging import os -import sys -from typing import List, Dict, Any +from threading import Event +from typing import List, Dict, Any, Optional, Tuple +from typing import TYPE_CHECKING + import docker from docker import DockerClient from docker.errors import NotFound from docker.models.containers import Container +from launcher.config import PortPublish from .image import Image -from ..config import PortPublish -from ..types import XudNetwork +from .pty import exec_command + +if TYPE_CHECKING: + from launcher.config import Config + from .image import ImageManager + from . 
import NodeManager + +logger = logging.getLogger(__name__) class InvalidNetwork(Exception): @@ -20,8 +31,13 @@ def __init__(self, network): self.network = network +class ContainerNotFound(Exception): + pass + + class ContainerSpec: - def __init__(self, name: str, image: Image, hostname: str, environment: List[str], command: List[str], volumes: Dict, ports: Dict): + def __init__(self, name: str, image: Image, hostname: str, environment: List[str], command: List[str], + volumes: Dict, ports: Dict): self.name = name self.image = image self.hostname = hostname @@ -30,31 +46,43 @@ def __init__(self, name: str, image: Image, hostname: str, environment: List[str self.volumes = volumes self.ports = ports - def __repr__(self): - return f"" - -class CompareEntity: - def __init__(self, obj: Any, diff: Any = None): - self.obj = obj - self.diff = diff +class OutputStream: + def __init__(self, fd): + self.fd = fd - def __repr__(self): - return f"" + def isatty(self) -> bool: + return True + def fileno(self) -> int: + return self.fd -class CompareResult: - def __init__(self, same: bool, message: str, old: CompareEntity, new: CompareEntity): - self.same = same - self.message = message - self.old = old - self.new = new - def __repr__(self): - return f"" +def diff_details(s1, s2): + d1 = s1 - s2 + d2 = s2 - s1 + lines = [] + for item in d1: + lines.append("D %s" % item) + for item in d2: + lines.append("A %s" % item) + return "\n".join(lines) class Node: + client: DockerClient + config: Config + image_manager: ImageManager + node_manager: NodeManager + name: str + container_spec: ContainerSpec + + _container: Optional[Container] + _image_status: Optional[str] + _container_status: Optional[str] + _logger: logging.Logger + _cli: Any + def __init__(self, name: str, ctx): self.client = docker.from_env(timeout=999999999) self.config = ctx.config @@ -78,10 +106,13 @@ def __init__(self, name: str, ctx): self._image_status = None self._container_status = None - self._logger = logging.getLogger("launcher.node." 
+ self.name) + self._logger = logger self._cli = None + def __repr__(self): + return "" % self.name + def generate_environment(self): environment = [f"NETWORK={self.network}"] if self.node_config["preserve_config"]: @@ -111,7 +142,7 @@ def generate_ports(self): return ports @property - def network(self) -> XudNetwork: + def network(self) -> str: return self.config.network @property @@ -141,6 +172,13 @@ def disabled(self) -> bool: result = self.node_config["disabled"] return result + @property + def data_dir(self) -> str: + return os.path.join(self.config.data_dir, self.name) + + def get_service(self, name) -> Node: + return self.node_manager.get_service(name) + def _get_ports(self, spec_ports: Dict): ports = [] for key, value in spec_ports.items(): @@ -157,11 +195,16 @@ def _get_volumes(self, spec_volumes: Dict): volumes.append(value["bind"]) return volumes - def create_container(self): + def create(self): spec = self.container_spec api = self.client.api + + image = spec.image.use_image + + logger.debug("Creating container %s with image %s", self.container_name, image) + resp = api.create_container( - image=spec.image.use_image, + image=image, command=spec.command, hostname=spec.hostname, detach=True, @@ -184,93 +227,66 @@ def create_container(self): container = self.client.containers.get(id) return container - def get_container(self, create=False): + @property + def container(self) -> Container: try: return self.client.containers.get(self.container_name) - except NotFound: - if create: - return self.create_container() - else: - return None + except docker.errors.NotFound as e: + raise ContainerNotFound(self.name) from e - def start(self): + def start(self) -> None: if self.mode != "native": return - if self._container is None: - self._container = self.get_container(create=True) - assert self._container is not None - self._container.start() + self.container.start() - def stop(self): + def stop(self, timeout=180) -> None: if self.mode != "native": return - if self._container is not None: - self._container.stop(timeout=180) + self.container.stop(timeout=timeout) - def remove(self): + def remove(self, force=False) -> None: if self.mode != "native": return - if self._container is not None: - self._container.remove() + self.container.remove(force=force) - def status(self): - self._container = self.get_container() - if self._container is None: + @property + def is_running(self) -> bool: + return self.container.status == "running" + + def status(self) -> str: + try: + return "Container " + self.container.status + except ContainerNotFound: return "Container missing" - return self._container.status - def exec(self, cmd): - if self._container is not None: - return self._container.exec_run(cmd) + def exec(self, command: str) -> Tuple[int, str]: + exit_code, output = self.container.exec_run(command) + return exit_code, output.decode() - def cli(self, cmd, shell): + def cli(self, command: str, exception=False, parse_output=None) -> None: if self.mode != "native": return - full_cmd = "%s %s" % (self._cli, cmd) - self._logger.debug("[Execute] %s", full_cmd) - _, socket = self._container.exec_run(full_cmd, stdin=True, tty=True, socket=True) - shell.redirect_stdin(socket._sock) try: - output = "" - pre_data = None - while True: - data = socket.read(1024) - - if pre_data is not None: - data = pre_data + data - - if len(data) == 0: - break - - try: - text = data.decode() - pre_data = None - except: - pre_data = data - continue - - text = self.cli_filter(cmd, text) - output += text - - # Write text in chunks 
in case trigger BlockingIOError: could not complete without blocking - # because text is too large to fit the output buffer - # https://stackoverflow.com/questions/54185874/logging-chokes-on-blockingioerror-write-could-not-complete-without-blocking - i = 0 - while i < len(text): - os.write(sys.stdout.fileno(), text[i: i + 1024].encode()) - i = i + 1024 - sys.stdout.flush() - finally: - shell.stop_redirect_stdin() - - # TODO get exit code here - exception = self.extract_exception(cmd, output) - if exception: - raise exception + full_cmd = "%s %s" % (self._cli, command) + logger.debug("[Execute] %s (interactive)", full_cmd) + # FIXME use blocking docker client here + output = exec_command(self.client.api, self.container_name, full_cmd) + try: + self.extract_exception(command, output) + except KeyboardInterrupt: + raise + except: + if exception: + raise + if parse_output: + parse_output(output) + except docker.errors.NotFound: + # FIXME use self.container + raise ContainerNotFound(self.name) def extract_exception(self, cmd, text): - return None + pass def cli_filter(self, cmd, text): return text @@ -281,59 +297,60 @@ def _get_container_created_timestamp(self): t = datetime.datetime.strptime(parts[0], "%Y-%m-%dT%H:%M:%S") return t - def logs(self, tail="all"): - if self._container is None: - return None - t = self._get_container_created_timestamp() - result = self._container.logs(since=t, tail=tail) - return itertools.chain(result.decode().splitlines()) + def logs(self, tail: str = None, since: str = None, until: str = None, follow: bool = False, timestamps: bool = False): + assert since is None, "String since is not supported yet" + assert until is None, "String until is not supported yet" + try: + tail = int(tail) + except: + pass + + kwargs = { + "tail": tail, + "follow": follow, + "timestamps": timestamps, + } + + if follow: + kwargs["stream"] = True + + result = self.container.logs(**kwargs) + if isinstance(result, bytes): + for line in result.decode().splitlines(): + yield line + else: + for line in result: + yield line.decode().rstrip() - def compare_image(self, container: Container) -> CompareResult: + def _compare_image(self, container: Container) -> bool: attrs = container.attrs old_name = attrs["Config"]["Image"] new_name = self.image.use_image - old = CompareEntity(old_name) - new = CompareEntity(new_name) - if old_name != new_name: - return CompareResult(False, "Image names are different", old, new) + logger.info("(%s) Image %s -> %s", self.container_name, old_name, new_name) + return False if self.image.pull_image: - # the names are same but a new image needs to be pulled - if self.image.status == "LOCAL_MISSING": - msg = "Local image is missing" - elif self.image.status == "LOCAL_OUTDATED": - msg = "Local image is outdated" - else: - raise RuntimeError("The pull_image should be None with status {}".format(self.image.status)) - return CompareResult(False, msg, old, new) + # the names are the same but new image available on registry + logger.info("(%s) Image pulling required", self.container_name) + return False old_digest = attrs["Image"] new_digest = self.image.digest if old_digest != new_digest: - # the names are same and no image needs to be pulled but image + # the names are the same and no image needs to be pulled but image # digests are different - old.diff = old_digest - new.diff = new_digest - return CompareResult(False, "Image digests are different", old, new) - return CompareResult(True, "Images are same", old, new) + logger.info("(%s) Image (digest) %s -> %s", 
self.container_name, old_digest, new_digest) + return False - def compare_hostname(self, container: Container) -> CompareResult: - attrs = container.attrs - old_hostname = attrs["Config"]["Hostname"] - new_hostname = self.container_spec.hostname - old = CompareEntity(old_hostname) - new = CompareEntity(new_hostname) - if old_hostname != new_hostname: - return CompareResult(False, "Hostnames are different", old, new) - return CompareResult(True, "", old, new) + return True - def compare_environment(self, container: Container) -> CompareResult: + def _compare_env(self, container: Container) -> bool: - old_environment = [] + old_env = [] ignore = [ "NODE_VERSION", @@ -354,24 +371,20 @@ def ignored(item): for item in env: if ignored(item): continue - old_environment.append(item) + old_env.append(item) - new_environment = self.container_spec.environment + new_env = self.container_spec.environment - old_set = set(old_environment) - new_set = set(new_environment) - - old = CompareEntity(old_set) - new = CompareEntity(new_set) + old_set = set(old_env) + new_set = set(new_env) if old_set != new_set: - old.diff = old_set - new_set - new.diff = new_set - old_set - return CompareResult(False, "Environments are different", old, new) + logger.info("(%s) Environment\n%s", self.container_name, diff_details(old_set, new_set)) + return False - return CompareResult(True, "", old, new) + return True - def compare_command(self, container: Container) -> CompareResult: + def _compare_command(self, container: Container) -> bool: attrs = container.attrs old_command = attrs["Config"]["Cmd"] new_command = self.container_spec.command @@ -379,35 +392,33 @@ def compare_command(self, container: Container) -> CompareResult: if not old_command: old_command = [] - old = CompareEntity(old_command) - new = CompareEntity(new_command) - old_set = set(old_command) new_set = set(new_command) if old_set != new_set: - old.diff = old_set - new_set - new.diff = new_set - old_set - return CompareResult(False, "Commands are different", old, new) + logger.info("(%s) Command\n%s", self.container_name, diff_details(old_set, new_set)) + return False - return CompareResult(True, "", old, new) + return True - def compare_volumes(self, container: Container) -> CompareResult: + def _compare_volumes(self, container: Container) -> bool: attrs = container.attrs old_volumes = ["{}:{}:{}".format(m["Source"], m["Destination"], m["Mode"]) for m in attrs["Mounts"]] - new_volumes = ["{}:{}:{}".format(key, value["bind"], value["mode"]) for key, value in self.container_spec.volumes.items()] + + # macOS workaround + old_volumes = [v.replace("/host_mnt", "") for v in old_volumes] + + new_volumes = ["{}:{}:{}".format(key, value["bind"], value["mode"]) for key, value in + self.container_spec.volumes.items()] old_set = set(old_volumes) new_set = set(new_volumes) - old = CompareEntity(old_set) - new = CompareEntity(new_set) - if old_set != new_set: - old.diff = old_set - new_set - new.diff = new_set - old_set - return CompareResult(False, "Volumes are different", old, new) - return CompareResult(True, "", old, new) + logger.info("(%s) Volumes\n%s", self.container_name, diff_details(old_set, new_set)) + return False + + return True def _normalize_docker_port_bindings(self, port_bindings): result = [] @@ -423,7 +434,7 @@ def _normalize_docker_port_bindings(self, port_bindings): result.append(key + "-" + ",".join(mapping)) return result - def compare_ports(self, container: Container) -> CompareResult: + def _compare_ports(self, container: Container) -> bool: 
attrs = container.attrs port_bindings = attrs["HostConfig"]["PortBindings"] old_ports = self._normalize_docker_port_bindings(port_bindings) @@ -443,112 +454,48 @@ def normalize(value): old_set = set(old_ports) new_set = set(new_ports) - old = CompareEntity(old_set) - new = CompareEntity(new_set) - if old_set != new_set: - old.diff = old_set - new_set - new.diff = new_set - old_set - return CompareResult(False, "Ports are different", old, new) - return CompareResult(True, "", old, new) - - @staticmethod - def _beautify_details(details): - def expand(c): - result = "" - result += " old: %s\n" % c.old.obj - result += " diff: %s\n" % c.old.diff - result += " new: %s\n" % c.new.obj - result += " diff: %s\n" % c.new.diff - return result - - result = "" - result += "- Image:\n" - result += expand(details["image"]) - result += "- Hostname:\n" - result += expand(details["hostname"]) - result += "- Environment:\n" - result += expand(details["environment"]) - result += "- Command:\n" - result += expand(details["command"]) - result += "- Volumes:\n" - result += expand(details["volumes"]) - result += "- Ports:\n" - result += expand(details["ports"]) - return result - - def compare(self, container): - - details = { - "image": self.compare_image(container), - "hostname": self.compare_hostname(container), - "environment": self.compare_environment(container), - "command": self.compare_command(container), - "volumes": self.compare_volumes(container), - "ports": self.compare_ports(container), - } - - same = True - for d in details.values(): - if not d.same: - same = False - break + logger.info("%s: Ports\n%s", self.container_name, diff_details(old_set, new_set)) + return False - self._logger.info("(%s) Comparing result\n%s", self.container_name, self._beautify_details(details)) + return True - return same, details + def _same(self, container: Container) -> bool: + return self._compare_image(container) \ + and self._compare_env(container) \ + and self._compare_command(container) \ + and self._compare_volumes(container) \ + and self._compare_ports(container) - def check_for_updates(self): - config = self.config.nodes[self.name] - assert config is not None + def _update_action(self) -> str: try: container = self.client.containers.get(self.container_name) if self.mode != "native": - return "external_with_container", None + return "REMOVE" # external if self.disabled: - return "disabled_with_container", None - - same, details = self.compare(container) + return "REMOVE" # disabled - if same: - return "up-to-date", details + if self._same(container): + return "NONE" else: - return "outdated", details + return "RECREATE" except NotFound: - if config["mode"] != "native": - return "external", None + if self.mode != "native": + return "NONE" if self.disabled: - return "disabled", None - return "missing", None - - def update(self, check_result): - status, details = check_result - if status == "missing": - print("Creating %s..." % self.container_name) - self._container = self.create_container() - elif status == "outdated": - print("Recreating %s..." % self.container_name) - container = self.get_container() - assert container is not None - container.stop() - container.remove() - self._container = self.create_container() - elif status == "external_with_container" or status == "disabled_with_container": - print("Removing %s..." 
% self.container_name) - container = self.get_container() - assert container is not None - container.stop() - container.remove() - self._container = None + return "NONE" + return "CREATE" - def __repr__(self): - name = self.name - mode = self.mode - container = self.container_name - return f"" + def get_update_action(self) -> str: + action = self._update_action() + logger.info("Container %s: action=%s", self.container_name, action) + return action + + def ensure_ready(self, stop: Event) -> None: + pass class CliError(Exception): @@ -559,31 +506,40 @@ def __init__(self, exit_code, output): class CliBackend: - def __init__(self, client: DockerClient, container_name, logger, cli): - self.client = docker.from_env() + def __init__(self, name, container_name, cli): + self.name = name self.container_name = container_name - self.logger = logger self.cli = cli - def get_container(self): - return self.client.containers.get(self.container_name) + self.client = docker.from_env() + self.blocking_client = docker.from_env(timeout=9999999) - def __getitem__(self, item): # Not implementing __getaddr__ because `eth.sycning` cannot be invoked as a function name - def f(*args): - if len(args) > 0: - cmd = "%s %s" % (item, " ".join(args)) - else: - cmd = item - full_cmd = "%s %s" % (self.cli, cmd) - if "create" in cmd or "restore" in cmd: - self.client = docker.from_env(timeout=999999999) + def get_container(self, blocking=False): + try: + if blocking: + return self.blocking_client.containers.get(self.container_name) else: - self.client = docker.from_env(timeout=20) + return self.client.containers.get(self.container_name) + except docker.errors.NotFound as e: + raise ContainerNotFound() from e + + def invoke(self, method, *args): + if len(args) > 0: + cmd = "%s %s" % (method, " ".join(args)) + else: + cmd = method + full_cmd = "%s %s" % (self.cli, cmd) + if cmd.startswith("create") or cmd.startswith("restore"): + exit_code, output = self.get_container(blocking=True).exec_run(full_cmd) + else: exit_code, output = self.get_container().exec_run(full_cmd) - text: str = output.decode() - self.logger.debug("[Execute] %s: exit_code=%s\n%s", full_cmd, exit_code, text) - if exit_code != 0: - raise CliError(exit_code, text) - return text - return f + text = output.decode().rstrip() + + if exit_code == 0: + logger.debug("[Execute] %s", full_cmd) + else: + logger.debug("[Execute] %s (exit_code=%s)\n%s", full_cmd, exit_code, text) + raise CliError(exit_code, text) + + return text diff --git a/images/utils/launcher/node/bitcoind.py b/images/utils/launcher/node/bitcoind.py index e304ab04f..8ad96e0e8 100644 --- a/images/utils/launcher/node/bitcoind.py +++ b/images/utils/launcher/node/bitcoind.py @@ -11,7 +11,8 @@ def __init__(self, backend): def getblockchaininfo(self): try: - return json.loads(self._backend["getblockchaininfo"]()) + info = self._backend.invoke("getblockchaininfo") + return json.loads(info) except CliError as e: # error code: -28 # error message: @@ -70,7 +71,7 @@ def __init__(self, name, ctx, litecoin: bool = False): if self.network == "testnet": self._cli += " -testnet" - self.api = BitcoindApi(CliBackend(self.client, self.container_name, self._logger, self._cli)) + self.api = BitcoindApi(CliBackend(self.name, self.container_name, self._cli)) def get_external_status(self): s = socket.socket() @@ -94,33 +95,30 @@ def status(self): return "Ready (light mode)" status = super().status() - if status == "exited": - # TODO analyze exit reason - return "Container exited" - elif status == "running": - try: - info = 
self.api.getblockchaininfo() - current: int = info["blocks"] - total: int = info["headers"] - if current > 0 and current == total: - return "Ready" + if status != "Container running": + return status + + try: + info = self.api.getblockchaininfo() + current: int = info["blocks"] + total: int = info["headers"] + if current > 0 and current == total: + return "Ready" + else: + if total == 0: + return "Syncing 0.00% (0/0)" else: - if total == 0: - return "Syncing 0.00% (0/0)" + p = current / total * 100 + if p > 0.005: + p = p - 0.005 else: - p = current / total * 100 - if p > 0.005: - p = p - 0.005 - else: - p = 0 - return "Syncing %.2f%% (%d/%d)" % (p, current, total) - except BitcoindApiError as e: - return str(e) - except: - self._logger.exception("Failed to get advanced running status") - return "Waiting for bitcoind to come up..." - else: - return status + p = 0 + return "Syncing %.2f%% (%d/%d)" % (p, current, total) + except BitcoindApiError as e: + return str(e) + except: + self._logger.exception("Failed to get advanced running status") + return "Waiting for bitcoind to come up..." class Litecoind(Bitcoind): diff --git a/images/utils/launcher/node/boltz.py b/images/utils/launcher/node/boltz.py index 05d07a970..97bfa3c35 100644 --- a/images/utils/launcher/node/boltz.py +++ b/images/utils/launcher/node/boltz.py @@ -18,7 +18,7 @@ def __init__(self, backend): def getinfo(self, node): try: - info = self._backend[node + " getinfo"]() + info = self._backend.invoke(node + " getinfo") return json.loads(info) except CliError as e: raise BoltzApiError(e.output) @@ -33,7 +33,7 @@ def __init__(self, name, ctx): self.container_spec.environment.extend(environment) self._cli = "wrapper" - self.api = BoltzApi(CliBackend(self.client, self.container_name, self._logger, self._cli)) + self.api = BoltzApi(CliBackend(self.name, self.container_name, self._cli)) def check_node(self, node): try: @@ -44,17 +44,13 @@ def check_node(self, node): def status(self): status = super().status() + if status != "Container running": + return status - if status == "exited": - return "Container exited" - elif status == "running": - btc_status = self.check_node("btc") - ltc_status = self.check_node("ltc") - - if btc_status.isUp and ltc_status.isUp: - return "Ready" - else: - return btc_status.status + "; " + ltc_status.status + btc_status = self.check_node("btc") + ltc_status = self.check_node("ltc") + if btc_status.isUp and ltc_status.isUp: + return "Ready" else: - return status + return btc_status.status + "; " + ltc_status.status diff --git a/images/utils/launcher/node/btcd.py b/images/utils/launcher/node/btcd.py deleted file mode 100644 index be019949a..000000000 --- a/images/utils/launcher/node/btcd.py +++ /dev/null @@ -1,54 +0,0 @@ -from .base import Node, CliBackend -from .bitcoind import BitcoindApi - - -class Btcd(Node): - def __init__(self, name, ctx, litecoin: bool = False): - self.litecoin = litecoin - super().__init__(name, ctx) - - command = [ - "--simnet", - "--txindex", - "--rpcuser=xu", - "--rpcpass=xu", - "--rpclisten=:18556", - "--nolisten", - "--addpeer=btcd.simnet.exchangeunion.com:39555", - ] - - self.container_spec.command.extend(command) - - if self.litecoin: - self._cli = "ltcctl --rpcuser=xu --rpcpass=xu" - else: - self._cli = "btcctl --rpcuser=xu --rpcpass=xu" - if self.network == "simnet": - self._cli += " --simnet" - - self.api = BitcoindApi(CliBackend(self.client, self.container_name, self._logger, self._cli)) - - def status(self): - status = super().status() - if status == "exited": - # TODO 
analyze exit reason - return "Container exited" - elif status == "running": - try: - info = self.api.getblockchaininfo() - current: int = info["blocks"] - total: int = info["headers"] - if current == total: - return "Ready" - else: - return "Syncing %.2f (%d/%d)" % (current / total, current, total) - except: - self._logger.exception("Failed to get advanced running status") - return "Waiting for {} to come up...".format("ltcd" if self.litecoin else "btcd") - else: - return status - - -class Ltcd(Btcd): - def __init__(self, *args): - super().__init__(*args, litecoin=True) diff --git a/images/utils/launcher/node/connext.py b/images/utils/launcher/node/connext.py index 8913dc0dc..201784d3f 100644 --- a/images/utils/launcher/node/connext.py +++ b/images/utils/launcher/node/connext.py @@ -1,6 +1,10 @@ +import logging from .base import Node, CliBackend, CliError +logger = logging.getLogger(__name__) + + class ConnextApiError(Exception): pass @@ -11,10 +15,12 @@ def __init__(self, backend): def is_healthy(self): try: - result = self._backend["http://localhost:5040/health"]() + result = self._backend.invoke("http://localhost:5040/health") return result == "" except CliError as e: - raise ConnextApiError("Starting...") + logger.info(e.exit_code) + logger.info(e.output) + raise ConnextApiError("Starting...") from e class Connext(Node): @@ -72,7 +78,7 @@ def __init__(self, name, ctx): self.container_spec.environment.extend(environment) self._cli = "curl -s" - self.api = ConnextApi(CliBackend(self.client, self.container_name, self._logger, self._cli)) + self.api = ConnextApi(CliBackend(self.name, self.container_name, self._cli)) def get_xud_getinfo_connext_status(self): xud = self.node_manager.nodes["xud"] @@ -87,25 +93,21 @@ def get_xud_getinfo_connext_status(self): def status(self): status = super().status() - if status == "exited": - # TODO: analyze exit reason - return "Container exited" - elif status == "running": - try: - return self.get_xud_getinfo_connext_status() - except: - self._logger.exception("Failed to get connext status from xud getinfo") - try: - healthy = self.api.is_healthy() - if healthy: - return "Ready" - else: - return "Starting..." - except ConnextApiError as e: - self._logger.exception("Failed to get advanced running status") - return str(e) - except: - self._logger.exception("Failed to get advanced running status") - return "Waiting for connext to come up..." - else: + if status != "Container running": return status + try: + return self.get_xud_getinfo_connext_status() + except: + self._logger.exception("Failed to get connext status from xud getinfo") + try: + healthy = self.api.is_healthy() + if healthy: + return "Ready" + else: + return "Starting..." + except ConnextApiError as e: + self._logger.exception("Failed to get advanced running status") + return str(e) + except: + self._logger.exception("Failed to get advanced running status") + return "Waiting for connext to come up..." 
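
The status() refactor above (bitcoind, boltz, connext, and the geth/lnd/xud hunks below) converges on one pattern: the base Node.status() now reports the raw container state as "Container <state>", and each service only runs its own health probe when that state is "Container running". A minimal sketch of that pattern, not part of this patch, with a hypothetical ExampleDaemon service and api.ping() probe:

    class ExampleDaemon(Node):
        def status(self) -> str:
            status = super().status()  # "Container running", "Container exited", "Container missing", ...
            if status != "Container running":
                # nothing service-specific to report while the container is down
                return status
            try:
                self.api.ping()  # hypothetical service-level health check
                return "Ready"
            except Exception:
                self._logger.exception("Failed to get advanced running status")
                return "Waiting for exampled to come up..."
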
diff --git a/images/utils/launcher/node/geth.py b/images/utils/launcher/node/geth.py index 6d5b725c0..71e3ba8b9 100644 --- a/images/utils/launcher/node/geth.py +++ b/images/utils/launcher/node/geth.py @@ -12,11 +12,11 @@ def __init__(self, backend): self._backend = backend def eth_syncing(self): - js_obj = self._backend["--exec eth.syncing attach"]() + js_obj = self._backend.invoke("--exec eth.syncing attach") return demjson.decode(js_obj) def eth_blockNumber(self): - js_obj = self._backend["--exec eth.blockNumber attach"]() + js_obj = self._backend.invoke("--exec eth.blockNumber attach") return demjson.decode(js_obj) @@ -57,7 +57,7 @@ def __init__(self, name, ctx): elif self.network == "mainnet": self._cli = "geth" - self.api = GethApi(CliBackend(self.client, self.container_name, self._logger, self._cli)) + self.api = GethApi(CliBackend(self.name, self.container_name, self._cli)) def get_environment(self): result = [] @@ -160,29 +160,25 @@ def status(self): return self.get_light_status() status = super().status() - if status == "exited": - # TODO analyze exit reason - return "Container exited" - elif status == "running": - try: - syncing = self.api.eth_syncing() - if syncing: - current: int = syncing["currentBlock"] - total: int = syncing["highestBlock"] - p = current / total * 100 - if p > 0.005: - p = p - 0.005 - else: - p = 0 - return "Syncing %.2f%% (%d/%d)" % (p, current, total) - else: - block_number = self.api.eth_blockNumber() - if block_number == 0: - return "Waiting for sync" - else: - return "Ready" - except: - self._logger.exception("Failed to get advanced running status") - return "Waiting for geth to come up..." - else: + if status != "Container running": return status + try: + syncing = self.api.eth_syncing() + if syncing: + current: int = syncing["currentBlock"] + total: int = syncing["highestBlock"] + p = current / total * 100 + if p > 0.005: + p = p - 0.005 + else: + p = 0 + return "Syncing %.2f%% (%d/%d)" % (p, current, total) + else: + block_number = self.api.eth_blockNumber() + if block_number == 0: + return "Waiting for sync" + else: + return "Ready" + except: + self._logger.exception("Failed to get advanced running status") + return "Waiting for geth to come up..." 
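
The "Syncing %.2f%%" strings in the bitcoind, geth and lnd status handlers all subtract 0.005 from the percentage before formatting, presumably so a nearly-synced node rounds down instead of briefly showing "100.00%" while blocks are still missing. A self-contained sketch of that computation (the helper name here is illustrative; the actual get_percentage helper imported by lnd.py lives in launcher.utils and is not shown in this hunk):

    def format_sync_progress(current: int, total: int) -> str:
        if total == 0:
            return "Syncing 0.00% (0/0)"
        p = current / total * 100
        # subtract half of the last displayed digit so %.2f rounds down
        p = p - 0.005 if p > 0.005 else 0
        return "Syncing %.2f%% (%d/%d)" % (p, current, total)

    # format_sync_progress(649999, 650000) == "Syncing 99.99% (649999/650000)"
    # (without the 0.005 adjustment it would print a misleading "Syncing 100.00%")
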
diff --git a/images/utils/launcher/node/image.py b/images/utils/launcher/node/image.py index 6676008d6..cece39c12 100644 --- a/images/utils/launcher/node/image.py +++ b/images/utils/launcher/node/image.py @@ -1,25 +1,30 @@ from __future__ import annotations + +import http.client +import json import logging -from typing import Dict, TYPE_CHECKING import platform -from datetime import datetime -from urllib.request import urlopen, Request -from urllib.error import HTTPError -import json -import http.client import re -import time -from typing import List import sys +import time +from concurrent.futures import wait +from dataclasses import dataclass +from datetime import datetime +from typing import TYPE_CHECKING, Dict, Any +from urllib.error import HTTPError +from urllib.request import urlopen, Request from docker import DockerClient from docker.errors import ImageNotFound -from launcher.utils import parallel_execute, ParallelExecutionError -from launcher.errors import FatalError +from launcher.errors import FatalError, NoWaiting +from launcher.utils import yes_or_no if TYPE_CHECKING: from .base import Node + from launcher.config import Config + +logger = logging.getLogger(__name__) def get_line(record): @@ -65,18 +70,16 @@ def __init__(self, digest: str, created: datetime, branch: str, revision: str, n self.revision = revision self.name = name - def __repr__(self): - digest = self.digest - created = self.created - branch = self.branch - revision = self.revision - name = self.name - return f"" + +@dataclass +class Action: + type: str + details: Any class Image: def __init__(self, repo: str, tag: str, branch: str, client: DockerClient, node: Node): - self.logger = logging.getLogger("launcher.node.Image") + self.logger = logger self.id = None self.repo = repo self.tag = tag @@ -93,6 +96,9 @@ def __init__(self, repo: str, tag: str, branch: str, client: DockerClient, node: else: self.use_image = self.name + def __repr__(self): + return "" % self.name + @property def name(self): return "{}:{}".format(self.repo, self.tag) @@ -213,7 +219,7 @@ def fetch_cloud_metadata(self): except: self.logger.exception("Failed to fetch cloud image metadata") - def get_status(self) -> str: + def _get_update_status(self) -> str: """Get image update status :return: image status @@ -223,10 +229,11 @@ def get_status(self) -> str: - LOCAL_MISSING: The cloud image exists but no local image. - LOCAL_ONLY: The image only exists locally. - UNAVAILABLE: The image is not found locally or remotely. 
+ - USE_LOCAL """ if self.node.node_config["use_local_image"]: self.cloud_metadata = None - return "LOCAL_NEWER" + return "USE_LOCAL" local = self.local_metadata @@ -247,45 +254,58 @@ def get_status(self) -> str: else: return "LOCAL_OUTDATED" - @property - def status_message(self): - if self.status == "UNAVAILABLE": - return "unavailable" - elif self.status == "LOCAL_ONLY": - return "using local version" - elif self.status == "LOCAL_MISSING": - return "pull" - elif self.status == "LOCAL_NEWER": - return "using local version" - elif self.status == "LOCAL_OUTDATED": - return "pull" - elif self.status == "UP_TO_DATE": - return "up-to-date" - - def check_for_updates(self): - self.status = self.get_status() - if self.status in ["LOCAL_MISSING", "LOCAL_OUTDATED"]: + def get_update_action(self) -> str: + status = self._get_update_status() + + if status in ["LOCAL_MISSING", "LOCAL_OUTDATED"]: self.pull_image = self.cloud_metadata.name self.use_image = self.pull_image - def __repr__(self): - name = self.name - use = self.use_image - pull = self.pull_image - return f"" + if status == "UNAVAILABLE": + raise Exception("Image unavailable: " + self.name) + elif status == "LOCAL_ONLY": + raise UserWarning("Registry image not found (will use local version): " + self.name) + elif status == "LOCAL_MISSING": + action = "PULL" + elif status == "LOCAL_NEWER": + raise UserWarning("Your local image version is newer than registry one: " + self.name) + elif status == "LOCAL_OUTDATED": + action = "PULL" + elif status == "UP_TO_DATE": + action = "NONE" + elif status == "USE_LOCAL": + action = "NONE" + else: + raise Exception("Unexpected status " + status) + + logger.info("Image %s: status=%s, action=%s", self.name, status, action) + return action + + def pull(self): + print("Pulling %s..." 
% self.pull_image) + repo, tag = self.pull_image.split(":") + output = self.client.api.pull(repo, tag=tag, stream=True, decode=True) + print_status(output) class ImageManager: - def __init__(self, config, shell, client): - self.logger = logging.getLogger("launcher.node.ImageManager") + config: Config + client: DockerClient - self.branch = config.branch - self.client = client - self.shell = shell - self.nodes = config.nodes + def __init__(self, config: Config, docker_client: DockerClient): + self.config = config + self.client = docker_client self.images: Dict[str, Image] = {} + @property + def branch(self): + return self.config.branch + + @property + def nodes(self): + return self.config.nodes + def normalize_name(self, name): if "/" in name: if ":" in name: @@ -323,44 +343,33 @@ def get_image(self, name: str, node: Node) -> Image: else: raise FatalError("Invalid image name: " + name) - def check_for_updates(self) -> List[Image]: + def check_for_updates(self) -> Dict[Image, str]: + logger.info("Checking for image updates") + images = list(self.images.values()) images = [image for image in images if image.node.mode == "native" and not image.node.disabled] - def print_failed(failed): - pass + executor = self.config.executor + + futs = {executor.submit(img.get_update_action): img for img in images} - def try_again(): - return False + while True: + done, not_done = wait(futs, 30) + if len(not_done) > 0: + names = ", ".join([futs[f].name for f in not_done]) + print("Still waiting for update checking results of image(s): %s" % names) + reply = yes_or_no("Would you like to keep waiting?") + if reply == "no": + raise NoWaiting + else: + break - def wrapper(i): - self.logger.info("(%s) Checking for updates", i.name) + result = {} + for f in done: try: - i.check_for_updates() - except Exception as e: - logging.exception("(%s) Checking for updates: ERRORED", i.name) - raise e - self.logger.info("(%s) Checking for updates: %s", i.name, i.status) + result[futs[f]] = f.result() + except UserWarning as e: + print("WARNING: %s" % e) - try: - parallel_execute(images, lambda i: wrapper(i), 30, print_failed, try_again) - except ParallelExecutionError as e: - for image, error in e.failed: - error_msg = str(error) - if error_msg == "": - error_msg = type(error) - print("- Image %s: %s" % (image.name, error_msg)) - raise FatalError("Failed to check for image updates") - - return images - - def update_images(self) -> None: - for image in self.images.values(): - status = image.status - pull_image = image.pull_image - if status in ["LOCAL_MISSING", "LOCAL_OUTDATED"]: - print("Pulling %s..." 
% pull_image) - repo, tag = pull_image.split(":") - output = self.client.api.pull(repo, tag=tag, stream=True, decode=True) - print_status(output) + return result diff --git a/images/utils/launcher/node/lnd.py b/images/utils/launcher/node/lnd.py index f33cd252f..1a14b62e1 100644 --- a/images/utils/launcher/node/lnd.py +++ b/images/utils/launcher/node/lnd.py @@ -1,7 +1,13 @@ -from .base import Node, CliBackend, CliError import json +import logging import re from datetime import datetime, timedelta +from threading import Event + +from launcher.utils import get_percentage +from .base import Node, CliBackend, CliError + +logger = logging.getLogger(__name__) class InvalidChain(Exception): @@ -19,11 +25,26 @@ def __init__(self, backend): def getinfo(self): try: - return json.loads(self._backend["getinfo"]()) + info = self._backend.invoke("getinfo") + return json.loads(info) except CliError as e: raise LndApiError(e.output) +class CFHeaderState: + def __init__(self): + self.current = 0 + self.total = 0 + self.ready = False + + def __repr__(self): + return "%s/%s (%s)" % (self.current, self.total, self.ready) + + @property + def message(self): + return "Syncing " + get_percentage(self.current, self.total) + + class Lnd(Node): def __init__(self, name, ctx, chain: str): super().__init__(name, ctx) @@ -36,7 +57,7 @@ def __init__(self, name, ctx, chain: str): self.container_spec.environment.extend(environment) self._cli = f"lncli -n {self.network} -c {self.chain}" - self.api = LndApi(CliBackend(self.client, self.container_name, self._logger, self._cli)) + self.api = LndApi(CliBackend(self.name, self.container_name, self._cli)) def get_command(self): if self.network != "simnet": @@ -106,6 +127,7 @@ def get_current_height(self): try: c = self.get_container() since = datetime.now() - timedelta(hours=1) + # TODO use base logs lines = c.logs(since=since).decode().splitlines() p = re.compile(r".*New block: height=(\d+),.*") for line in reversed(lines): @@ -125,41 +147,96 @@ def status(self): return self.get_external_status() status = super().status() - if status == "exited": - # TODO analyze exit reason - return "Container exited" - elif status == "running": - try: - info = self.api.getinfo() - synced_to_chain = info["synced_to_chain"] - total = info["block_height"] - current = self.get_current_height() - if current: - if total <= current: - msg = "Ready" - else: - msg = "Syncing" - p = current / total * 100 - if p > 0.005: - p = p - 0.005 - else: - p = 0 - msg += " %.2f%% (%d/%d)" % (p, current, total) + if status != "Container running": + return status + try: + info = self.api.getinfo() + synced_to_chain = info["synced_to_chain"] + total = info["block_height"] + current = self.get_current_height() + if current: + if total <= current: + msg = "Ready" else: - if synced_to_chain: - msg = "Ready" + msg = "Syncing" + p = current / total * 100 + if p > 0.005: + p = p - 0.005 else: - msg = "Syncing" - return msg - except LndApiError as e: - # [lncli] Wallet is encrypted. Please unlock using 'lncli unlock', or set password using 'lncli create' if this is the first time starting lnd. - if "Wallet is encrypted" in str(e): - return "Wallet locked. Unlock with xucli unlock." - except: - self._logger.exception("Failed to get advanced running status") - return "Waiting for lnd ({}) to come up...".format(self.chain) + p = 0 + msg += " %.2f%% (%d/%d)" % (p, current, total) + else: + if synced_to_chain: + msg = "Ready" + else: + msg = "Syncing" + return msg + except LndApiError as e: + # [lncli] Wallet is encrypted. 
Please unlock using 'lncli unlock', or set password using 'lncli create' if this is the first time starting lnd. + if "Wallet is encrypted" in str(e): + return "Wallet locked. Unlock with xucli unlock." + except: + self._logger.exception("Failed to get advanced running status") + return "Waiting for lnd ({}) to come up...".format(self.chain) + + def ensure_ready(self, stop: Event): + # [lncli] open /root/.lnd/tls.cert: no such file or directory + # [lncli] unable to read macaroon path (check the network setting!): open /root/.lnd/data/chain/bitcoin/testnet/admin.macaroon: no such file or directory + # [lncli] Wallet is encrypted. Please unlock using 'lncli unlock', or set password using 'lncli create' if this is the first time starting lnd. + while not stop.is_set(): + exit_code, output = self.exec(self._cli + " getinfo") + if exit_code == 0: + break + if "unable to read macaroon path" in output: + break + if "Wallet is encrypted" in output: + break + stop.wait(3) + + def update_cfheader(self, state: CFHeaderState, stop: Event): + container = self.container + started_at = container.attrs["State"]["StartedAt"] # e.g. 2020-06-22T17:26:01.541780733Z + started_at = started_at.split(".")[0] + t_utc = datetime.strptime(started_at, "%Y-%m-%dT%H:%M:%S") + t_local = datetime.fromtimestamp(t_utc.timestamp()) + + p0 = re.compile(r"^.*Fully caught up with cfheaders at height (\d+), waiting at tip for new blocks$") + if self.config.network == "simnet": + p1 = re.compile(r"^.*Writing cfheaders at height=(\d+) to next checkpoint$") else: - return status + p1 = re.compile(r"^.*Fetching set of checkpointed cfheaders filters from height=(\d+).*$") + p2 = re.compile(r"^.*Syncing to block height (\d+) from peer.*$") + + if stop.is_set(): + return + + for line in container.logs(stream=True, follow=True, since=t_local): + if stop.is_set(): + break + line = line.decode().strip() + m = p0.match(line) + + if m: + #logger.debug("[%s] (match 1) %s", self.name, line) + state.current = int(m.group(1)) + state.ready = True + h = max(state.current, state.total) + state.current = h + state.total = h + break + + m = p1.match(line) + if m: + #logger.debug("[%s] (match 2) %s", self.name, line) + state.current = int(m.group(1)) + continue + + m = p2.match(line) + if m: + #logger.debug("[%s] (match 3) %s", self.name, line) + state.total = int(m.group(1)) + + logger.debug("[%s] update_cfheader ends" % self.name) class Lndbtc(Lnd): diff --git a/images/utils/launcher/node/proxy.py b/images/utils/launcher/node/proxy.py index 6a21647aa..52250eeb2 100644 --- a/images/utils/launcher/node/proxy.py +++ b/images/utils/launcher/node/proxy.py @@ -6,4 +6,8 @@ def __init__(self, name, ctx): super().__init__(name, ctx) def status(self): + status = super().status() + if status != "Container running": + return status + return "Ready" diff --git a/images/utils/launcher/node/pty.py b/images/utils/launcher/node/pty.py new file mode 100644 index 000000000..a806a6cf1 --- /dev/null +++ b/images/utils/launcher/node/pty.py @@ -0,0 +1,482 @@ +from dockerpty.pty import exec_create, Operation, PseudoTerminal +import sys +import os +import fcntl +import errno +import struct +import select as builtin_select +import six +import logging + + +logger = logging.getLogger(__name__) + + +def exec_command( + client, container, command, interactive=True, stdout=None, stderr=None, stdin=None) -> str: + """ + Run provided command via exec API in provided container. 
+ + This is just a wrapper for PseudoTerminal(client, container).exec_command() + """ + exec_id = exec_create(client, container, command, interactive=interactive) + + operation = ExecOperation(client, exec_id, + interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin) + PseudoTerminal(client, operation).start() + + return operation.output.data.decode() + + +class ExecOperation(Operation): + """ + class for handling `docker exec`-like command + """ + + def __init__(self, client, exec_id, interactive=True, stdout=None, stderr=None, stdin=None): + self.exec_id = exec_id + self.client = client + self.raw = None + self.interactive = interactive + self.stdout = stdout or sys.stdout + self.stderr = stderr or sys.stderr + self.stdin = stdin or sys.stdin + self._info = None + self.output = None + + def start(self, sockets=None, **kwargs): + """ + start execution + """ + stream = sockets or self.sockets() + pumps = [] + + if self.interactive: + pumps.append(Pump(Stream(self.stdin), stream, wait_for_output=False)) + + self.output = Stream(self.stdout) + + pumps.append(Pump(stream, self.output, propagate_close=False)) + # FIXME: since exec_start returns a single socket, how do we + # distinguish between stdout and stderr? + # pumps.append(io.Pump(stream, io.Stream(self.stderr), propagate_close=False)) + + return pumps + + def israw(self, **kwargs): + """ + Returns True if the PTY should operate in raw mode. + + If the exec was not started with tty=True, this will return False. + """ + + if self.raw is None: + self.raw = self.stdout.isatty() and self.is_process_tty() + + return self.raw + + def sockets(self): + """ + Return a single socket which is processing all I/O to exec + """ + socket = self.client.exec_start(self.exec_id, socket=True, tty=self.interactive) + stream = Stream(socket) + if self.is_process_tty(): + return stream + else: + return Demuxer(stream) + + def resize(self, height, width, **kwargs): + """ + resize pty of an execed process + """ + self.client.exec_resize(self.exec_id, height=height, width=width) + + def is_process_tty(self): + """ + does execed process have allocated tty? + """ + return self._exec_info()["ProcessConfig"]["tty"] + + def _exec_info(self): + """ + Caching wrapper around client.exec_inspect + """ + if self._info is None: + self._info = self.client.exec_inspect(self.exec_id) + return self._info + + +def set_blocking(fd, blocking=True): + """ + Set the given file-descriptor blocking or non-blocking. + + Returns the original blocking status. + """ + + old_flag = fcntl.fcntl(fd, fcntl.F_GETFL) + + if blocking: + new_flag = old_flag & ~ os.O_NONBLOCK + else: + new_flag = old_flag | os.O_NONBLOCK + + fcntl.fcntl(fd, fcntl.F_SETFL, new_flag) + + return not bool(old_flag & os.O_NONBLOCK) + + +def select(read_streams, write_streams, timeout=0): + """ + Select the streams from `read_streams` that are ready for reading, and + streams from `write_streams` ready for writing. + + Uses `select.select()` internally but only returns two lists of ready streams. + """ + + exception_streams = [] + + try: + return builtin_select.select( + read_streams, + write_streams, + exception_streams, + timeout, + )[0:2] + except builtin_select.error as e: + # POSIX signals interrupt select() + no = e.errno if six.PY3 else e[0] + if no == errno.EINTR: + return ([], []) + else: + raise e + + +class Stream(object): + """ + Generic Stream class. + + This is a file-like abstraction on top of os.read() and os.write(), which + add consistency to the reading of sockets and files alike. 
+ """ + + """ + Recoverable IO/OS Errors. + """ + ERRNO_RECOVERABLE = [ + errno.EINTR, + errno.EDEADLK, + errno.EWOULDBLOCK, + ] + + def __init__(self, fd): + """ + Initialize the Stream for the file descriptor `fd`. + + The `fd` object must have a `fileno()` method. + """ + self.fd = fd + self.buffer = b'' + self.data = bytearray() + self.close_requested = False + self.closed = False + + def fileno(self): + """ + Return the fileno() of the file descriptor. + """ + + return self.fd.fileno() + + def set_blocking(self, value): + if hasattr(self.fd, 'setblocking'): + self.fd.setblocking(value) + return True + else: + return set_blocking(self.fd, value) + + def read(self, n=4096): + """ + Return `n` bytes of data from the Stream, or None at end of stream. + """ + + while True: + try: + if hasattr(self.fd, 'recv'): + return self.fd.recv(n) + return os.read(self.fd.fileno(), n) + except EnvironmentError as e: + if e.errno not in Stream.ERRNO_RECOVERABLE: + raise e + + + def write(self, data): + """ + Write `data` to the Stream. Not all data may be written right away. + Use select to find when the stream is writeable, and call do_write() + to flush the internal buffer. + """ + + if not data: + return None + + self.buffer += data + self.do_write() + + return len(data) + + def do_write(self): + """ + Flushes as much pending data from the internal write buffer as possible. + """ + while True: + try: + written = 0 + + if hasattr(self.fd, 'send'): + written = self.fd.send(self.buffer) + else: + written = os.write(self.fd.fileno(), self.buffer) + + self.data.extend(self.buffer) + + self.buffer = self.buffer[written:] + + # try to close after writes if a close was requested + if self.close_requested and len(self.buffer) == 0: + self.close() + + return written + except EnvironmentError as e: + if e.errno not in Stream.ERRNO_RECOVERABLE: + raise e + + def needs_write(self): + """ + Returns True if the stream has data waiting to be written. + """ + return len(self.buffer) > 0 + + def close(self): + self.close_requested = True + + # We don't close the fd immediately, as there may still be data pending + # to write. + if not self.closed and len(self.buffer) == 0: + self.closed = True + if hasattr(self.fd, 'close'): + self.fd.close() + else: + os.close(self.fd.fileno()) + + def __repr__(self): + return "{cls}({fd})".format(cls=type(self).__name__, fd=self.fd) + + +class Demuxer(object): + """ + Wraps a multiplexed Stream to read in data demultiplexed. + + Docker multiplexes streams together when there is no PTY attached, by + sending an 8-byte header, followed by a chunk of data. + + The first 4 bytes of the header denote the stream from which the data came + (i.e. 0x01 = stdout, 0x02 = stderr). Only the first byte of these initial 4 + bytes is used. + + The next 4 bytes indicate the length of the following chunk of data as an + integer in big endian format. This much data must be consumed before the + next 8-byte header is read. + """ + + def __init__(self, stream): + """ + Initialize a new Demuxer reading from `stream`. + """ + + self.stream = stream + self.remain = 0 + + def fileno(self): + """ + Returns the fileno() of the underlying Stream. + + This is useful for select() to work. + """ + + return self.stream.fileno() + + def set_blocking(self, value): + return self.stream.set_blocking(value) + + def read(self, n=4096): + """ + Read up to `n` bytes of data from the Stream, after demuxing. 
+ + Less than `n` bytes of data may be returned depending on the available + payload, but the number of bytes returned will never exceed `n`. + + Because demuxing involves scanning 8-byte headers, the actual amount of + data read from the underlying stream may be greater than `n`. + """ + + size = self._next_packet_size(n) + + if size <= 0: + return + else: + data = six.binary_type() + while len(data) < size: + nxt = self.stream.read(size - len(data)) + if not nxt: + # the stream has closed, return what data we got + return data + data = data + nxt + return data + + def write(self, data): + """ + Delegates the the underlying Stream. + """ + + return self.stream.write(data) + + def needs_write(self): + """ + Delegates to underlying Stream. + """ + + if hasattr(self.stream, 'needs_write'): + return self.stream.needs_write() + + return False + + def do_write(self): + """ + Delegates to underlying Stream. + """ + + if hasattr(self.stream, 'do_write'): + return self.stream.do_write() + + return False + + def close(self): + """ + Delegates to underlying Stream. + """ + + return self.stream.close() + + def _next_packet_size(self, n=0): + size = 0 + + if self.remain > 0: + size = min(n, self.remain) + self.remain -= size + else: + data = six.binary_type() + while len(data) < 8: + nxt = self.stream.read(8 - len(data)) + if not nxt: + # The stream has closed, there's nothing more to read + return 0 + data = data + nxt + + if data is None: + return 0 + if len(data) == 8: + __, actual = struct.unpack('>BxxxL', data) + size = min(n, actual) + self.remain = actual - size + + return size + + def __repr__(self): + return "{cls}({stream})".format(cls=type(self).__name__, + stream=self.stream) + + +class Pump(object): + """ + Stream pump class. + + A Pump wraps two Streams, reading from one and and writing its data into + the other, much like a pipe but manually managed. + + This abstraction is used to facilitate piping data between the file + descriptors associated with the tty and those associated with a container's + allocated pty. + + Pumps are selectable based on the 'read' end of the pipe. + """ + + def __init__(self, + from_stream, + to_stream, + wait_for_output=True, + propagate_close=True): + """ + Initialize a Pump with a Stream to read from and another to write to. + + `wait_for_output` is a flag that says that we need to wait for EOF + on the from_stream in order to consider this pump as "done". + """ + + self.from_stream = from_stream + self.to_stream = to_stream + self.eof = False + self.wait_for_output = wait_for_output + self.propagate_close = propagate_close + + def fileno(self): + """ + Returns the `fileno()` of the reader end of the Pump. + + This is useful to allow Pumps to function with `select()`. + """ + + return self.from_stream.fileno() + + def set_blocking(self, value): + return self.from_stream.set_blocking(value) + + def flush(self, n=4096): + """ + Flush `n` bytes of data from the reader Stream to the writer Stream. + + Returns the number of bytes that were actually flushed. A return value + of zero is not an error. + + If EOF has been reached, `None` is returned. 
+ """ + + try: + read = self.from_stream.read(n) + + if read is None or len(read) == 0: + self.eof = True + if self.propagate_close: + self.to_stream.close() + return None + + return self.to_stream.write(read) + except OSError as e: + if e.errno != errno.EPIPE: + raise e + + def is_done(self): + """ + Returns True if the read stream is done (either it's returned EOF or + the pump doesn't have wait_for_output set), and the write + side does not have pending bytes to send. + """ + + return (not self.wait_for_output or self.eof) and \ + not (hasattr(self.to_stream, 'needs_write') and self.to_stream.needs_write()) + + def __repr__(self): + return "{cls}(from={from_stream}, to={to_stream})".format( + cls=type(self).__name__, + from_stream=self.from_stream, + to_stream=self.to_stream) diff --git a/images/utils/launcher/node/webui.py b/images/utils/launcher/node/webui.py index db989fdfd..bab4f6496 100644 --- a/images/utils/launcher/node/webui.py +++ b/images/utils/launcher/node/webui.py @@ -6,4 +6,8 @@ def __init__(self, name, ctx): super().__init__(name, ctx) def status(self): + status = super().status() + if status != "Container running": + return status + return "Ready" diff --git a/images/utils/launcher/node/xud.py b/images/utils/launcher/node/xud.py index e8ca4fe75..d20365c0f 100644 --- a/images/utils/launcher/node/xud.py +++ b/images/utils/launcher/node/xud.py @@ -1,8 +1,22 @@ +from __future__ import annotations + +import functools import json +import logging +import os import re -from typing import List +from concurrent.futures import wait, TimeoutError +import threading +from typing import List, Optional +import docker.errors +from launcher.table import ServiceTable +from launcher.utils import yes_or_no, normalize_path +from launcher.errors import NoWaiting, FatalError from .base import Node, CliBackend, CliError +from .lnd import CFHeaderState + +logger = logging.getLogger(__name__) class XudApiError(Exception): @@ -15,22 +29,25 @@ def __init__(self, backend): def getinfo(self): try: - s = self._backend["getinfo -j"]() - s = re.sub(r"D.*Warning: insecure environment read function 'getenv' used[\s\n\r]+", "", s) + s = self._backend.invoke("getinfo -j") return json.loads(s) except CliError as e: raise XudApiError(e.output) +class InvalidPassword(Exception): + pass + + class PasswordNotMatch(Exception): pass -class MnemonicNot24Words(Exception): +class WrongPassword(Exception): pass -class InvalidPassword(Exception): +class MnemonicNot24Words(Exception): pass @@ -38,6 +55,14 @@ class NoWalletsInitialized(Exception): pass +class Cancelled(Exception): + pass + + +class InvalidDirectory(Exception): + pass + + class Xud(Node): def __init__(self, name, ctx): super().__init__(name, ctx) @@ -46,7 +71,7 @@ def __init__(self, name, ctx): self._cli = "xucli" - self.api = XudApi(CliBackend(self.client, self.container_name, self._logger, self._cli)) + self.api = XudApi(CliBackend(self.name, self.container_name, self._cli)) def _get_environment(self) -> List[str]: env = [ @@ -54,7 +79,6 @@ def _get_environment(self) -> List[str]: ] lndbtc = self.config.nodes["lndbtc"] - env.append("LNDBTC_MODE={}".format(lndbtc["mode"])) if lndbtc["mode"] == "external": env.extend([ "LNDBTC_RPC_HOST={}".format(lndbtc["rpc_host"]), @@ -64,7 +88,6 @@ def _get_environment(self) -> List[str]: ]) lndltc = self.config.nodes["lndltc"] - env.append("LNDLTC_MODE={}".format(lndltc["mode"])) if lndltc["mode"] == "external": env.extend([ "LNDLTC_RPC_HOST={}".format(lndbtc["rpc_host"]), @@ -77,48 +100,44 @@ def 
_get_environment(self) -> List[str]: def status(self): status = super().status() - if status == "exited": - # TODO analyze exit reason - return "Container exited" - elif status == "running": - try: - info = self.api.getinfo() - lndbtc_status = info["lndMap"][0][1]["status"] - lndltc_status = info["lndMap"][1][1]["status"] - connext_status = info["connext"]["status"] - - if "Ready" == lndbtc_status \ - or "Ready" == lndltc_status \ - or "Ready" == connext_status: - return "Ready" - - if "has no active channels" in lndbtc_status \ - or "has no active channels" in lndltc_status \ - or "has no active channels" in connext_status: - return "Waiting for channels" - else: - not_ready = [] - if lndbtc_status != "Ready": - not_ready.append("lndbtc") - if lndltc_status != "Ready": - not_ready.append("lndltc") - if connext_status != "Ready": - not_ready.append("connext") - return "Waiting for " + ", ".join(not_ready) - except XudApiError as e: - if "xud is locked" in str(e): - return "Wallet locked. Unlock with xucli unlock." - elif "no such file or directory, open '/root/.xud/tls.cert'" in str(e): - return "Starting..." - elif "xud is starting" in str(e): - return "Starting..." - else: - return str(e) - except: - self._logger.exception("Failed to get advanced running status") - return "Waiting for xud to come up..." - else: + if status != "Container running": return status + try: + info = self.api.getinfo() + lndbtc_status = info["lndMap"][0][1]["status"] + lndltc_status = info["lndMap"][1][1]["status"] + connext_status = info["connext"]["status"] + + if "Ready" == lndbtc_status \ + or "Ready" == lndltc_status \ + or "Ready" == connext_status: + return "Ready" + + if "has no active channels" in lndbtc_status \ + or "has no active channels" in lndltc_status \ + or "has no active channels" in connext_status: + return "Waiting for channels" + else: + not_ready = [] + if lndbtc_status != "Ready": + not_ready.append("lndbtc") + if lndltc_status != "Ready": + not_ready.append("lndltc") + if connext_status != "Ready": + not_ready.append("connext") + return "Waiting for " + ", ".join(not_ready) + except XudApiError as e: + if "xud is locked" in str(e): + return "Wallet locked. Unlock with xucli unlock." + elif "no such file or directory, open '/root/.xud/tls.cert'" in str(e): + return "Starting..." + elif "xud is starting" in str(e): + return "Starting..." + else: + return str(e) + except: + self._logger.exception("Failed to get advanced running status") + return "Waiting for xud to come up..." 
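
For reference, the 8-byte frame header that the new pty.py Demuxer parses (one stream-id byte, three padding bytes, then a 4-byte big-endian payload length, as described in its docstring above) can be decoded with the standard struct module alone. The following is a minimal standalone sketch under that assumption; parse_mux_header is a hypothetical name used only for illustration, not part of the launcher:

import struct

def parse_mux_header(header: bytes):
    # 1 byte stream id (1 = stdout, 2 = stderr), 3 padding bytes,
    # 4-byte big-endian payload length -- the same '>BxxxL' format
    # string used by Demuxer._next_packet_size.
    assert len(header) == 8
    stream_id, length = struct.unpack('>BxxxL', header)
    return stream_id, length

# A stdout frame announcing a 13-byte payload:
assert parse_mux_header(b'\x01\x00\x00\x00\x00\x00\x00\x0d') == (1, 13)
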
def cli_filter(self, cmd, text): text = re.sub(r"D.*Warning: insecure environment read function 'getenv' used[\s\n\r]+", "", text) @@ -127,30 +146,413 @@ def cli_filter(self, cmd, text): def extract_exception(self, cmd: str, output: str): if cmd.startswith("create"): if "password must be at least 8 characters" in output: - return InvalidPassword() + raise InvalidPassword elif "Passwords do not match, please try again" in output: - return PasswordNotMatch() + raise PasswordNotMatch elif "xud was initialized without a seed because no wallets could be initialized" in output: - return NoWalletsInitialized() + raise NoWalletsInitialized + elif "Error: " in output: + raise FatalError("Failed to create wallets") elif "it is your ONLY backup in case of data loss" in output: - return None + return else: - return Exception("Unexpected xucli create error: " + output.strip()) + print("^C") + raise KeyboardInterrupt elif cmd.startswith("restore"): if "Password must be at least 8 characters" in output: - return InvalidPassword() + raise InvalidPassword elif "Passwords do not match, please try again" in output: - return PasswordNotMatch() + raise PasswordNotMatch elif "Mnemonic must be exactly 24 words" in output: - return MnemonicNot24Words() + raise MnemonicNot24Words + elif "Error: " in output: + raise FatalError("Failed to restore wallets") elif "The following wallets were restored" in output: - return None + return else: - return Exception("Unexpected xucli restore error: " + output.strip()) + print("^C") + raise KeyboardInterrupt elif cmd.startswith("unlock"): if "xud was unlocked successfully" in output: - return None - elif output == "Enter master xud password: ": - return KeyboardInterrupt() + return + elif "password is incorrect" in output: + raise WrongPassword + elif "Error: " in output: + raise FatalError("Failed to unlock wallets") + else: + print("^C") + raise KeyboardInterrupt() + + def _ensure_dependencies_ready(self, stop: threading.Event): + deps = [ + self.get_service("lndbtc"), + self.get_service("lndltc"), + self.get_service("connext"), + ] + + executor = self.config.executor + + futs = {executor.submit(getattr(d, "ensure_ready"), stop): d for d in deps} + + while True: + done, not_done = wait(futs, 30) + if len(not_done) == 0: + break + names = [futs[f].name for f in not_done] + names_str = ", ".join(names) + reply = yes_or_no("Keep waiting for {} to be ready?".format(names_str)) + if reply == "no": + raise NoWaiting + + def _save_seed(self, output): + s = output + p1 = re.compile(r"^.*BEGIN XUD SEED-+([^-]+)-+END XUD SEED.*$", re.MULTILINE | re.DOTALL) + m = p1.match(s) + s = m.group(1) + p2 = re.compile(r"\s*\d+\.\s*") + s = re.sub(p2, " ", s) + s = s.strip() + + seed_file = os.path.join(self.config.network_dir, "seed.txt") + with open(seed_file, "w") as f: + f.write(s) + + print(f"[DEV] XUD seed is saved in file {self.config.host_network_dir}/seed.txt") + + def _create_wallets(self) -> None: + retry = 3 + i = 0 + while i < retry: + try: + if self.config.dev_mode: + self.cli("create", exception=True, parse_output=self._save_seed) + else: + self.cli("create", exception=True) + input("YOU WILL NOT BE ABLE TO DISPLAY YOUR XUD SEED AGAIN. 
Press ENTER to continue...")
+                return
+            except (PasswordNotMatch, InvalidPassword):
+                pass
+            i += 1
+        raise Cancelled
+
+    def _check_restore_dir(self, value) -> List[str]:
+        try:
+            self._check_backup_dir(value)
+        except InvalidDirectory as e:
+            raise InvalidDirectory("Path not available ({})".format(e)) from e
+
+        files = os.listdir("/mnt/hostfs" + value)
+        contents = []
+        if "xud" in files:
+            contents.append("xud")
+        if "lnd-BTC" in files:
+            contents.append("lndbtc")
+        if "lnd-LTC" in files:
+            contents.append("lndltc")
+
+        if len(contents) > 0:
+            return contents
+        else:
+            raise InvalidDirectory("No backup files found")
+
+    def _get_restore_dir(self) -> str:
+        restore_dir = self.config.restore_dir
+
+        if restore_dir:
+            try:
+                self._check_backup_dir(restore_dir)
+                return restore_dir
+            except InvalidDirectory:
+                restore_dir = None
+                logger.exception("config.restore_dir is not valid")
+
+        while True:
+            reply = input("Please paste the path to your XUD backup to restore your channel balance, your keys and other historical data: ")
+            reply = reply.strip()
+            path = normalize_path(reply)
+            print("Checking files... ", end="", flush=True)
+            try:
+                contents = self._check_restore_dir(path)
+            except InvalidDirectory as e:
+                print("{}. ".format(e), end="", flush=True)
+                reply = yes_or_no("Do you wish to continue WITHOUT restoring channel balance, keys and historical data?")
+                if reply == "yes":
+                    restore_dir = ""
+                    break
+                continue
+
+            if len(contents) > 1:
+                contents_text = ", ".join(contents[:-1]) + " and " + contents[-1]
+            else:
+                contents_text = contents[0]
+            print(f"Looking good. This will restore {contents_text}. ", end="", flush=True)
+            reply = yes_or_no("Do you wish to continue?")
+            if reply == "no":
+                raise Cancelled
+            restore_dir = path
+            break
+
+        return restore_dir
+
+    def _restore_wallets(self) -> None:
+        restore_dir = self._get_restore_dir()
+
+        retry = 3
+        i = 0
+        while i < retry:
+            try:
+                if restore_dir == "":
+                    self.cli("restore", exception=True)
+                else:
+                    self.cli("restore /mnt/hostfs{}".format(restore_dir), exception=True)
+                return
+            except (PasswordNotMatch, InvalidPassword, MnemonicNot24Words):
+                pass
+            i += 1
+        raise Cancelled
+
+    def _setup_wallets(self) -> None:
+        while True:
+            print("Do you want to create a new xud environment or restore an existing one?")
+            print("1) Create New")
+            print("2) Restore Existing")
+            reply = input("Please choose: ")
+            reply = reply.strip()
+            if reply == "1":
+                try:
+                    self._create_wallets()
+                    break
+                except Cancelled:
+                    continue
+            elif reply == "2":
+                try:
+                    self._restore_wallets()
+                    break
+                except Cancelled:
+                    continue
+
+    @property
+    def backup_dir(self) -> Optional[str]:
+        value_file = os.path.join(self.data_dir, ".backup-dir-value")
+        if os.path.exists(value_file):
+            with open(value_file) as f:
+                value = f.read().strip()
+                value = value.replace("/mnt/hostfs", "")
+                return value
+        return None
+
+    def update_backup_dir(self, value: str) -> None:
+        cmd = "/update-backup-dir.sh '/mnt/hostfs{}'".format(value)
+        exit_code, output = self.exec(cmd)
+        print(output)
+        if exit_code != 0:
+            raise Exception("Failed to update backup location")
+
+    def _check_backup_dir(self, value: str) -> None:
+        value = "/mnt/hostfs" + value
+
+        if not os.path.exists(value):
+            raise InvalidDirectory("does not exist")
+
+        if not os.path.isdir(value):
+            raise InvalidDirectory("not a directory")
+
+        if not os.access(value, os.R_OK):
+            raise InvalidDirectory("not readable")
+
+        if not os.access(value, os.W_OK):
+            raise InvalidDirectory("not writable")
+
+    def _setup_backup(self) -> None:
logger.info("Setup backup") + + current_backup_dir = self.backup_dir + + if current_backup_dir: + backup_dir = self.config.backup_dir + + if backup_dir: + if current_backup_dir != backup_dir: + self.update_backup_dir(backup_dir) + else: + backup_dir = self.config.backup_dir + + if not backup_dir: + print() + print("Please enter a path to a destination where to store a backup of your environment. " + "It includes everything, but NOT your wallet balance which is secured by your XUD SEED. " + "The path should be an external drive, like a USB or network drive, which is permanently " + "available on your device since backups are written constantly.") + print() + + while True: + reply = input("Enter path to backup location: ") + reply = reply.strip() + path = normalize_path(reply) + print("Checking backup location... ", end="", flush=True) + try: + self._check_backup_dir(path) + print("OK.") + except InvalidDirectory as e: + print("Failed (%s)." % e) + continue + + self.update_backup_dir(path) + break + else: - return Exception("Unexpected xucli unlock error: " + output.strip()) + if current_backup_dir != backup_dir: + self.update_backup_dir(backup_dir) + + def has_wallets(self) -> bool: + nodekey = os.path.join(self.data_dir, "nodekey.dat") + return os.path.exists(nodekey) + + def _is_locked(self) -> bool: + try: + self.api.getinfo() + except XudApiError as e: + if "xud is locked" in str(e): + return True + return False + + def _unlock(self) -> None: + logger.info("Unlock wallets") + self.cli("unlock") + + def _ensure_lnds_synced(self, stop): + lnds = {} + lndbtc = self.get_service("lndbtc") + lndltc = self.get_service("lndltc") + if lndbtc.mode == "native" and \ + (self.network == "simnet" or self.get_service("bitcoind").mode in ["neutrino", "light"]): + lnds[lndbtc] = CFHeaderState() + if lndltc.mode == "native" and \ + (self.network == "simnet" or self.get_service("litecoind").mode in ["neutrino", "light"]): + lnds[lndltc] = CFHeaderState() + + if len(lnds) > 0: + def all_ready(): + nonlocal lnds + + return functools.reduce(lambda r, item: r and item.ready, lnds.values(), True) + + def print_syncing(stop: threading.Event): + nonlocal lnds + + print("Syncing light clients:") + + rows = {} + for lnd, state in lnds.items(): + rows[lnd.name] = state.message + + print("%s" % ServiceTable(rows)) + + n = len(rows) + + while not stop.is_set(): + i = 0 + logger.debug("lnds %r", lnds) + for lnd, state in lnds.items(): + old_msg = rows[lnd.name] + msg = state.message + if old_msg != msg: + if len(old_msg) > len(msg): + fmt = "%%%ds" % len(old_msg) + msg = fmt % msg + y = (n - i) * 2 + x = 12 + update = "\033[%dA\033[%dC%s\033[%dD\033[%dB" % (y, x, msg, x + len(msg), y) + print(update, end="", flush=True) + i += 1 + + if all_ready(): + break + + stop.wait(1) + + logger.debug("Light clients syncing ends") + + executor = self.config.executor + + f = executor.submit(print_syncing, stop) + + for lnd in lnds: + executor.submit(lnd.update_cfheader, lnds[lnd], stop) + + f.result() + logger.debug("print_syncing ends") + + def _wait_tls_cert(self, stop: threading.Event): + tls_file = os.path.join(self.data_dir, "tls.cert") + while not stop.is_set(): + if os.path.exists(tls_file): + break + stop.wait(1) + + def _wait_xud_ready(self, stop: threading.Event): + # Error: ENOENT: no such file or directory, open '/root/.xud/tls.cert' + # xud is starting... 
try again in a few seconds + # xud is locked, run 'xucli unlock', 'xucli create', or 'xucli restore' then try again + cmd = self._cli + " getinfo -j" + while not stop.is_set(): + try: + if not self.is_running: + raise FatalError("XUD container \"%s\" stopped unexpectedly" % self.container_name) + exit_code, output = self.exec(cmd) + if exit_code == 0: + break + if "xud is locked" in output: + break + logger.debug("[Execute] %s (exit_code=%s)\n%s", cmd, exit_code, output.rstrip()) + except docker.errors.APIError: + logger.exception("Failed to getinfo") + stop.wait(1) + + def ensure_ready(self, stop: threading.Event): + logger.info("Ensuring XUD is ready") + + if self.node_manager.newly_installed: + logger.info("Ensuring LNDs are synced (light)") + self._ensure_lnds_synced(stop) + + logger.info("Ensuring XUD dependencies are ready (lndbtc, lndltc and connext)") + self._ensure_dependencies_ready(stop) + + logger.info("Waiting for XUD to be ready") + executor = self.config.executor + + f = executor.submit(self._wait_tls_cert, stop) + while not stop.is_set(): + logger.info("Waiting for XUD tls.cert to be created") + try: + f.result(30) + break + except TimeoutError: + print("XUD should not take so long to create \"tls.cert\" file. please check container \"%s\" logs for more details." % self.container_name) + reply = yes_or_no("Would you like to keep waiting?") + if reply == "no": + raise NoWaiting + + f = executor.submit(self._wait_xud_ready, stop) + while not stop.is_set(): + logger.info("Waiting for XUD to be ready") + try: + f.result(10) + break + except TimeoutError: + print("XUD should not take so long to be ready. please check container \"%s\" logs for more details." % self.container_name) + reply = yes_or_no("Would you like to keep waiting?") + if reply == "no": + raise NoWaiting + + if not self.has_wallets(): + logger.info("Setting up XUD wallets") + self._setup_wallets() + + logger.info("Setting up XUD backup") + self._setup_backup() + + if self._is_locked(): + logger.info("Unlock XUD") + self._unlock() diff --git a/images/utils/launcher/shell/__init__.py b/images/utils/launcher/shell/__init__.py deleted file mode 100644 index 6ff69ce8a..000000000 --- a/images/utils/launcher/shell/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .shell import Shell diff --git a/images/utils/launcher/shell/command.py b/images/utils/launcher/shell/command.py deleted file mode 100644 index 1936274a5..000000000 --- a/images/utils/launcher/shell/command.py +++ /dev/null @@ -1,126 +0,0 @@ -import sys -import logging - - -def output(seq): - print(seq, end="") - sys.stdout.flush() - - -class Command: - _logger = logging.getLogger("launcher.shell.Command") - - def __init__(self): - self.value = "" - self.uncommitted_value = None - self.index = 0 - - def append_character(self, c): - if self.index < len(self.value): - head = self.value[:self.index] - tail = self.value[self.index:] - self.value = head + c + tail - self.index += 1 - output("%s%s\033[%dD" % (c, tail, len(tail))) - else: - self.value += c - self.index += 1 - output(c) - - def print(self): - if self.index < len(self.value): - output("%s\033[%dD" % (self.value, len(self.value) - self.index)) - else: - output(self.value) - - def reset(self): - self.value = "" - self.index = 0 - self.uncommitted_value = None - - def clear(self): - raise NotImplementedError() - - def __repr__(self): - return f"" - - def __str__(self): - return self.normalized_value - - @property - def normalized_value(self): - return self.value.strip() - - def is_exit(self): - cmd = 
self.normalized_value.lower() - return cmd == "exit" or cmd == "quit" - - def is_empty(self): - return len(self.normalized_value) == 0 - - def change(self, new_cmd): - if self.uncommitted_value is None: - self.uncommitted_value = self.value - self.move_begin() - self.value = new_cmd - self.index = len(self.value) - output("\033[K%s" % self.value) - - def restore(self): - if self.uncommitted_value is not None: - self.move_begin() - self.value = self.uncommitted_value - self.index = len(self.value) - output("\033[K%s" % self.value) - self.uncommitted_value = None - - def delete_backward(self): - if self.index > 0: - head = self.value[:self.index - 1] - tail = self.value[self.index:] - self.value = head + tail - if len(tail) > 0: - output('\b\033[K%s\033[%dD' % (tail, len(tail))) - else: - output('\b\033[K') - self.index = len(head) - - def delete_forward(self): - raise NotImplementedError() - - def delete_to_begin(self): - if self.index < len(self.value): - tail = self.value[self.index:] - output("\033[%dD\033[K%s\033[%dD" % (self.index, tail, len(tail))) - self.value = tail - self.index = 0 - else: - output("\033[%dD\033[K" % self.index) - self.value = "" - self.index = 0 - - def move_backward(self): - if self.index > 0: - self.index -= 1 - output("\033[1D") - - def move_backward_word(self): - raise NotImplementedError() - - def move_forward(self): - if self.index < len(self.value): - self.index += 1 - output("\033[1C") - - def move_forward_word(self): - raise NotImplementedError() - - def move_begin(self): - if self.index > 0: - output("\033[%dD" % self.index) - self.index = 0 - - def move_end(self): - if self.index < len(self.value): - output(f"\033[%dC" % (len(self.value) - self.index)) - self.index = len(self.value) diff --git a/images/utils/launcher/shell/history.py b/images/utils/launcher/shell/history.py deleted file mode 100644 index 459b16f76..000000000 --- a/images/utils/launcher/shell/history.py +++ /dev/null @@ -1,67 +0,0 @@ -import sys -import logging -from .command import Command - - -def output(seq): - print(seq, end="") - sys.stdout.flush() - - -class History: - _logger = logging.getLogger("launcher.shell.History") - - def __init__(self, cmd: Command, history_file): - self.cmd = cmd - self._history_file = history_file - - self.history = [] - self.index = 0 - try: - with open(self._history_file) as f: - for line in f.readlines(): - if line.endswith("\n"): - line = line[:-1] - if len(line) > 0: - self.history.append(line) - self.index = len(self.history) - except FileNotFoundError: - with open(self._history_file, 'w'): - pass - - def prev(self): - if self.index > 0: - self.index -= 1 - new_cmd = self.history[self.index] - self.cmd.change(new_cmd) - else: - self._logger.debug("No prev history") - - def next(self): - if self.index < len(self.history) - 1: - self.index += 1 - new_cmd = self.history[self.index] - self.cmd.change(new_cmd) - elif self.index == len(self.history) - 1: - self.cmd.restore() - self.index += 1 - else: - self._logger.debug("No next history") - - def commit(self, cmd: Command): - cmd_str = str(cmd) - self.history.append(cmd_str) - try: - with open(self._history_file, 'a') as f: - f.write(cmd_str + "\n") - except FileNotFoundError: - with open(self._history_file, 'w') as f: - f.write(cmd_str + "\n") - cmd.reset() - self.reset() - - def reset(self): - self.index = len(self.history) - - def __repr__(self): - return f"" diff --git a/images/utils/launcher/shell/shell.py b/images/utils/launcher/shell/shell.py deleted file mode 100644 index 612891a2a..000000000 
--- a/images/utils/launcher/shell/shell.py +++ /dev/null @@ -1,530 +0,0 @@ -import sys -import os -import logging -from termios import * -import threading -from concurrent.futures import Future, CancelledError -from typing import Optional -import fcntl -import selectors -from queue import Queue - -from .command import Command -from .history import History - -from ..utils import get_hostfs_file - -def output(seq): - os.write(sys.stdout.fileno(), seq.encode()) - sys.stdout.flush() - -# Indexes for termios list. -IFLAG = 0 -OFLAG = 1 -CFLAG = 2 -LFLAG = 3 -ISPEED = 4 -OSPEED = 5 -CC = 6 - - -def _setraw(fd, when=TCSAFLUSH): - """Put terminal into a raw mode.""" - mode = tcgetattr(fd) - mode[IFLAG] = mode[IFLAG] & ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON) - # This OPOST flag is used to translate "\n" to "\r\n" - #mode[OFLAG] = mode[OFLAG] & ~(OPOST) - mode[CFLAG] = mode[CFLAG] & ~(CSIZE | PARENB) - mode[CFLAG] = mode[CFLAG] | CS8 - mode[LFLAG] = mode[LFLAG] & ~(ECHO | ICANON | IEXTEN | ISIG) - mode[CC][VMIN] = 1 - mode[CC][VTIME] = 0 - tcsetattr(fd, when, mode) - - -def _set_raw_nonblock(fd): - # tty.setraw(self.fd) - _setraw(fd) - flag = fcntl.fcntl(sys.stdin, fcntl.F_GETFL) - fcntl.fcntl(sys.stdin, fcntl.F_SETFL, flag | os.O_NONBLOCK) - - -def _reset_mode(fd, mode): - tcsetattr(fd, TCSADRAIN, mode) - - -def _remove_oflag_opost(fd, when=TCSAFLUSH): - mode = tcgetattr(fd) - mode[OFLAG] = mode[OFLAG] & ~(OPOST) - tcsetattr(fd, when, mode) - - -def _add_oflag_opost(fd, when=TCSAFLUSH): - mode = tcgetattr(fd) - mode[OFLAG] = mode[OFLAG] | OPOST - tcsetattr(fd, when, mode) - - -class EventLoop(threading.Thread): - _logger = logging.getLogger("launcher.shell.EventLoop") - - def __init__(self, queue, stop_event, fd): - super().__init__(name="EventLoop") - - self.queue: Queue = queue - self.stop_event: threading.Event = stop_event - - self.selector = selectors.DefaultSelector() - self.selector.register(sys.stdin, selectors.EVENT_READ, self.callback) - - self._lock = threading.RLock() - self.__socket = None - - self._ch_dict = { - 1: "ctrl_a", - 2: "ctrl_b", - 3: "ctrl_c", - 4: "ctrl_d", - 5: "ctrl_e", - 6: "ctrl_f", - 9: "tab", - 12: "ctrl_l", - 13: "enter", - 21: "ctrl_u", - 27: "esc" - } - self._fd = fd - - @property - def socket(self): - with self._lock: - return self.__socket - - @socket.setter - def socket(self, value): - with self._lock: - self.__socket = value - if value is None: - _add_oflag_opost(sys.stdin.fileno()) - else: - _remove_oflag_opost(sys.stdin.fileno()) - - def decode_input(self, data): - i = 0 - esc_i = -1 - while i < len(data): - b = ord(data[i]) - - if esc_i == 0: - if data[i] == '[': - esc_i = 1 - i = i + 1 - continue - else: - esc_i = -1 - self.queue.put("esc") - - if esc_i == 1: - if data[i] == 'A': - esc_i = -1 - self.queue.put("arrow_up") - i = i + 1 - continue - elif data[i] == 'B': - esc_i = -1 - self.queue.put("arrow_down") - i = i + 1 - continue - elif data[i] == 'C': - esc_i = -1 - self.queue.put("arrow_right") - i = i + 1 - continue - elif data[i] == 'D': - esc_i = -1 - self.queue.put("arrow_left") - i = i + 1 - continue - else: - esc_i = -1 - self.queue.put("esc") - self.queue.put("[") - - ch = None - - if b < 0: - raise Exception(f"Illegal byte: {b!r}") - elif b < 32: - # control characters: - if b == 27: - if i + 1 < len(data) and data[i + 1] == '[': - # decode ANSI escape sequences - esc_i = 0 - i = i + 1 - continue - ch = self._ch_dict.get(b, None) - elif b < 127: - # printable characters: - ch = data[i] - elif b == 127: - # del - ch = 'del' - else: - # >= 128 
characters UTF-8 Unicode characters - pass - - if ch: - self.queue.put(ch) - - i = i + 1 - - def callback(self, stdin, mask): - with self._lock: - data = stdin.read() - if self.socket: - try: - self.socket.send(data.encode()) - return - except: - self._logger.exception("Failed to send data to socket") - self.socket = None - - self.decode_input(data) - - def _loop(self): - try: - while not self.stop_event.is_set(): - events = self.selector.select(timeout=1) - for key, mask in events: - callback = key.data - callback(key.fileobj, mask) - except: - self._logger.exception("The loop exits unexpectedly") - - def run(self) -> None: - self._logger.debug("Begin") - - mode = tcgetattr(self._fd) - _set_raw_nonblock(self._fd) - - try: - while not self.stop_event.is_set(): - events = self.selector.select(timeout=1) - for key, mask in events: - callback = key.data - callback(key.fileobj, mask) - except: - self._logger.exception("The loop exits unexpectedly") - finally: - _reset_mode(self._fd, mode) - - self._logger.debug("End") - - def interrupt(self): - try: - self.queue.put("eof") # enqueue EOF (Ctrl + D) - self.selector.close() - except: - self._logger.exception("Failed to interrupt") - - -class InputHandler(threading.Thread): - _logger = logging.getLogger("launcher.shell.InputHandler") - - def __init__(self, queue, stop_event): - super().__init__(name="InputHandler") - - self.queue: Queue = queue - self.stop_event: threading.Event = stop_event - - self._lock = threading.RLock() - - self._cmd: Command = Command() - self._history: Optional[History] = None - - self.__prompt = "" - self.__command_handler = None - self.__accept_input = False - self.__enable_history = True - self.__answer: Optional[Future] = None - - @property - def prompt(self): - with self._lock: - return self.__prompt - - @prompt.setter - def prompt(self, value): - with self._lock: - self.__prompt = value - - @property - def command_handler(self): - with self._lock: - return self.__command_handler - - @command_handler.setter - def command_handler(self, value): - with self._lock: - self.__command_handler = value - - @property - def accept_input(self): - with self._lock: - return self.__accept_input - - @accept_input.setter - def accept_input(self, value): - with self._lock: - self.__accept_input = value - - @property - def enable_history(self): - with self._lock: - return self.__enable_history - - @enable_history.setter - def enable_history(self, value): - with self._lock: - self.__enable_history = value - - @property - def answer(self): - with self._lock: - return self.__answer - - @answer.setter - def answer(self, value): - with self._lock: - self.__answer = value - - def set_network_dir(self, network_dir): - with self._lock: - self._history = History(self._cmd, get_hostfs_file(f"{network_dir}/history")) - - def _history_reset(self): - if self._history is None or not self.enable_history: - return - self._history.reset() - - def _history_commit(self, cmd): - if self._history is None or not self.enable_history: - return - self._history.commit(cmd) - - def _history_prev(self): - if self._history is None or not self.enable_history: - return - self._history.prev() - - def _history_next(self): - if self._history is None or not self.enable_history: - return - self._history.next() - - def _handle_command(self, cmd: str): - try: - if self.command_handler is not None: - #self.accept_input = False - # TODO disable input - self.command_handler(cmd) # sync - # TODO enable input - #self.accept_input = True - except: - self._logger.exception("Failed 
to execute command: %r", cmd) - - def _handle_input(self, ch): - with self._lock: - cmd = self._cmd - - if ch == "eof": - return False - - if not self.accept_input: - if ch == "ctrl_c": - return False - return True - - if ch == "arrow_up": - self._history_prev() - elif ch == "arrow_down": - self._history_next() - elif ch == "arrow_right": - cmd.move_forward() - elif ch == "arrow_left": - cmd.move_backward() - elif ch == "ctrl_a": - cmd.move_begin() - elif ch == "ctrl_b": # Ctrl + B - cmd.move_backward() - elif ch == "ctrl_c": # Ctrl + C - cmd.reset() - output("^C") - if self.answer is not None: - return False - else: - output("\n") - output(self.prompt) - self._history_reset() - elif ch == "ctrl_e": - cmd.move_end() - elif ch == "ctrl_f": - cmd.move_forward() - elif ch == "tab": - # TODO tab completion - pass - elif ch == "ctrl_l": - output(f'\033[2J\033[1;1H') - output(self.prompt) - cmd.print() - elif ch == "enter": - output("\n") - if self.answer is not None: - cmd_str = str(cmd) - self.answer.set_result(cmd_str) - self.accept_input = False - cmd.reset() - return True - else: - if not cmd.is_empty(): - if cmd.is_exit(): - return False - elif str(cmd) == "down": - self._handle_command(str(cmd)) - return False - else: - self._handle_command(str(cmd)) - self._history_commit(cmd) # will reset history too - else: - self._history_reset() - output(self.prompt) - cmd.reset() - elif ch == "ctrl_u": - cmd.delete_to_begin() - elif ch == "del": # DEL - cmd.delete_backward() - elif len(ch) == 1: - cmd.append_character(ch) - self._history_reset() - else: - self._logger.warning(f"Discard {ch=}") - - return True - - def run(self) -> None: - self._logger.debug("Begin") - - try: - while True: - ch = self.queue.get() - if not self._handle_input(ch): - break - except: - self._logger.exception("The loop exits unexpectedly") - - self._logger.debug("End") - self.stop_event.set() - if self.answer is not None and not self.answer.cancelled(): - self.answer.cancel() - - -class Shell: - def __init__(self): - self._logger = logging.getLogger("launcher.shell.Shell") - - with open(os.path.dirname(__file__) + '/banner.txt') as f: - self._banner = "".join(f.readlines()) - - self.fd_in = sys.stdin.fileno() - self.fd_out = sys.stdout.fileno() - self.fd_err = sys.stderr.fileno() - - queue = Queue() - stop_event = threading.Event() - - self.stop_event = stop_event - self.loop = EventLoop(queue, stop_event, self.fd_in) - self.handler = InputHandler(queue, stop_event) - - self.loop.start() - self.handler.start() - - def print_banner(self): - self.print(self._banner) - - def start(self, prompt, command_handler): - self.print_banner() - - self.handler.prompt = prompt - self.handler.command_handler = command_handler - output(prompt) - self.handler.accept_input = True - self.handler.enable_history = True - - self.stop_event.wait() - - def input(self, prompt: str) -> str: - assert self.handler.answer is None - - old_prompt = self.handler.prompt - old_accept_input = self.handler.accept_input - old_enable_history = self.handler.enable_history - - self.handler.prompt = prompt - self.handler.accept_input = True - self.handler.enable_history = False - self.handler.answer = Future() - - output(prompt) - - try: - result = self.handler.answer.result() - except CancelledError: - raise KeyboardInterrupt() - finally: - self.handler.prompt = old_prompt - self.handler.accept_input = old_accept_input - self.handler.enable_history = old_enable_history - self.handler.answer = None - - return result - - def yes_or_no(self, prompt: str) -> str: - 
while True: - answer = self.input(prompt + " [Y/n] ").lower() - if answer == "y" or answer == "yes" or len(answer) == 0: - return "yes" - elif answer == "n" or answer == "no": - return "no" - - def no_or_yes(self, prompt: str) -> str: - while True: - answer = self.input(prompt + " [y/N] ").lower() - if answer == "n" or answer == "no" or len(answer) == 0: - return "no" - if answer == "y" or answer == "yes": - return "yes" - - def confirm(self, prompt: str) -> bool: - answer = self.input(prompt) - return len(answer) == 0 - - def redirect_stdin(self, socket): - self.loop.socket = socket - - def stop_redirect_stdin(self): - self.loop.socket = None - - def stop(self): - self._logger.debug("stop") - self.loop.interrupt() - - def set_network_dir(self, network_dir): - self.handler.set_network_dir(network_dir) - - def print(self, text): - print(text, end="") - sys.stdout.flush() - - def println(self, line): - print(line) diff --git a/images/utils/launcher/utils.py b/images/utils/launcher/utils.py index 2650a8a29..fb5785a91 100644 --- a/images/utils/launcher/utils.py +++ b/images/utils/launcher/utils.py @@ -1,57 +1,14 @@ +import argparse import logging import os -from concurrent.futures import ThreadPoolExecutor, wait -import argparse - -logger = logging.getLogger("launcher.utils") - - -class ParallelExecutionError(Exception): - def __init__(self, failed): - super() - self.failed = failed - - -def parallel_execute(tasks, execute, timeout, print_failed, try_again, handle_result=None, single_thread=False): - while len(tasks) > 0: - failed = [] - if single_thread: - workers = 1 - else: - workers = len(tasks) - with ThreadPoolExecutor(max_workers=workers, thread_name_prefix="P") as executor: - fs = {executor.submit(execute, t): t for t in tasks} - done, not_done = wait(fs, timeout) - for f in done: - task = fs[f] - try: - result = f.result() - if handle_result: - handle_result(task, result) - except Exception as e: - failed.append((task, e)) - for f in not_done: - task = fs[f] - f.cancel() - failed.append((task, TimeoutError("timeout"))) - if len(failed) > 0: - print_failed(failed) - if try_again(): - tasks = [f[0] for f in failed] - else: - raise ParallelExecutionError(failed) - else: - tasks = [] +import random +import threading +import time +from concurrent.futures import ThreadPoolExecutor +from typing import List, Callable, TypeVar +from launcher.errors import ParallelError - -def get_useful_error_message(error): - msg = str(error).strip() - if len(msg) == 0: - if isinstance(error, TimeoutError): - return "timeout" - else: - return "%s" % type(error) - return msg +logger = logging.getLogger(__name__) def normalize_path(path: str) -> str: @@ -99,3 +56,118 @@ class ArgumentParser(argparse.ArgumentParser): def error(self, message): raise ArgumentError(message, self.format_usage()) + + +def yes_or_no(prompt, default="yes"): + assert default in ["yes", "no"] + while True: + if default == "yes": + reply = input(prompt + " [Y/n] ") + else: + reply = input(prompt + " [y/N] ") + reply = reply.strip().lower() + if reply == "": + return default + if reply in ["y", "yes"]: + return "yes" + if reply in ["n", "no"]: + return "no" + + +def get_percentage(current, total): + if total == 0: + return "0.00%% (%d/%d)" % (current, total) + if current >= total: + return "100.00%% (%d/%d)" % (current, total) + p = current / total * 100 + if p > 0.005: + p = p - 0.005 + else: + p = 0 + return "%.2f%% (%d/%d)" % (p, current, total) + + +def color(text: str) -> str: + if text == "done": + return "\033[32mdone\033[0m" + elif 
text == "error": + return "\033[31merror\033[0m" + else: + return text + + +T = TypeVar('T') + + +def parallel( + executor: ThreadPoolExecutor, + items: List[T], + linehead: Callable[[T], str], + run: Callable[[T, threading.Event], None] +): + result = {item: None for item in items} + stop = threading.Event() + + def animate(): + nonlocal result + nonlocal stop + + lines = [] + width = 0 + for item in items: + line = linehead(item) + line = line.capitalize() + if len(line) > width: + width = len(line) + lines.append(line) + fmt = "%-{}s ...".format(width) + lines = [fmt % line for line in lines] + print("\n".join(lines)) + + i = 0 + error = False + while not stop.is_set(): + print("\033[%dA" % len(items), end="", flush=True) + finish = 0 + for item in items: + r = result[item] + if r: + if r == "error": + error = True + suffix = "... " + color(r) + suffix_len = 4 + len(r) + finish += 1 + else: + suffix = "%-3s" % ("." * abs(3 - i % 6)) + suffix_len = 3 + print("\033[%dC" % (width + 1), end="", flush=True) + print(suffix, end="", flush=True) + print("\033[%dD\033[1B" % (width + 1 + suffix_len), end="", flush=True) + print("\033[K", end="", flush=True) + if finish == len(items): + break + stop.wait(0.5) + i += 1 + + if error: + # TODO create ParallelError with all task errors + raise ParallelError + + def wrapper(item): + nonlocal result + nonlocal stop + try: + run(item, stop) + # time.sleep(random.randint(3, 10)) + result[item] = "done" + except Exception as e: + logger.exception("[Parallel] %s: %s", linehead(item), str(e)) + result[item] = "error" + + f = executor.submit(animate) + try: + for item in items: + executor.submit(wrapper, item) + f.result() + finally: + stop.set() diff --git a/images/utils/launcher/warm_up.py b/images/utils/launcher/warm_up.py deleted file mode 100644 index 87b6eeb4f..000000000 --- a/images/utils/launcher/warm_up.py +++ /dev/null @@ -1,12 +0,0 @@ -import logging - -from .node import NodeManager - - -class Action: - def __init__(self, node_manager: NodeManager): - self.logger = logging.getLogger("launcher.WarmUpAction") - self.node_manager = node_manager - - def execute(self): - print("\nšŸƒ Warming up...\n") diff --git a/images/utils/requirements.txt b/images/utils/requirements.txt index 7b61e3bb4..33d3865fd 100644 --- a/images/utils/requirements.txt +++ b/images/utils/requirements.txt @@ -1,3 +1,4 @@ toml docker -demjson \ No newline at end of file +demjson +dockerpty diff --git a/images/utils/setup.py b/images/utils/setup.py index 330052b23..c727d7132 100644 --- a/images/utils/setup.py +++ b/images/utils/setup.py @@ -4,10 +4,10 @@ name="launcher", version="1.0.0", packages=find_packages(), - install_requires=["toml", "docker", "demjson"], + install_requires=["toml", "docker", "demjson", "dockerpty"], include_package_data=True, package_data={ "launcher.config": ["*.conf", "nodes.json"], - "launcher.shell": ["banner.txt"], + "launcher": ["banner.txt"], } ) diff --git a/images/xud/entrypoint.sh b/images/xud/entrypoint.sh index 99859f4c2..02fceb892 100755 --- a/images/xud/entrypoint.sh +++ b/images/xud/entrypoint.sh @@ -66,16 +66,6 @@ CONNEXT_IP=$(getent hosts connext || echo '' | awk '{ print $1 }') echo "$CONNEXT_IP connext" >> /etc/hosts -while [[ $LNDBTC_MODE == "native" && ! -e /root/.lndbtc/tls.cert ]]; do - echo "[entrypoint] Waiting for /root/.lndbtc/tls.cert to be created..." - sleep 1 -done - -while [[ $LNDLTC_MODE == "native" && ! -e /root/.lndltc/tls.cert ]]; do - echo "[entrypoint] Waiting for /root/.lndltc/tls.cert to be created..." 
- sleep 1 -done - if [[ -z ${LNDBTC_RPC_HOST:-} ]]; then LNDBTC_RPC_HOST="lndbtc" fi diff --git a/setup.sh b/setup.sh index 6478da3f3..97fd0bc5d 100644 --- a/setup.sh +++ b/setup.sh @@ -6,7 +6,37 @@ BRANCH=master DEV=false DOCKER_REGISTRY="https://registry-1.docker.io" UTILS_TAG="20.10.16" +NETWORK="" +HOME_DIR="$HOME/.xud-docker" +SIMNET_DIR="$HOME_DIR/simnet" +TESTNET_DIR="$HOME_DIR/testnet" +MAINNET_DIR="$HOME_DIR/mainnet" +XUD_DOCKER_CONF="$HOME_DIR/xud-docker.conf" + +if [[ -e $XUD_DOCKER_CONF ]]; then + function get_value() { + sed -nE "s/^\s*$1 = \"(.+)\"$/\1/p" "$XUD_DOCKER_CONF" + } + # shellcheck disable=2034 + home_dir=$HOME_DIR + + if VALUE=$(get_value "simnet-dir"); then + SIMNET_DIR=$(eval echo "$VALUE") + fi + if VALUE=$(get_value "testnet-dir"); then + TESTNET_DIR=$(eval echo "$VALUE") + fi + if VALUE=$(get_value "mainnet-dir"); then + MAINNET_DIR=$(eval echo "$VALUE") + fi + unset home_dir + unset VALUE + unset get_value +fi +############################################################################### +# Functions +############################################################################### function print_help() { cat <&2 "❌ Missing option value: $OPTION" + echo >&2 "Command-line option \"$OPTION\" needs value" exit 1 fi VALUE=$1 @@ -137,6 +167,62 @@ function parse_opts() { DEV=true shift ;; + "-n" | "--network" ) + if [[ $1 =~ = ]]; then + VALUE=$(echo "$1" | cut -d'=' -f2) + else + OPTION=$1 + shift + if [[ $# -eq 0 || $1 =~ ^- ]]; then + echo >&2 "Command-line option \"$OPTION\" needs value" + exit 1 + fi + VALUE=$1 + fi + NETWORK=$VALUE + ;; + "--simnet-dir") + if [[ $1 =~ = ]]; then + VALUE=$(echo "$1" | cut -d'=' -f2) + else + OPTION=$1 + shift + if [[ $# -eq 0 || $1 =~ ^- ]]; then + echo >&2 "Command-line option \"$OPTION\" needs value" + exit 1 + fi + VALUE=$1 + fi + SIMNET_DIR=$VALUE + ;; + "--testnet-dir") + if [[ $1 =~ = ]]; then + VALUE=$(echo "$1" | cut -d'=' -f2) + else + OPTION=$1 + shift + if [[ $# -eq 0 || $1 =~ ^- ]]; then + echo >&2 "Command-line option \"$OPTION\" needs value" + exit 1 + fi + VALUE=$1 + fi + TESTNET_DIR=$VALUE + ;; + "--mainnet-dir") + if [[ $1 =~ = ]]; then + VALUE=$(echo "$1" | cut -d'=' -f2) + else + OPTION=$1 + shift + if [[ $# -eq 0 || $1 =~ ^- ]]; then + echo >&2 "Command-line option \"$OPTION\" needs value" + exit 1 + fi + VALUE=$1 + fi + MAINNET_DIR=$VALUE + ;; *) shift esac @@ -366,15 +452,30 @@ function get_utils_name() { echo "${NETWORK}_utils_${N}" } -################################################################################ + +############################################################################### # MAIN -################################################################################ +############################################################################### LOG_TIMESTAMP="$(date +%F-%H-%M-%S)" parse_opts "$@" -choose_network +if [[ -z $NETWORK ]]; then + choose_network +fi + +case $NETWORK in + simnet) + NETWORK_DIR=$SIMNET_DIR + ;; + testnet) + NETWORK_DIR=$TESTNET_DIR + ;; + mainnet) + NETWORK_DIR=$MAINNET_DIR + ;; +esac ensure_utils_image @@ -383,11 +484,13 @@ echo "šŸš€ Launching $NETWORK environment" docker run --rm -it \ --name "$(get_utils_name)" \ -v /var/run/docker.sock:/var/run/docker.sock \ +-v "$NETWORK_DIR:/root/$NETWORK" \ -v /:/mnt/hostfs \ +-e LOG_TIMESTAMP="$LOG_TIMESTAMP" \ +-e NETWORK="$NETWORK" \ +-e NETWORK_DIR="$NETWORK_DIR" \ -e HOST_PWD="$PWD" \ -e HOST_HOME="$HOME" \ --e NETWORK="$NETWORK" \ --e LOG_TIMESTAMP="$LOG_TIMESTAMP" \ --entrypoint python \ "$UTILS_IMG" \ -m
launcher "$@" diff --git a/tools/core/docker.py b/tools/core/docker.py index cf63523fc..0dd60bd14 100644 --- a/tools/core/docker.py +++ b/tools/core/docker.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Dict, Optional, List, Literal, Union +from typing import TYPE_CHECKING, Dict, Optional, List, Union import json from urllib.request import urlopen, Request @@ -17,7 +17,7 @@ from http.client import HTTPResponse -SupportedPlatform = Literal["linux/arm64", "linux/amd64", "linux/386", "linux/ppc64le", "linux/s390s", "linux/arm/v7", "linux/arm/v6"] +# SupportedPlatform = Literal["linux/arm64", "linux/amd64", "linux/386", "linux/ppc64le", "linux/s390s", "linux/arm/v7", "linux/arm/v6"] class Platform: diff --git a/tools/core/image.py b/tools/core/image.py index 23d9095e5..e93266343 100644 --- a/tools/core/image.py +++ b/tools/core/image.py @@ -138,6 +138,16 @@ def repo(self) -> Optional[str]: return repo def _run_command(self, cmd): + on_travis = "TRAVIS_BRANCH" in os.environ + if on_travis: + self._run_command_on_travis(cmd) + return + print("$ %s" % cmd) + exit_code = os.system(cmd) + if exit_code != 0: + raise RuntimeError("Failed to build (exit_code=%s)" % exit_code) + + def _run_command_on_travis(self, cmd): self._logger.info(cmd) stop = threading.Event() @@ -145,19 +155,12 @@ def _run_command(self, cmd): def f(): nonlocal stop counter = 0 - on_travis = "TRAVIS_BRANCH" in os.environ + while not stop.is_set(): counter = counter + 1 + print("Still building... ({})".format(counter), flush=True) + stop.wait(10) - if on_travis: - print("Still building... ({})".format(counter), flush=True) - stop.wait(10) - continue - - print(".", end="", flush=True) - stop.wait(1) - if not on_travis: - print() threading.Thread(target=f).start() try: output = execute(cmd)
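
As a companion to the setup.sh changes, the simnet-dir / testnet-dir / mainnet-dir overrides could also be read on the Python side with the toml package the launcher already depends on, assuming xud-docker.conf is plain TOML of the form simnet-dir = "/some/path". A minimal sketch under that assumption; network_dirs is a hypothetical helper, not an existing launcher function:

import os
import toml

def network_dirs(conf_path: str = "~/.xud-docker/xud-docker.conf") -> dict:
    # Defaults mirror setup.sh: $HOME/.xud-docker/<network>
    home = os.path.expanduser("~/.xud-docker")
    dirs = {n: os.path.join(home, n) for n in ("simnet", "testnet", "mainnet")}
    conf_file = os.path.expanduser(conf_path)
    if os.path.exists(conf_file):
        conf = toml.load(conf_file)
        for network in dirs:
            value = conf.get("{}-dir".format(network))
            if value:
                dirs[network] = os.path.expanduser(value)
    return dirs
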