From b9ffee86284e0757b84a581a3791e987104be98b Mon Sep 17 00:00:00 2001
From: latentvector
Date: Mon, 2 Sep 2024 21:31:15 -0400
Subject: [PATCH] refactoring core into fewer files

---
 commune/{module => }/README.md | 0
 commune/__init__.py | 18 +-
 commune/app/login/login.py | 22 -
 commune/cli.py | 15 +-
 commune/client/client.py | 156 +-
 commune/client/tools.py | 132 -
 commune/client/virtual.py | 29 -
 commune/client/ws.py | 0
 commune/executor/executor.py | 568 +-
 commune/executor/process.py | 902 ---
 commune/executor/task.py | 123 -
 commune/executor/thread.py | 254 -
 commune/key/key.py | 14 +-
 commune/key/test.py | 2 +-
 commune/module.py | 6135 +++++++++++++++++
 commune/module/__init__.py | 1 -
 commune/module/_config.py | 162 -
 commune/module/_logger.py | 89 -
 commune/module/_manager.py | 680 --
 commune/module/_misc.py | 1102 ---
 commune/module/_network.py | 581 --
 commune/module/_os.py | 505 --
 commune/module/_schema.py | 1020 ---
 commune/module/_storage.py | 680 --
 commune/module/module.py | 1043 ---
 commune/module/module.yaml | 218 -
 commune/modules/agent/data/agent_data.py | 2 +-
 .../modules/agent/factory/agent_factory.py | 2 +-
 commune/modules/agent/maker/agent_maker.py | 2 +-
 commune/modules/base/base.py | 2 +-
 .../modules/data/text/code/data_text_code.py | 2 +-
 .../data/text/folder/data_text_folder.py | 2 +-
 .../folder/docs/data_text_realfake_docs.md | 2 +-
 .../modules/data/text/math/data_text_math.py | 2 +-
 .../data/text/realfake/data_text_realfake.py | 2 +-
 .../realfake/docs/data_text_realfake_docs.md | 2 +-
 commune/modules/evm/network.py | 2 +-
 commune/modules/model/hf/model_hf.py | 2 +-
 commune/modules/model/openai/openai.py | 2 +-
 commune/modules/repo/repo.py | 2 +-
 commune/modules/sandbox.py | 3 +-
 commune/modules/tool/search/tool_search.py | 2 +-
 commune/routes.yaml | 182 +
 commune/server/manager.py | 329 -
 commune/server/namespace.py | 5 +-
 commune/server/server.py | 1 -
 commune/shortcuts.yaml | 17 +
 commune/subspace/client.py | 3025 --------
 commune/subspace/subspace.py | 17 +-
 commune/subspace/utils.py | 121 -
 commune/subspace/wallet.py | 8 -
 commune/utils/metric.py | 120 -
 commune/utils/misc.py | 997 ---
 commune/utils/schema.py | 17 +
 docs/3_cli_basics.md | 2 +-
 55 files changed, 6870 insertions(+), 12455 deletions(-)
 rename commune/{module => }/README.md (100%)
 delete mode 100644 commune/app/login/login.py
 delete mode 100644 commune/client/tools.py
 delete mode 100644 commune/client/virtual.py
 delete mode 100644 commune/client/ws.py
 delete mode 100644 commune/executor/process.py
 delete mode 100644 commune/executor/task.py
 delete mode 100644 commune/executor/thread.py
 create mode 100755 commune/module.py
 delete mode 100644 commune/module/__init__.py
 delete mode 100644 commune/module/_config.py
 delete mode 100644 commune/module/_logger.py
 delete mode 100644 commune/module/_manager.py
 delete mode 100644 commune/module/_misc.py
 delete mode 100644 commune/module/_network.py
 delete mode 100644 commune/module/_os.py
 delete mode 100644 commune/module/_schema.py
 delete mode 100644 commune/module/_storage.py
 delete mode 100755 commune/module/module.py
 delete mode 100644 commune/module/module.yaml
 create mode 100644 commune/routes.yaml
 delete mode 100644 commune/server/manager.py
 create mode 100644 commune/shortcuts.yaml
 delete mode 100644 commune/subspace/client.py
 delete mode 100644 commune/subspace/utils.py
 delete mode 100644 commune/subspace/wallet.py
 delete mode 100644 commune/utils/metric.py
 delete mode 100644 commune/utils/misc.py
 create mode 100644 commune/utils/schema.py
diff
--git a/commune/module/README.md b/commune/README.md similarity index 100% rename from commune/module/README.md rename to commune/README.md diff --git a/commune/__init__.py b/commune/__init__.py index 24e9d9d5..5f3ebad1 100755 --- a/commune/__init__.py +++ b/commune/__init__.py @@ -1,27 +1,13 @@ -from .module.module import Module +from .module import Module from functools import partial # set the module functions as globals -for k,v in Module.__dict__.items(): - globals()[k] = v # for f in : # globals()[f] = getattr(Module, f) -for f in Module.class_functions() + Module.static_functions(): - globals()[f] = getattr(Module, f) - -for f in Module.self_functions(): - def wrapper_fn(f, *args, **kwargs): - try: - fn = getattr(Module(), f) - except: - fn = getattr(Module, f) - return fn(*args, **kwargs) - globals()[f] = partial(wrapper_fn, f) - - c = Block = Lego = M = Module # alias c.Module as c.Block, c.Lego, c.M +c.add_to_globals(globals()) \ No newline at end of file diff --git a/commune/app/login/login.py b/commune/app/login/login.py deleted file mode 100644 index 6907a702..00000000 --- a/commune/app/login/login.py +++ /dev/null @@ -1,22 +0,0 @@ -import commune as c -import streamlit as st - -class Login(c.Module): - def __init__(self): - self.set_config(locals()) - - def passwords(self): - return self.get('allowed_password', []) - - def add_password(self, password): - passwords = self.passwords() - passwords.append(str(password)) - self.put('allowed_password', passwords) - - def app(self, x:int = 1, y:int = 2) -> int: - password = st.text_input('Password', '123456', type='password') - self.key = c.module('key').from_password(c.hash(password)) - st.write(self.key.ss58_address) - - -Login.run(__name__) \ No newline at end of file diff --git a/commune/cli.py b/commune/cli.py index 4c3e7771..5eb698bf 100644 --- a/commune/cli.py +++ b/commune/cli.py @@ -32,7 +32,6 @@ def __init__(self, self.forget_fns = forget_fns self.base_module = c.module(module) if isinstance(module, str) else module self.base_module_attributes = list(set(self.base_module.functions() + self.base_module.attributes())) - self.forward(args) def forward(self, argv=None): @@ -47,6 +46,18 @@ def forward(self, argv=None): for arg in c.copy(argv): if arg.startswith('--'): key = arg[2:].split('=')[0] + # if key == 'cwd': + # new_argv = [] + # for item in c.copy(argv): + # if '--cwd' in item: + # continue + # new_argv.append(item) + # new_cmd = 'c ' + ' '.join(c.copy(new_argv)) + + # cwd = c.resolve_path(arg.split('=')[1]) + # v = c.cmd(f'{new_cmd}', cwd=cwd) + # c.print(v) + # return new_cmd if key in self.helper_fns: new_argvs = self.argv() new_argvs.remove(arg) @@ -214,6 +225,6 @@ def determine_type(cls, x): def argv(self): return sys.argv[1:] - + def main(): cli() \ No newline at end of file diff --git a/commune/client/client.py b/commune/client/client.py index d60ecdfc..41c8641d 100644 --- a/commune/client/client.py +++ b/commune/client/client.py @@ -5,12 +5,38 @@ import commune as c import json import requests -from .tools import ClientTools -from .virtual import ClientVirtual # from .pool import ClientPool -class Client(c.Module, ClientTools): + +class ClientVirtual: + protected_attributes = [ 'client', 'remote_call'] + + def __init__(self, client: str ='ReactAgentModule'): + if isinstance(client, str): + client = c.connect(client) + self.client = client + + def remote_call(self, remote_fn, *args, return_future= False, timeout:int=10, key=None, **kwargs): + result = self.client.forward(fn=remote_fn, args=args, kwargs=kwargs, 
timeout=timeout, key=key, return_future=return_future) + return result + + def __str__(self): + return str(self.client) + + def __repr__(self): + return self.__str__() + + def __getattr__(self, key): + + if key in self.protected_attributes : + return getattr(self, key) + else: + return lambda *args, **kwargs : self.remote_call( remote_fn=key, *args, **kwargs) + + + +class Client(c.Module): network2namespace = {} def __init__( self, @@ -236,4 +262,126 @@ def process_stream_line(self, line , stream_prefix=None): if isinstance(event_data, str): if event_data.startswith('{') and event_data.endswith('}') and 'data' in event_data: event_data = json.loads(event_data)['data'] - return event_data \ No newline at end of file + return event_data + + + + @classmethod + def call_search(cls, + search : str, + *args, + timeout : int = 10, + network:str = 'local', + key:str = None, + kwargs = None, + **extra_kwargs) -> None: + if '/' in search: + search, fn = search.split('/') + namespace = c.namespace(search=search, network=network) + future2module = {} + for module, address in namespace.items(): + c.print(f"Calling {module}/{fn}", color='green') + future = c.submit(cls.call, + args = [module, fn] + list(args), + kwargs = {'timeout': timeout, + 'network': network, 'key': key, + 'kwargs': kwargs, + **extra_kwargs} , timeout=timeout) + future2module[future] = module + futures = list(future2module.keys()) + result = {} + progress_bar = c.tqdm(len(futures)) + for future in c.as_completed(futures, timeout=timeout): + module = future2module.pop(future) + futures.remove(future) + progress_bar.update(1) + result[module] = future.result() + + return result + + + @classmethod + def call_pool(cls, + modules, + fn = 'info', + *args, + network = 'local', + timeout = 10, + n=None, + **kwargs): + + args = args or [] + kwargs = kwargs or {} + + if isinstance(modules, str) or modules == None: + modules = c.servers(modules, network=network) + if n == None: + n = len(modules) + modules = cls.shuffle(modules)[:n] + assert isinstance(modules, list), 'modules must be a list' + futures = [] + for m in modules: + job_kwargs = {'module': m, 'fn': fn, 'network': network, **kwargs} + future = c.submit(c.call, kwargs=job_kwargs, args=[*args] , timeout=timeout) + futures.append(future) + responses = c.wait(futures, timeout=timeout) + return responses + + + @classmethod + def connect_pool(cls, modules=None, *args, return_dict:bool=False, **kwargs): + if modules == None: + modules = c.servers(modules) + + module_clients = cls.gather([cls.async_connect(m, ignore_error=True,**kwargs) for m in modules]) + if return_dict: + return dict(zip(modules, module_clients)) + return module_clients + + + @staticmethod + def check_response(x) -> bool: + if isinstance(x, dict) and 'error' in x: + return False + else: + return True + + + def get_curl(self, + fn='info', + params=None, + args=None, + kwargs=None, + timeout=10, + module=None, + key=None, + headers={'Content-Type': 'application/json'}, + network=None, + version=1, + mode='http', + **extra_kwargs): + key = self.resolve_key(key) + network = network or self.network + url = self.get_url(fn=fn, mode=mode, network=network) + kwargs = {**(kwargs or {}), **extra_kwargs} + input_data = self.get_params(args=args, kwargs=kwargs, params=params, version=version) + + # Convert the headers to curl format + headers_str = ' '.join([f'-H "{k}: {v}"' for k, v in headers.items()]) + + # Convert the input data to JSON string + data_str = json.dumps(input_data).replace('"', '\\"') + + # Construct the curl 
command + curl_command = f'curl -X POST {headers_str} -d "{data_str}" "{url}"' + + return curl_command + + + def run_curl(self, *args, **kwargs): + curl_command = self.get_curl(*args, **kwargs) + # get the output of the curl command + import subprocess + output = subprocess.check_output(curl_command, shell=True) + return output.decode('utf-8') + diff --git a/commune/client/tools.py b/commune/client/tools.py deleted file mode 100644 index bc25664d..00000000 --- a/commune/client/tools.py +++ /dev/null @@ -1,132 +0,0 @@ - - -import commune as c -import json - -import commune as c -from functools import partial -import asyncio - - - -class ClientTools: - - @classmethod - def call_search(cls, - search : str, - *args, - timeout : int = 10, - network:str = 'local', - key:str = None, - kwargs = None, - **extra_kwargs) -> None: - if '/' in search: - search, fn = search.split('/') - namespace = c.namespace(search=search, network=network) - future2module = {} - for module, address in namespace.items(): - c.print(f"Calling {module}/{fn}", color='green') - future = c.submit(cls.call, - args = [module, fn] + list(args), - kwargs = {'timeout': timeout, - 'network': network, 'key': key, - 'kwargs': kwargs, - **extra_kwargs} , timeout=timeout) - future2module[future] = module - futures = list(future2module.keys()) - result = {} - progress_bar = c.tqdm(len(futures)) - for future in c.as_completed(futures, timeout=timeout): - module = future2module.pop(future) - futures.remove(future) - progress_bar.update(1) - result[module] = future.result() - - return result - - - @classmethod - def call_pool(cls, - modules, - fn = 'info', - *args, - network = 'local', - timeout = 10, - n=None, - **kwargs): - - args = args or [] - kwargs = kwargs or {} - - if isinstance(modules, str) or modules == None: - modules = c.servers(modules, network=network) - if n == None: - n = len(modules) - modules = cls.shuffle(modules)[:n] - assert isinstance(modules, list), 'modules must be a list' - futures = [] - for m in modules: - job_kwargs = {'module': m, 'fn': fn, 'network': network, **kwargs} - future = c.submit(c.call, kwargs=job_kwargs, args=[*args] , timeout=timeout) - futures.append(future) - responses = c.wait(futures, timeout=timeout) - return responses - - - @classmethod - def connect_pool(cls, modules=None, *args, return_dict:bool=False, **kwargs): - if modules == None: - modules = c.servers(modules) - - module_clients = cls.gather([cls.async_connect(m, ignore_error=True,**kwargs) for m in modules]) - if return_dict: - return dict(zip(modules, module_clients)) - return module_clients - - - @staticmethod - def check_response(x) -> bool: - if isinstance(x, dict) and 'error' in x: - return False - else: - return True - - - def get_curl(self, - fn='info', - params=None, - args=None, - kwargs=None, - timeout=10, - module=None, - key=None, - headers={'Content-Type': 'application/json'}, - network=None, - version=1, - mode='http', - **extra_kwargs): - key = self.resolve_key(key) - network = network or self.network - url = self.get_url(fn=fn, mode=mode, network=network) - kwargs = {**(kwargs or {}), **extra_kwargs} - input_data = self.get_params(args=args, kwargs=kwargs, params=params, version=version) - - # Convert the headers to curl format - headers_str = ' '.join([f'-H "{k}: {v}"' for k, v in headers.items()]) - - # Convert the input data to JSON string - data_str = json.dumps(input_data).replace('"', '\\"') - - # Construct the curl command - curl_command = f'curl -X POST {headers_str} -d "{data_str}" "{url}"' - - return 
curl_command - - - def run_curl(self, *args, **kwargs): - curl_command = self.get_curl(*args, **kwargs) - # get the output of the curl command - import subprocess - output = subprocess.check_output(curl_command, shell=True) - return output.decode('utf-8') - diff --git a/commune/client/virtual.py b/commune/client/virtual.py deleted file mode 100644 index 5c034196..00000000 --- a/commune/client/virtual.py +++ /dev/null @@ -1,29 +0,0 @@ -import commune as c -from functools import partial -import asyncio - - -class ClientVirtual: - protected_attributes = [ 'client', 'remote_call'] - def __init__(self, client: str ='ReactAgentModule'): - if isinstance(client, str): - client = c.connect(client) - self.client = client - - def remote_call(self, remote_fn, *args, return_future= False, timeout:int=10, key=None, **kwargs): - result = self.client.forward(fn=remote_fn, args=args, kwargs=kwargs, timeout=timeout, key=key, return_future=return_future) - return result - - def __str__(self): - return str(self.client) - - def __repr__(self): - return self.__str__() - - def __getattr__(self, key): - - if key in self.protected_attributes : - return getattr(self, key) - else: - return lambda *args, **kwargs : self.remote_call( remote_fn=key, *args, **kwargs) - \ No newline at end of file diff --git a/commune/client/ws.py b/commune/client/ws.py deleted file mode 100644 index e69de29b..00000000 diff --git a/commune/executor/executor.py b/commune/executor/executor.py index 537e84c8..cdf6825e 100644 --- a/commune/executor/executor.py +++ b/commune/executor/executor.py @@ -1,286 +1,364 @@ -import asyncio -import concurrent -import threading -from typing import * + +import os +import sys +import time +import queue +import random +import weakref +import itertools import threading -import commune as c -class Executor(c.Module): - thread_map = {} +from loguru import logger +from typing import Callable +import concurrent +from concurrent.futures._base import Future +import commune as c +import gc - @classmethod - def wait(cls, futures:list, timeout:int = None, generator:bool=False, return_dict:bool = True) -> list: - is_singleton = bool(not isinstance(futures, list)) +import time +from concurrent.futures._base import Future +import commune as c - futures = [futures] if is_singleton else futures - # if type(futures[0]) in [asyncio.Task, asyncio.Future]: - # return cls.gather(futures, timeout=timeout) - - if len(futures) == 0: - return [] - if cls.is_coroutine(futures[0]): - return cls.gather(futures, timeout=timeout) +class Task(c.Module): + def __init__(self, + fn:str, + args:list, + kwargs:dict, + timeout:int=10, + priority:int=1, + path = None, + **extra_kwargs): - future2idx = {future:i for i,future in enumerate(futures)} - - if timeout == None: - if hasattr(futures[0], 'timeout'): - timeout = futures[0].timeout - else: - timeout = 30 + self.future = Future() + self.fn = fn # the function to run + self.start_time = time.time() # the time the task was created + self.end_time = None + self.args = args # the arguments of the task + self.kwargs = kwargs # the arguments of the task + self.timeout = timeout # the timeout of the task + self.priority = priority # the priority of the task + self.data = None # the result of the task + self.latency = None - if generator: - def get_results(futures): - try: - for future in concurrent.futures.as_completed(futures, timeout=timeout): - if return_dict: - idx = future2idx[future] - yield {'idx': idx, 'result': future.result()} - else: - yield future.result() - except Exception as e: - 
yield None - - else: - def get_results(futures): - results = [None]*len(futures) - try: - for future in concurrent.futures.as_completed(futures, timeout=timeout): - idx = future2idx[future] - results[idx] = future.result() - del future2idx[future] - if is_singleton: - results = results[0] - except Exception as e: - unfinished_futures = [future for future in futures if future in future2idx] - cls.print(f'Error: {e}, {len(unfinished_futures)} unfinished futures with timeout {timeout} seconds') - return results + self.fn_name = fn.__name__ if fn != None else str(fn) # the name of the function + # for the sake of simplicity, we'll just add all the extra kwargs to the task object + self.path = self.resolve_path(path) if path != None else None + self.status = 'pending' # pending, running, done + # save the task state + + + @property + def lifetime(self) -> float: + return time.time() - self.start_time + + @property + def state(self) -> dict: + return { + 'fn': self.fn.__name__, + 'args': self.args, + 'kwargs': self.kwargs, + 'timeout': self.timeout, + 'start_time': self.start_time, + 'end_time': self.end_time, + 'latency': self.latency, + 'priority': self.priority, + 'status': self.status, + 'data': self.data, + } + + + def run(self): + """Run the given work item""" + # Checks if future is canceled or if work item is stale + self.start_time = c.time() + + if (not self.future.set_running_or_notify_cancel()) or (time.time() - self.start_time) > self.timeout: + self.future.set_exception(TimeoutError('Task timed out')) + try: + data = self.fn(*self.args, **self.kwargs) + self.status = 'complete' + except Exception as e: + data = c.detailed_error(e) + if 'event loop' in data['error']: + c.new_event_loop(nest_asyncio=True) + self.status = 'failed' + + self.future.set_result(data) + # store the result of the task + if self.path != None: + self.save(self.path, self.state) + + self.end_time = c.time() + self.latency = self.end_time - self.start_time + self.data = data - return get_results(futures) + def result(self) -> object: + return self.future.result() - @classmethod - def gather(cls,jobs:list, timeout:int = 20, loop=None)-> list: + @property + def _condition(self) -> bool: + return self.future._condition + @property + def _state(self, *args, **kwargs) -> bool: + return self.future._state - if loop == None: - loop = cls.get_event_loop() + @property + def _waiters(self) -> bool: + return self.future._waiters - if not isinstance(jobs, list): - singleton = True - jobs = [jobs] + def cancel(self) -> bool: + self.future.cancel() + + def running(self) -> bool: + return self.future.running() + + def done(self) -> bool: + return self.future.done() + + def __lt__(self, other): + if isinstance(other, Task): + return self.priority < other.priority + elif isinstance(other, int): + return self.priority < other else: - singleton = False + raise TypeError(f"Cannot compare Task with {type(other)}") + + +NULL_ENTRY = (sys.maxsize, Task(None, (), {})) + +class ThreadPoolExecutor(c.Module): + """Base threadpool executor with a priority queue""" + + # Used to assign unique thread names when thread_name_prefix is not supplied. + _counter = itertools.count().__next__ + # submit.__doc__ = _base.Executor.submit.__doc__ + threads_queues = weakref.WeakKeyDictionary() + + def __init__( + self, + max_workers: int =None, + maxsize : int = None , + thread_name_prefix : str ="", + mode = 'thread', + ): + """Initializes a new ThreadPoolExecutor instance. 
+ Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + """ + self.start_time = c.time() - assert isinstance(jobs, list) and len(jobs) > 0, f'Invalid jobs: {jobs}' - # determine if we are using asyncio or multiprocessing + max_workers = (os.cpu_count() or 1) * 5 if max_workers == None else max_workers + maxsize = max_workers * 10 or None + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + self.mode = mode + self.max_workers = max_workers + self.work_queue = queue.PriorityQueue(maxsize=maxsize) + self.idle_semaphore = threading.Semaphore(0) + self.threads = [] + self.broken = False + self.shutdown = False + self.shutdown_lock = threading.Lock() + self.thread_name_prefix = thread_name_prefix or ("ThreadPoolExecutor-%d" % self._counter() ) - # wait until they finish, and if they dont, give them none + @property + def is_empty(self): + return self.work_queue.empty() - # return the futures that done timeout or not - async def wait_for(future, timeout): - try: - result = await asyncio.wait_for(future, timeout=timeout) - except asyncio.TimeoutError: - result = {'error': f'TimeoutError: {timeout} seconds'} + @property + def is_full(self): + return self.work_queue.full() - return result - - jobs = [wait_for(job, timeout=timeout) for job in jobs] - future = asyncio.gather(*jobs) - results = loop.run_until_complete(future) - if singleton: - return results[0] - return results - + def default_priority_score(self): + # older scores are higher priority + return 1 # abs((self.start_time - c.time())) - - @classmethod - def submit(cls, - fn, - params = None, - kwargs: dict = None, - args:list = None, - timeout:int = 40, + + def submit(self, + fn: Callable, + params = None, + args:dict=None, + kwargs:dict=None, + priority:int=1, + timeout=200, return_future:bool=True, - init_args : list = [], - init_kwargs:dict= {}, - executor = None, - module: str = None, - mode:str='thread', - max_workers : int = 100, - ): - kwargs = {} if kwargs == None else kwargs - args = [] if args == None else args + wait = True, + path:str=None) -> Future: if params != None: if isinstance(params, dict): - kwargs = {**kwargs, **params} + kwargs = params elif isinstance(params, list): - args = [*args, *params] + args = params else: - raise ValueError('params must be a list or a dictionary') - - fn = cls.get_fn(fn) - executor = cls.executor(max_workers=max_workers, mode=mode) if executor == None else executor - args = cls.copy(args) - kwargs = cls.copy(kwargs) - init_kwargs = cls.copy(init_kwargs) - init_args = cls.copy(init_args) - if module == None: - module = cls - else: - module = cls.module(module) - if isinstance(fn, str): - method_type = cls.classify_fn(getattr(module, fn)) - elif callable(fn): - method_type = cls.classify_fn(fn) - else: - raise ValueError('fn must be a string or a callable') - - if method_type == 'self': - module = module(*init_args, **init_kwargs) - - future = executor.submit(fn=fn, args=args, kwargs=kwargs, timeout=timeout) - - if not hasattr(cls, 'futures'): - cls.futures = [] - - cls.futures.append(future) + raise ValueError("params must be a list or a dict") + # check if the queue is full and if so, raise an exception + if self.work_queue.full(): + if wait: + while self.work_queue.full(): + time.sleep(0.1) + else: + return {'success': False, 'msg':"cannot schedule new futures after maxsize exceeded"} + + args = args or [] + kwargs = kwargs or {} + + with 
self.shutdown_lock: + + if self.broken: + raise Exception("ThreadPoolExecutor is broken") + if self.shutdown: + raise RuntimeError("cannot schedule new futures after shutdown") + priority = kwargs.get("priority", priority) + if "priority" in kwargs: + del kwargs["priority"] + task = Task(fn=fn, args=args, kwargs=kwargs, timeout=timeout, path=path) + # add the work item to the queue + self.work_queue.put((priority, task), block=False) + # adjust the thread count to match the new task + self.adjust_thread_count() - + # return the future (MAYBE WE CAN RETURN THE TASK ITSELF) if return_future: - return future - else: - return cls.wait(future, timeout=timeout) - - @classmethod - def submit_batch(cls, fn:str, batch_kwargs: List[Dict[str, Any]], return_future:bool=False, timeout:int=10, module = None, *args, **kwargs): - n = len(batch_kwargs) - module = cls if module == None else module - executor = cls.executor(max_workers=n) - futures = [ executor.submit(fn=getattr(module, fn), kwargs=batch_kwargs[i], timeout=timeout) for i in range(n)] - if return_future: - return futures - return cls.wait(futures) - - - executor_cache = {} - @classmethod - def executor(cls, max_workers:int=None, mode:str="thread", cache:bool = True, maxsize=200, **kwargs): - if cache: - if mode in cls.executor_cache: - return cls.executor_cache[mode] - executor = cls.module(f'executor.{mode}')(max_workers=max_workers, maxsize=maxsize , **kwargs) - if cache: - cls.executor_cache[mode] = executor - return executor - - + return task.future + + return task.future.result() + + + def adjust_thread_count(self): + # if idle threads are available, don't spin new threads + if self.idle_semaphore.acquire(timeout=0): + return + + # When the executor gets lost, the weakref callback will wake up + # the worker threads. + def weakref_cb(_, q=self.work_queue): + q.put(NULL_ENTRY) + + num_threads = len(self.threads) + if num_threads < self.max_workers: + thread_name = "%s_%d" % (self.thread_name_prefix or self, num_threads) + t = threading.Thread( + name=thread_name, + target=self.worker, + args=( + weakref.ref(self, weakref_cb), + self.work_queue, + ), + ) + t.daemon = True + t.start() + self.threads.append(t) + self.threads_queues[t] = self.work_queue + + def shutdown(self, wait=True): + with self.shutdown_lock: + self.shutdown = True + self.work_queue.put(NULL_ENTRY) + if wait: + for t in self.threads: + try: + t.join(timeout=2) + except Exception: + pass @staticmethod - def detailed_error(e) -> dict: - import traceback - tb = traceback.extract_tb(e.__traceback__) - file_name = tb[-1].filename - line_no = tb[-1].lineno - line_text = tb[-1].line - response = { - 'success': False, - 'error': str(e), - 'file_name': file_name, - 'line_no': line_no, - 'line_text': line_text - } - return response + def worker(executor_reference, work_queue): + c.new_event_loop(nest_asyncio=True) + try: + while True: + work_item = work_queue.get(block=True) + priority = work_item[0] + + if priority == sys.maxsize: + # Wake up queue management thread. + work_queue.put(NULL_ENTRY) + break + + item = work_item[1] + + if item is not None: + item.run() + # Delete references to object. See issue16284 + del item + continue + + executor = executor_reference() + # Exit if: + # - The interpreter is shutting down OR + # - The executor that owns the worker has been collected OR + # - The executor that owns the worker has been shutdown. + if executor is None or executor.shutdown: + # Flag the executor as shutting down as early as possible if it + # is not gc-ed yet. 
+ if executor is not None: + executor.shutdown = True + # Notice other workers + work_queue.put(NULL_ENTRY) + return + del executor + except Exception as e: + e = c.detailed_error(e) + + @property + def num_tasks(self): + return self.work_queue.qsize() @classmethod - def as_completed(cls , futures:list, timeout:int=10, **kwargs): - return concurrent.futures.as_completed(futures, timeout=timeout) - - @classmethod - def is_coroutine(cls, future): - """ - returns True if future is a coroutine - """ - return cls.obj2typestr(future) == 'coroutine' - - - @classmethod - def obj2typestr(cls, obj): - return str(type(obj)).split("'")[1] - - @classmethod - def tasks(cls, task = None, mode='pm2',**kwargs) -> List[str]: - kwargs['network'] = 'local' - kwargs['update'] = False - modules = cls.servers( **kwargs) - tasks = getattr(cls, f'{mode}_list')(task) - tasks = list(filter(lambda x: x not in modules, tasks)) - return tasks + def as_completed(futures: list): + assert isinstance(futures, list), "futures must be a list" + return [f for f in futures if not f.done()] + @staticmethod + def wait(futures:list) -> list: + futures = [futures] if not isinstance(futures, list) else futures + results = [] + for future in c.as_completed(futures): + results += [future.result()] + return results + @classmethod - def asubmit(cls, fn:str, *args, **kwargs): + def test(cls): + def fn(x): + result = x*2 + c.print(result) + return result + + self = cls() + futures = [] + for i in range(10): + futures += [self.submit(fn=fn, kwargs=dict(x=i))] + for future in c.tqdm(futures): + future.result() + for i in range(10): + futures += [self.submit(fn=fn, kwargs=dict(x=i))] + + results = c.wait(futures, timeout=10) - async def _asubmit(): - kwargs.update(kwargs.pop('kwargs',{})) - return fn(*args, **kwargs) - return _asubmit() + while self.num_tasks > 0: + c.print(self.num_tasks, 'tasks remaining', color='red') + return {'success': True, 'msg': 'thread pool test passed'} - thread_map = {} - - @classmethod - def thread(cls,fn: Union['callable', str], - args:list = None, - kwargs:dict = None, - daemon:bool = True, - name = None, - tag = None, - start:bool = True, - tag_seperator:str='::', - **extra_kwargs): - - if isinstance(fn, str): - fn = cls.get_fn(fn) - if args == None: - args = [] - if kwargs == None: - kwargs = {} - - assert callable(fn), f'target must be callable, got {fn}' - assert isinstance(args, list), f'args must be a list, got {args}' - assert isinstance(kwargs, dict), f'kwargs must be a dict, got {kwargs}' - # unique thread name - if name == None: - name = fn.__name__ - cnt = 0 - while name in cls.thread_map: - cnt += 1 - if tag == None: - tag = '' - name = name + tag_seperator + tag + str(cnt) - - if name in cls.thread_map: - cls.thread_map[name].join() - - t = threading.Thread(target=fn, args=args, kwargs=kwargs, **extra_kwargs) - # set the time it starts - setattr(t, 'start_time', cls.time()) - t.daemon = daemon - if start: - t.start() - cls.thread_map[name] = t - return t + + @property + def is_empty(self): + return self.work_queue.empty() + def status(self): + return dict( + num_threads = len(self.threads), + num_tasks = self.num_tasks, + is_empty = self.is_empty, + is_full = self.is_full + ) - @classmethod - def threads(cls, search:str = None): - threads = list(cls.thread_map.keys()) - if search != None: - threads = [t for t in threads if search in t] - return threads + \ No newline at end of file diff --git a/commune/executor/process.py b/commune/executor/process.py deleted file mode 100644 index 
7ce9e91d..00000000 --- a/commune/executor/process.py +++ /dev/null @@ -1,902 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Implements ProcessPoolExecutor. - -The following diagram and text describe the data-flow through the system: - -|======================= In-process =====================|== Out-of-process ==| - -+----------+ +----------+ +--------+ +-----------+ +---------+ -| | => | Work Ids | | | | Call Q | | Process | -| | +----------+ | | +-----------+ | Pool | -| | | ... | | | | ... | +---------+ -| | | 6 | => | | => | 5, call() | => | | -| | | 7 | | | | ... | | | -| Process | | ... | | Local | +-----------+ | Process | -| Pool | +----------+ | Worker | | #1..n | -| Executor | | Thread | | | -| | +----------- + | | +-----------+ | | -| | <=> | Work Items | <=> | | <= | Result Q | <= | | -| | +------------+ | | +-----------+ | | -| | | 6: call() | | | | ... | | | -| | | future | | | | 4, result | | | -| | | ... | | | | 3, except | | | -+----------+ +------------+ +--------+ +-----------+ +---------+ - -Executor.submit() called: -- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict -- adds the id of the _WorkItem to the "Work Ids" queue - -Local worker thread: -- reads work ids from the "Work Ids" queue and looks up the corresponding - WorkItem from the "Work Items" dict: if the work item has been cancelled then - it is simply removed from the dict, otherwise it is repackaged as a - _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" - until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because - calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). -- reads _ResultItems from "Result Q", updates the future stored in the - "Work Items" dict and deletes the dict entry - -Process #1..n: -- reads _CallItems from "Call Q", executes the calls, and puts the resulting - _ResultItems in "Result Q" -""" - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -import os -from concurrent.futures import _base -import queue -import multiprocessing as mp -# This import is required to load the multiprocessing.connection submodule -# so that it can be accessed later as `mp.connection` -import multiprocessing.connection -from multiprocessing.queues import Queue -import threading -import weakref -from functools import partial -import itertools -import sys -from traceback import format_exception -import commune as c -import inspect - - -_threads_wakeups = weakref.WeakKeyDictionary() -_global_shutdown = False - - -class _ThreadWakeup: - def __init__(self): - self._closed = False - self._reader, self._writer = mp.Pipe(duplex=False) - - def close(self): - # Please note that we do not take the shutdown lock when - # calling clear() (to avoid deadlocking) so this method can - # only be called safely from the same thread as all calls to - # clear() even if you hold the shutdown lock. Otherwise we - # might try to read from the closed pipe. 
- if not self._closed: - self._closed = True - self._writer.close() - self._reader.close() - - def wakeup(self): - if not self._closed: - self._writer.send_bytes(b"") - - def clear(self): - if not self._closed: - while self._reader.poll(): - self._reader.recv_bytes() - - -def _python_exit(): - global _global_shutdown - _global_shutdown = True - items = list(_threads_wakeups.items()) - for _, thread_wakeup in items: - # call not protected by ProcessPoolExecutor._shutdown_lock - thread_wakeup.wakeup() - for t, _ in items: - t.join() - -# Register for `_python_exit()` to be called just before joining all -# non-daemon threads. This is used instead of `atexit.register()` for -# compatibility with subinterpreters, which no longer support daemon threads. -# See bpo-39812 for context. -threading._register_atexit(_python_exit) - -# Controls how many more calls than processes will be queued in the call queue. -# A smaller number will mean that processes spend more time idle waiting for -# work while a larger number will make Future.cancel() succeed less frequently -# (Futures in the call queue cannot be cancelled). -EXTRA_QUEUED_CALLS = 1 - - -# On Windows, WaitForMultipleObjects is used to wait for processes to finish. -# It can wait on, at most, 63 objects. There is an overhead of two objects: -# - the result queue reader -# - the thread wakeup reader -_MAX_WINDOWS_WORKERS = 63 - 2 - -# Hack to embed stringification of remote traceback in local traceback - -class _RemoteTraceback(Exception): - def __init__(self, tb): - self.tb = tb - def __str__(self): - return self.tb - -class _ExceptionWithTraceback: - def __init__(self, exc, tb): - tb = ''.join(format_exception(type(exc), exc, tb)) - self.exc = exc - # Traceback object needs to be garbage-collected as its frames - # contain references to all the objects in the exception scope - self.exc.__traceback__ = None - self.tb = '\n"""\n%s"""' % tb - def __reduce__(self): - return _rebuild_exc, (self.exc, self.tb) - -def _rebuild_exc(exc, tb): - exc.__cause__ = _RemoteTraceback(tb) - return exc - -class _WorkItem(object): - def __init__(self, future, fn, args, kwargs): - self.future = future - self.fn = fn - self.args = args - self.kwargs = kwargs - -class _ResultItem(object): - def __init__(self, work_id, exception=None, result=None, exit_pid=None): - self.work_id = work_id - self.exception = exception - self.result = result - self.exit_pid = exit_pid - -class _CallItem(object): - def __init__(self, work_id, fn, args, kwargs): - self.work_id = work_id - self.fn = fn - self.args = args - self.kwargs = kwargs - - -class _SafeQueue(Queue): - """Safe Queue set exception to the future object linked to a job""" - def __init__(self, max_size=0, *, ctx, pending_work_items, shutdown_lock, - thread_wakeup): - self.pending_work_items = pending_work_items - self.shutdown_lock = shutdown_lock - self.thread_wakeup = thread_wakeup - super().__init__(max_size, ctx=ctx) - - def _on_queue_feeder_error(self, e, obj): - if isinstance(obj, _CallItem): - tb = format_exception(type(e), e, e.__traceback__) - e.__cause__ = _RemoteTraceback('\n"""\n{}"""'.format(''.join(tb))) - work_item = self.pending_work_items.pop(obj.work_id, None) - with self.shutdown_lock: - self.thread_wakeup.wakeup() - # work_item can be None if another process terminated. 
In this - # case, the executor_manager_thread fails all work_items - # with BrokenProcessPool - if work_item is not None: - work_item.future.set_exception(e) - else: - super()._on_queue_feeder_error(e, obj) - - -def _get_chunks(*iterables, chunksize): - """ Iterates over zip()ed iterables in chunks. """ - it = zip(*iterables) - while True: - chunk = tuple(itertools.islice(it, chunksize)) - if not chunk: - return - yield chunk - - -def _process_chunk(fn, chunk): - """ Processes a chunk of an iterable passed to map. - - Runs the function passed to map() on a chunk of the - iterable passed to map. - - This function is run in a separate process. - - """ - return [fn(*args) for args in chunk] - - -def _sendback_result(result_queue, work_id, result=None, exception=None, - exit_pid=None): - """Safely send back the given result or exception""" - try: - result_queue.put(_ResultItem(work_id, result=result, - exception=exception, exit_pid=exit_pid)) - except BaseException as e: - exc = _ExceptionWithTraceback(e, e.__traceback__) - result_queue.put(_ResultItem(work_id, exception=exc, - exit_pid=exit_pid)) - - -def _process_worker(call_queue, result_queue, initializer, initargs, max_tasks=None): - """Evaluates calls from call_queue and places the results in result_queue. - - This worker is run in a separate process. - - Args: - call_queue: A ctx.Queue of _CallItems that will be read and - evaluated by the worker. - result_queue: A ctx.Queue of _ResultItems that will written - to by the worker. - initializer: A callable initializer, or None - initargs: A tuple of args for the initializer - """ - if initializer is not None: - try: - initializer(*initargs) - except BaseException: - _base.LOGGER.critical('Exception in initializer:', exc_info=True) - # The parent will notice that the process stopped and - # mark the pool broken - return - num_tasks = 0 - exit_pid = None - - while True: - call_item = call_queue.get(block=True) - if call_item is None: - # Wake up queue management thread - result_queue.put(os.getpid()) - return - - if max_tasks is not None: - num_tasks += 1 - if num_tasks >= max_tasks: - exit_pid = os.getpid() - - try: - r = call_item.fn(*call_item.args, **call_item.kwargs) - except BaseException as e: - exc = _ExceptionWithTraceback(e, e.__traceback__) - _sendback_result(result_queue, call_item.work_id, exception=exc, - exit_pid=exit_pid) - else: - _sendback_result(result_queue, call_item.work_id, result=r, - exit_pid=exit_pid) - del r - - # Liberate the resource as soon as possible, to avoid holding onto - # open files or shared memory that is not needed anymore - del call_item - - if exit_pid is not None: - return - - -class _ExecutorManagerThread(threading.Thread): - """Manages the communication between this process and the worker processes. - - The manager is run in a local thread. - - Args: - executor: A reference to the ProcessPoolExecutor that owns - this thread. A weakref will be own by the manager as well as - references to internal objects used to introspect the state of - the executor. - """ - - def __init__(self, executor): - # Store references to necessary internals of the executor. - - # A _ThreadWakeup to allow waking up the queue_manager_thread from the - # main Thread and avoid deadlocks caused by permanently locked queues. - self.thread_wakeup = executor._executor_manager_thread_wakeup - self.shutdown_lock = executor._shutdown_lock - - # A weakref.ref to the ProcessPoolExecutor that owns this thread. 
Used - # to determine if the ProcessPoolExecutor has been garbage collected - # and that the manager can exit. - # When the executor gets garbage collected, the weakref callback - # will wake up the queue management thread so that it can terminate - # if there is no pending work item. - def weakref_cb(_, - thread_wakeup=self.thread_wakeup, - shutdown_lock=self.shutdown_lock): - mp.util.debug('Executor collected: triggering callback for' - ' QueueManager wakeup') - with shutdown_lock: - thread_wakeup.wakeup() - - self.executor_reference = weakref.ref(executor, weakref_cb) - - # A list of the ctx.Process instances used as workers. - self.processes = executor._processes - - # A ctx.Queue that will be filled with _CallItems derived from - # _WorkItems for processing by the process workers. - self.call_queue = executor._call_queue - - # A ctx.SimpleQueue of _ResultItems generated by the process workers. - self.result_queue = executor._result_queue - - # A queue.Queue of work ids e.g. Queue([5, 6, ...]). - self.work_ids_queue = executor._work_ids - - # Maximum number of tasks a worker process can execute before - # exiting safely - self.max_tasks_per_child = executor._max_tasks_per_child - - # A dict mapping work ids to _WorkItems e.g. - # {5: <_WorkItem...>, 6: <_WorkItem...>, ...} - self.pending_work_items = executor._pending_work_items - - super().__init__() - - def run(self): - # Main loop for the executor manager thread. - - while True: - # gh-109047: During Python finalization, self.call_queue.put() - # creation of a thread can fail with RuntimeError. - try: - self.add_call_item_to_queue() - except BaseException as exc: - cause = format_exception(exc) - self.terminate_broken(cause) - return - - result_item, is_broken, cause = self.wait_result_broken_or_wakeup() - - if is_broken: - self.terminate_broken(cause) - return - if result_item is not None: - self.process_result_item(result_item) - - process_exited = result_item.exit_pid is not None - if process_exited: - p = self.processes.pop(result_item.exit_pid) - p.join() - - # Delete reference to result_item to avoid keeping references - # while waiting on new results. - del result_item - - if executor := self.executor_reference(): - if process_exited: - with self.shutdown_lock: - executor._adjust_process_count() - else: - executor._idle_worker_semaphore.release() - del executor - - if self.is_shutting_down(): - self.flag_executor_shutting_down() - - # When only canceled futures remain in pending_work_items, our - # next call to wait_result_broken_or_wakeup would hang forever. - # This makes sure we have some running futures or none at all. - self.add_call_item_to_queue() - - # Since no new work items can be added, it is safe to shutdown - # this thread if there are no pending work items. - if not self.pending_work_items: - self.join_executor_internals() - return - - def add_call_item_to_queue(self): - # Fills call_queue with _WorkItems from pending_work_items. - # This function never blocks. 
- while True: - if self.call_queue.full(): - return - try: - work_id = self.work_ids_queue.get(block=False) - except queue.Empty: - return - else: - work_item = self.pending_work_items[work_id] - - if work_item.future.set_running_or_notify_cancel(): - self.call_queue.put(_CallItem(work_id, - work_item.fn, - work_item.args, - work_item.kwargs), - block=True) - else: - del self.pending_work_items[work_id] - continue - - def wait_result_broken_or_wakeup(self): - # Wait for a result to be ready in the result_queue while checking - # that all worker processes are still running, or for a wake up - # signal send. The wake up signals come either from new tasks being - # submitted, from the executor being shutdown/gc-ed, or from the - # shutdown of the python interpreter. - result_reader = self.result_queue._reader - assert not self.thread_wakeup._closed - wakeup_reader = self.thread_wakeup._reader - readers = [result_reader, wakeup_reader] - worker_sentinels = [p.sentinel for p in list(self.processes.values())] - ready = mp.connection.wait(readers + worker_sentinels) - - cause = None - is_broken = True - result_item = None - if result_reader in ready: - try: - result_item = result_reader.recv() - is_broken = False - except BaseException as exc: - cause = format_exception(exc) - - elif wakeup_reader in ready: - is_broken = False - - # No need to hold the _shutdown_lock here because: - # 1. we're the only thread to use the wakeup reader - # 2. we're also the only thread to call thread_wakeup.close() - # 3. we want to avoid a possible deadlock when both reader and writer - # would block (gh-105829) - self.thread_wakeup.clear() - - return result_item, is_broken, cause - - def process_result_item(self, result_item): - # Process the received a result_item. This can be either the PID of a - # worker that exited gracefully or a _ResultItem - - # Received a _ResultItem so mark the future as completed. - work_item = self.pending_work_items.pop(result_item.work_id, None) - # work_item can be None if another process terminated (see above) - if work_item is not None: - if result_item.exception: - work_item.future.set_exception(result_item.exception) - else: - work_item.future.set_result(result_item.result) - - def is_shutting_down(self): - # Check whether we should start shutting down the executor. - executor = self.executor_reference() - # No more work items can be added if: - # - The interpreter is shutting down OR - # - The executor that owns this worker has been collected OR - # - The executor that owns this worker has been shutdown. - return (_global_shutdown or executor is None - or executor._shutdown_thread) - - def _terminate_broken(self, cause): - # Terminate the executor because it is in a broken state. The cause - # argument can be used to display more information on the error that - # lead the executor into becoming broken. - - # Mark the process pool broken so that submits fail right now. - executor = self.executor_reference() - if executor is not None: - executor._broken = ('A child process terminated ' - 'abruptly, the process pool is not ' - 'usable anymore') - executor._shutdown_thread = True - executor = None - - # All pending tasks are to be marked failed with the following - # BrokenProcessPool error - bpe = BrokenProcessPool("A process in the process pool was " - "terminated abruptly while the future was " - "running or pending.") - if cause is not None: - bpe.__cause__ = _RemoteTraceback( - f"\n'''\n{''.join(cause)}'''") - - # Mark pending tasks as failed. 
- for work_id, work_item in self.pending_work_items.items(): - try: - work_item.future.set_exception(bpe) - except _base.InvalidStateError: - # set_exception() fails if the future is cancelled: ignore it. - # Trying to check if the future is cancelled before calling - # set_exception() would leave a race condition if the future is - # cancelled between the check and set_exception(). - pass - # Delete references to object. See issue16284 - del work_item - self.pending_work_items.clear() - - # Terminate remaining workers forcibly: the queues or their - # locks may be in a dirty state and block forever. - for p in self.processes.values(): - p.terminate() - - self.call_queue._terminate_broken() - - # clean up resources - self._join_executor_internals(broken=True) - - def terminate_broken(self, cause): - with self.shutdown_lock: - self._terminate_broken(cause) - - def flag_executor_shutting_down(self): - # Flag the executor as shutting down and cancel remaining tasks if - # requested as early as possible if it is not gc-ed yet. - executor = self.executor_reference() - if executor is not None: - executor._shutdown_thread = True - # Cancel pending work items if requested. - if executor._cancel_pending_futures: - # Cancel all pending futures and update pending_work_items - # to only have futures that are currently running. - new_pending_work_items = {} - for work_id, work_item in self.pending_work_items.items(): - if not work_item.future.cancel(): - new_pending_work_items[work_id] = work_item - self.pending_work_items = new_pending_work_items - # Drain work_ids_queue since we no longer need to - # add items to the call queue. - while True: - try: - self.work_ids_queue.get_nowait() - except queue.Empty: - break - # Make sure we do this only once to not waste time looping - # on running processes over and over. - executor._cancel_pending_futures = False - - def shutdown_workers(self): - n_children_to_stop = self.get_n_children_alive() - n_sentinels_sent = 0 - # Send the right number of sentinels, to make sure all children are - # properly terminated. - while (n_sentinels_sent < n_children_to_stop - and self.get_n_children_alive() > 0): - for i in range(n_children_to_stop - n_sentinels_sent): - try: - self.call_queue.put_nowait(None) - n_sentinels_sent += 1 - except queue.Full: - break - - def join_executor_internals(self): - with self.shutdown_lock: - self._join_executor_internals() - - def _join_executor_internals(self, broken=False): - # If broken, call_queue was closed and so can no longer be used. - if not broken: - self.shutdown_workers() - - # Release the queue's resources as soon as possible. - self.call_queue.close() - self.call_queue.join_thread() - self.thread_wakeup.close() - - # If .join() is not called on the created processes then - # some ctx.Queue methods may deadlock on Mac OS X. - for p in self.processes.values(): - if broken: - p.terminate() - p.join() - - def get_n_children_alive(self): - # This is an upper bound on the number of children alive. 
- return sum(p.is_alive() for p in self.processes.values()) - - -_system_limits_checked = False -_system_limited = None - - -def _check_system_limits(): - global _system_limits_checked, _system_limited - if _system_limits_checked: - if _system_limited: - raise NotImplementedError(_system_limited) - _system_limits_checked = True - try: - import multiprocessing.synchronize - except ImportError: - _system_limited = ( - "This Python build lacks multiprocessing.synchronize, usually due " - "to named semaphores being unavailable on this platform." - ) - raise NotImplementedError(_system_limited) - try: - nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") - except (AttributeError, ValueError): - # sysconf not available or setting not available - return - if nsems_max == -1: - # indetermined limit, assume that limit is determined - # by available memory only - return - if nsems_max >= 256: - # minimum number of semaphores available - # according to POSIX - return - _system_limited = ("system provides too few semaphores (%d" - " available, 256 necessary)" % nsems_max) - raise NotImplementedError(_system_limited) - - -def _chain_from_iterable_of_lists(iterable): - """ - Specialized implementation of itertools.chain.from_iterable. - Each item in *iterable* should be a list. This function is - careful not to keep references to yielded objects. - """ - for element in iterable: - element.reverse() - while element: - yield element.pop() - - -class BrokenProcessPool(_base.BrokenExecutor): - """ - Raised when a process in a ProcessPoolExecutor terminated abruptly - while a future was in the running state. - """ - -class ProcessPoolExecutor(_base.Executor,c.Module): - def __init__(self, max_workers=None, mp_context=None, - initializer=None, initargs=(), *, max_tasks_per_child=None): - """Initializes a new ProcessPoolExecutor instance. - - Args: - max_workers: The maximum number of processes that can be used to - execute the given calls. If None or not given then as many - worker processes will be created as the machine has processors. - mp_context: A multiprocessing context to launch the workers created - using the multiprocessing.get_context('start method') API. This - object should provide SimpleQueue, Queue and Process. - initializer: A callable used to initialize worker processes. - initargs: A tuple of arguments to pass to the initializer. - max_tasks_per_child: The maximum number of tasks a worker process - can complete before it will exit and be replaced with a fresh - worker process. The default of None means worker process will - live as long as the executor. Requires a non-'fork' mp_context - start method. When given, we default to using 'spawn' if no - mp_context is supplied. 
- """ - _check_system_limits() - - if max_workers is None: - self._max_workers = os.cpu_count() or 1 - if sys.platform == 'win32': - self._max_workers = min(_MAX_WINDOWS_WORKERS, - self._max_workers) - else: - if max_workers <= 0: - raise ValueError("max_workers must be greater than 0") - elif (sys.platform == 'win32' and - max_workers > _MAX_WINDOWS_WORKERS): - raise ValueError( - f"max_workers must be <= {_MAX_WINDOWS_WORKERS}") - - self._max_workers = max_workers - - if mp_context is None: - if max_tasks_per_child is not None: - mp_context = mp.get_context("spawn") - else: - mp_context = mp.get_context() - self._mp_context = mp_context - - # https://github.com/python/cpython/issues/90622 - self._safe_to_dynamically_spawn_children = ( - self._mp_context.get_start_method(allow_none=False) != "fork") - - if initializer is not None and not callable(initializer): - raise TypeError("initializer must be a callable") - self._initializer = initializer - self._initargs = initargs - - if max_tasks_per_child is not None: - if not isinstance(max_tasks_per_child, int): - raise TypeError("max_tasks_per_child must be an integer") - elif max_tasks_per_child <= 0: - raise ValueError("max_tasks_per_child must be >= 1") - if self._mp_context.get_start_method(allow_none=False) == "fork": - # https://github.com/python/cpython/issues/90622 - raise ValueError("max_tasks_per_child is incompatible with" - " the 'fork' multiprocessing start method;" - " supply a different mp_context.") - self._max_tasks_per_child = max_tasks_per_child - - # Management thread - self._executor_manager_thread = None - - # Map of pids to processes - self._processes = {} - - # Shutdown is a two-step process. - self._shutdown_thread = False - self._shutdown_lock = threading.Lock() - self._idle_worker_semaphore = threading.Semaphore(0) - self._broken = False - self._queue_count = 0 - self._pending_work_items = {} - self._cancel_pending_futures = False - - # _ThreadWakeup is a communication channel used to interrupt the wait - # of the main loop of executor_manager_thread from another thread (e.g. - # when calling executor.submit or executor.shutdown). We do not use the - # _result_queue to send wakeup signals to the executor_manager_thread - # as it could result in a deadlock if a worker process dies with the - # _result_queue write lock still acquired. - # - # _shutdown_lock must be locked to access _ThreadWakeup.close() and - # .wakeup(). Care must also be taken to not call clear or close from - # more than one thread since _ThreadWakeup.clear() is not protected by - # the _shutdown_lock - self._executor_manager_thread_wakeup = _ThreadWakeup() - - # Create communication channels for the executor - # Make the call queue slightly larger than the number of processes to - # prevent the worker processes from idling. But don't make it too big - # because futures in the call queue cannot be cancelled. - queue_size = self._max_workers + EXTRA_QUEUED_CALLS - self._call_queue = _SafeQueue( - max_size=queue_size, ctx=self._mp_context, - pending_work_items=self._pending_work_items, - shutdown_lock=self._shutdown_lock, - thread_wakeup=self._executor_manager_thread_wakeup) - # Killed worker processes can produce spurious "broken pipe" - # tracebacks in the queue's own worker thread. But we detect killed - # processes anyway, so silence the tracebacks. 
- self._call_queue._ignore_epipe = True - self._result_queue = mp_context.SimpleQueue() - self._work_ids = queue.Queue() - - def _start_executor_manager_thread(self): - if self._executor_manager_thread is None: - # Start the processes so that their sentinels are known. - if not self._safe_to_dynamically_spawn_children: # ie, using fork. - self._launch_processes() - self._executor_manager_thread = _ExecutorManagerThread(self) - self._executor_manager_thread.start() - _threads_wakeups[self._executor_manager_thread] = \ - self._executor_manager_thread_wakeup - - def _adjust_process_count(self): - # if there's an idle process, we don't need to spawn a new one. - if self._idle_worker_semaphore.acquire(blocking=False): - return - - process_count = len(self._processes) - if process_count < self._max_workers: - # Assertion disabled as this codepath is also used to replace a - # worker that unexpectedly dies, even when using the 'fork' start - # method. That means there is still a potential deadlock bug. If a - # 'fork' mp_context worker dies, we'll be forking a new one when - # we know a thread is running (self._executor_manager_thread). - #assert self._safe_to_dynamically_spawn_children or not self._executor_manager_thread, 'https://github.com/python/cpython/issues/90622' - self._spawn_process() - - def _launch_processes(self): - # https://github.com/python/cpython/issues/90622 - assert not self._executor_manager_thread, ( - 'Processes cannot be fork()ed after the thread has started, ' - 'deadlock in the child processes could result.') - for _ in range(len(self._processes), self._max_workers): - self._spawn_process() - - def _spawn_process(self): - p = self._mp_context.Process( - target=_process_worker, - args=(self._call_queue, - self._result_queue, - self._initializer, - self._initargs, - self._max_tasks_per_child)) - p.start() - self._processes[p.pid] = p - - - def submit(self, fn, *args, return_future:bool = True, init_kwargs:dict=None, **kwargs): - with self._shutdown_lock: - fn = c.resolve_fn(fn) - if self._broken: - raise BrokenProcessPool(self._broken) - if self._shutdown_thread: - raise RuntimeError('cannot schedule new futures after shutdown') - if _global_shutdown: - raise RuntimeError('cannot schedule new futures after ' - 'interpreter shutdown') - - f = _base.Future() - w = _WorkItem(f, fn, args, kwargs) - - self._pending_work_items[self._queue_count] = w - self._work_ids.put(self._queue_count) - self._queue_count += 1 - # Wake up queue management thread - self._executor_manager_thread_wakeup.wakeup() - if self._safe_to_dynamically_spawn_children: - self._adjust_process_count() - self._start_executor_manager_thread() - if return_future: - return f - return f.result() - - - submit.__doc__ = _base.Executor.submit.__doc__ - - def map(self, fn, *iterables, timeout=None, chunksize=1): - """Returns an iterator equivalent to map(fn, iter). - - Args: - fn: A callable that will take as many arguments as there are - passed iterables. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. - chunksize: If greater than one, the iterables will be chopped into - chunks of size chunksize and submitted to the process pool. - If set to one, the items in the list will be sent one at a time. - - Returns: - An iterator equivalent to: map(func, *iterables) but the calls may - be evaluated out-of-order. - - Raises: - TimeoutError: If the entire result iterator could not be generated - before the given timeout. - Exception: If fn(*args) raises for any values. 
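A short sketch of the chunking contract that the map docstring below describes, using the stdlib executor this implementation mirrors: chunksize batches items per pickled round-trip, while results still arrive in input order.

from concurrent.futures import ProcessPoolExecutor

def double(x):
    return 2 * x

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=2) as ex:
        # 10 items are shipped to workers as ceil(10/3) = 4 chunks instead of 10 messages
        print(list(ex.map(double, range(10), chunksize=3)))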
- """ - if chunksize < 1: - raise ValueError("chunksize must be >= 1.") - - results = super().map(partial(_process_chunk, fn), - _get_chunks(*iterables, chunksize=chunksize), - timeout=timeout) - return _chain_from_iterable_of_lists(results) - - def shutdown(self, wait=True, *, cancel_futures=False): - with self._shutdown_lock: - self._cancel_pending_futures = cancel_futures - self._shutdown_thread = True - if self._executor_manager_thread_wakeup is not None: - # Wake up queue management thread - self._executor_manager_thread_wakeup.wakeup() - - if self._executor_manager_thread is not None and wait: - self._executor_manager_thread.join() - # To reduce the risk of opening too many files, remove references to - # objects that use file descriptors. - self._executor_manager_thread = None - self._call_queue = None - if self._result_queue is not None and wait: - self._result_queue.close() - self._result_queue = None - self._processes = None - self._executor_manager_thread_wakeup = None - - @property - def num_tasks(self): - return self._call_queue.__sizeof__() - - @staticmethod - def fn(x=2): - result = x*2 - return result - shutdown.__doc__ = _base.Executor.shutdown.__doc__ - @classmethod - def test(cls): - - self = cls() - futures = [] - for i in range(10): - futures += [self.submit('module/ls', return_future=True)] - # for future in c.tqdm(futures): - # future.result() - c.print(c.wait(futures)) - - return {'success': True, 'msg': 'process pool test passed'} - \ No newline at end of file diff --git a/commune/executor/task.py b/commune/executor/task.py deleted file mode 100644 index 4e843582..00000000 --- a/commune/executor/task.py +++ /dev/null @@ -1,123 +0,0 @@ -# Workers are created as daemon threads. This is done to allow the interpreter -# to exit when there are still idle threads in a ThreadPoolExecutor's thread -# pool (i.e. shutdown() was not called). However, allowing workers to die with -# the interpreter has two undesirable properties: -# - The workers would still be running during interpreter shutdown, -# meaning that they would fail in unpredictable ways. -# - The workers could be killed while evaluating a work item, which could -# be bad if the callable being evaluated has external side-effects e.g. -# writing to a file. -# -# To work around this problem, an exit handler is installed which tells the -# workers to exit when their work queues are empty and then waits until the -# threads finish. 
- -import time -from concurrent.futures._base import Future -import commune as c - -class Task(c.Module): - def __init__(self, - fn:str, - args:list, - kwargs:dict, - timeout:int=10, - priority:int=1, - path = None, - **extra_kwargs): - - self.future = Future() - self.fn = fn # the function to run - self.start_time = time.time() # the time the task was created - self.end_time = None - self.args = args # the arguments of the task - self.kwargs = kwargs # the arguments of the task - self.timeout = timeout # the timeout of the task - self.priority = priority # the priority of the task - self.data = None # the result of the task - self.latency = None - - self.fn_name = fn.__name__ if fn != None else str(fn) # the name of the function - # for the sake of simplicity, we'll just add all the extra kwargs to the task object - self.path = self.resolve_path(path) if path != None else None - self.status = 'pending' # pending, running, done - # save the task state - - - @property - def lifetime(self) -> float: - return time.time() - self.start_time - - @property - def state(self) -> dict: - return { - 'fn': self.fn.__name__, - 'args': self.args, - 'kwargs': self.kwargs, - 'timeout': self.timeout, - 'start_time': self.start_time, - 'end_time': self.end_time, - 'latency': self.latency, - 'priority': self.priority, - 'status': self.status, - 'data': self.data, - } - - - def run(self): - """Run the given work item""" - # Checks if future is canceled or if work item is stale - self.start_time = c.time() - - if (not self.future.set_running_or_notify_cancel()) or (time.time() - self.start_time) > self.timeout: - self.future.set_exception(TimeoutError('Task timed out')) - try: - data = self.fn(*self.args, **self.kwargs) - self.status = 'complete' - except Exception as e: - data = c.detailed_error(e) - if 'event loop' in data['error']: - c.new_event_loop(nest_asyncio=True) - self.status = 'failed' - - self.future.set_result(data) - # store the result of the task - if self.path != None: - self.save(self.path, self.state) - - self.end_time = c.time() - self.latency = self.end_time - self.start_time - self.data = data - - - - def result(self) -> object: - return self.future.result() - - @property - def _condition(self) -> bool: - return self.future._condition - @property - def _state(self, *args, **kwargs) -> bool: - return self.future._state - - @property - def _waiters(self) -> bool: - return self.future._waiters - - def cancel(self) -> bool: - self.future.cancel() - - def running(self) -> bool: - return self.future.running() - - def done(self) -> bool: - return self.future.done() - - def __lt__(self, other): - if isinstance(other, Task): - return self.priority < other.priority - elif isinstance(other, int): - return self.priority < other - else: - raise TypeError(f"Cannot compare Task with {type(other)}") diff --git a/commune/executor/thread.py b/commune/executor/thread.py deleted file mode 100644 index 06a909e5..00000000 --- a/commune/executor/thread.py +++ /dev/null @@ -1,254 +0,0 @@ - -import os -import sys -import time -import queue -import random -import weakref -import itertools -import threading - -from loguru import logger -from typing import Callable -import concurrent -from concurrent.futures._base import Future -import commune as c -import gc - -Task = c.module('executor.task') - -NULL_ENTRY = (sys.maxsize, Task(None, (), {})) - -class ThreadPoolExecutor(c.Module): - """Base threadpool executor with a priority queue""" - - # Used to assign unique thread names when thread_name_prefix is not supplied. 
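The removed Task.__lt__ exists so that the executor's (priority, task) queue entries stay comparable: when two entries share a priority, PriorityQueue falls back to comparing the tasks themselves. A minimal sketch of that ordering contract, with an illustrative Job class standing in for Task (lower value is served first):

import queue

class Job:
    def __init__(self, priority, name):
        self.priority, self.name = priority, name
    def __lt__(self, other):
        # mirrors Task.__lt__: ordering is defined purely by priority
        return self.priority < other.priority

q = queue.PriorityQueue()
for job in [Job(2, 'b'), Job(1, 'a'), Job(3, 'c')]:
    q.put((job.priority, job))

while not q.empty():
    print(q.get()[1].name)  # prints a, b, c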
- _counter = itertools.count().__next__ - # submit.__doc__ = _base.Executor.submit.__doc__ - threads_queues = weakref.WeakKeyDictionary() - - def __init__( - self, - max_workers: int =None, - maxsize : int = None , - thread_name_prefix : str ="", - ): - """Initializes a new ThreadPoolExecutor instance. - Args: - max_workers: The maximum number of threads that can be used to - execute the given calls. - thread_name_prefix: An optional name prefix to give our threads. - """ - self.start_time = c.time() - - max_workers = (os.cpu_count() or 1) * 5 if max_workers == None else max_workers - maxsize = max_workers * 10 or None - if max_workers <= 0: - raise ValueError("max_workers must be greater than 0") - - self.max_workers = max_workers - self.work_queue = queue.PriorityQueue(maxsize=maxsize) - self.idle_semaphore = threading.Semaphore(0) - self.threads = [] - self.broken = False - self.shutdown = False - self.shutdown_lock = threading.Lock() - self.thread_name_prefix = thread_name_prefix or ("ThreadPoolExecutor-%d" % self._counter() ) - - @property - def is_empty(self): - return self.work_queue.empty() - - @property - def is_full(self): - return self.work_queue.full() - - - def default_priority_score(self): - # older scores are higher priority - return 1 # abs((self.start_time - c.time())) - - - - def submit(self, - fn: Callable, - params = None, - args:dict=None, - kwargs:dict=None, - priority:int=1, - timeout=200, - return_future:bool=True, - wait = True, - path:str=None) -> Future: - if params != None: - if isinstance(params, dict): - kwargs = params - elif isinstance(params, list): - args = params - else: - raise ValueError("params must be a list or a dict") - # check if the queue is full and if so, raise an exception - if self.work_queue.full(): - if wait: - while self.work_queue.full(): - time.sleep(0.1) - else: - return {'success': False, 'msg':"cannot schedule new futures after maxsize exceeded"} - - args = args or [] - kwargs = kwargs or {} - - with self.shutdown_lock: - - if self.broken: - raise Exception("ThreadPoolExecutor is broken") - if self.shutdown: - raise RuntimeError("cannot schedule new futures after shutdown") - priority = kwargs.get("priority", priority) - if "priority" in kwargs: - del kwargs["priority"] - task = Task(fn=fn, args=args, kwargs=kwargs, timeout=timeout, path=path) - # add the work item to the queue - self.work_queue.put((priority, task), block=False) - # adjust the thread count to match the new task - self.adjust_thread_count() - - # return the future (MAYBE WE CAN RETURN THE TASK ITSELF) - if return_future: - return task.future - - return task.future.result() - - - def adjust_thread_count(self): - # if idle threads are available, don't spin new threads - if self.idle_semaphore.acquire(timeout=0): - return - - # When the executor gets lost, the weakref callback will wake up - # the worker threads. 
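A minimal sketch of the weakref wake-up pattern used in adjust_thread_count below, with None standing in for NULL_ENTRY: when the executor is garbage-collected, the callback pushes a sentinel so that workers blocked on the queue can exit.

import queue
import weakref

q = queue.Queue()

class Executor:
    pass

ex = Executor()
ref = weakref.ref(ex, lambda _, q=q: q.put(None))  # None acts as the sentinel
del ex          # the collector fires the callback...
print(q.get())  # ...and a worker blocked on q.get() is released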
- def weakref_cb(_, q=self.work_queue): - q.put(NULL_ENTRY) - - num_threads = len(self.threads) - if num_threads < self.max_workers: - thread_name = "%s_%d" % (self.thread_name_prefix or self, num_threads) - t = threading.Thread( - name=thread_name, - target=self.worker, - args=( - weakref.ref(self, weakref_cb), - self.work_queue, - ), - ) - t.daemon = True - t.start() - self.threads.append(t) - self.threads_queues[t] = self.work_queue - - def shutdown(self, wait=True): - with self.shutdown_lock: - self.shutdown = True - self.work_queue.put(NULL_ENTRY) - if wait: - for t in self.threads: - try: - t.join(timeout=2) - except Exception: - pass - - @staticmethod - def worker(executor_reference, work_queue): - c.new_event_loop(nest_asyncio=True) - - try: - while True: - work_item = work_queue.get(block=True) - priority = work_item[0] - - if priority == sys.maxsize: - # Wake up queue management thread. - work_queue.put(NULL_ENTRY) - break - - item = work_item[1] - - if item is not None: - item.run() - # Delete references to object. See issue16284 - del item - continue - - executor = executor_reference() - # Exit if: - # - The interpreter is shutting down OR - # - The executor that owns the worker has been collected OR - # - The executor that owns the worker has been shutdown. - if executor is None or executor.shutdown: - # Flag the executor as shutting down as early as possible if it - # is not gc-ed yet. - if executor is not None: - executor.shutdown = True - # Notice other workers - work_queue.put(NULL_ENTRY) - return - del executor - except Exception as e: - e = c.detailed_error(e) - - @property - def num_tasks(self): - return self.work_queue.qsize() - - @classmethod - def as_completed(futures: list): - assert isinstance(futures, list), "futures must be a list" - return [f for f in futures if not f.done()] - - @staticmethod - def wait(futures:list) -> list: - futures = [futures] if not isinstance(futures, list) else futures - results = [] - for future in c.as_completed(futures): - results += [future.result()] - return results - - - @classmethod - def test(cls): - def fn(x): - result = x*2 - c.print(result) - return result - - self = cls() - futures = [] - for i in range(10): - futures += [self.submit(fn=fn, kwargs=dict(x=i))] - for future in c.tqdm(futures): - future.result() - for i in range(10): - futures += [self.submit(fn=fn, kwargs=dict(x=i))] - - results = c.wait(futures, timeout=10) - - while self.num_tasks > 0: - c.print(self.num_tasks, 'tasks remaining', color='red') - - - return {'success': True, 'msg': 'thread pool test passed'} - - - - @property - def is_empty(self): - return self.work_queue.empty() - def status(self): - return dict( - num_threads = len(self.threads), - num_tasks = self.num_tasks, - is_empty = self.is_empty, - is_full = self.is_full - ) - - \ No newline at end of file diff --git a/commune/key/key.py b/commune/key/key.py index 8b25cf81..e03cc575 100644 --- a/commune/key/key.py +++ b/commune/key/key.py @@ -981,6 +981,8 @@ def sign(self, def is_ticket(self, data): return all([k in data for k in ['data','signature', 'address', 'crypto_type']]) and any([k in data for k in ['time', 'timestamp']]) + + def verify(self, data: Union[ScaleBytes, bytes, str, dict], signature: Union[bytes, str] = None, @@ -1006,8 +1008,6 @@ def verify(self, True if data is signed with this Keypair, otherwise False """ data = c.copy(data) - - if isinstance(data, str) and seperator in data: data, signature = data.split(seperator) if isinstance(data, dict): @@ -1410,20 +1410,11 @@ def 
to_mnemonic(self, password=None):
        return Mnemonic('english').to_mnemonic(self.private_key)

-    def ticket_staleness(self, ticket, **kwargs):
-
-        return self.verify(ticket, **kwargs)
-
-    def app(self):
-        c.module('key.app').app()
-
-
     @staticmethod
     def is_ss58(address):
         # Check address length
         if len(address) != 47:
             return False
-
         # Check prefix
         network_prefixes = ['1', '2', '5', '7']  # Add more prefixes as needed
         if address[0] not in network_prefixes:
@@ -1446,7 +1437,6 @@ def is_ss58(address):

         return True

-
     @classmethod
     def is_encrypted(cls, data, prefix=encrypted_prefix):
         if isinstance(data, str):
diff --git a/commune/key/test.py b/commune/key/test.py
index 7583181c..2069e6d3 100644
--- a/commune/key/test.py
+++ b/commune/key/test.py
@@ -2,7 +2,7 @@

 import commune as c

-class TestKey(c.m('key')):
+class TestKey(c.module('key')):

     @classmethod
     def test_verification(cls, key='test'):
diff --git a/commune/module.py b/commune/module.py
new file mode 100755
index 00000000..3bfa3781
--- /dev/null
+++ b/commune/module.py
@@ -0,0 +1,6135 @@
+import os
+import inspect
+import json
+import shutil
+import time
+import gc
+import threading
+import subprocess
+import shlex
+import sys
+import argparse
+import asyncio
+import nest_asyncio
+import urllib
+import requests
+import netaddr
+import yaml
+from functools import partial
+import random
+from copy import deepcopy
+import concurrent
+from typing import *
+
+import socket
+
+nest_asyncio.apply()
+
+class c:
+
+    whitelist = []
+    _schema = None
+    core_modules = ['module', 'key', 'subspace', 'web3', 'serializer', 'pm2',
+                    'executor', 'client', 'server',
+                    'namespace']
+    libname = lib_name = lib = 'commune' # the name of the library
+    cost = 1
+    description = """This is a module"""
+    base_module = 'module' # the base module
+    giturl = 'https://github.com/commune-ai/commune.git' # the git url of the library
+    root_module_class = 'c' # replaced with the Module alias at the end of this file
+    default_port_range = [50050, 50150] # the port range between 50050 and 50150
+    default_ip = local_ip = loopback = '0.0.0.0'
+    address = '0.0.0.0:8888' # the address of the server (default)
+    rootpath = root_path = root = '/'.join(__file__.split('/')[:-2]) # the path to the root of the library
+    homepath = home_path = os.path.expanduser('~') # the home path
+    libpath = lib_path = os.path.dirname(root_path) # the path to the library
+    repopath = repo_path = os.path.dirname(root_path) # the path to the repo
+    cache = {} # cache for module objects
+    home = os.path.expanduser('~') # the home directory
+    __ss58_format__ = 42 # the ss58 format for the substrate address
+    cache_path = os.path.expanduser(f'~/.{libname}')
+    default_tag = 'base'
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    @property
+    def key(self):
+        if not hasattr(self, '_key'):
+            if not hasattr(self, 'server_name') or self.server_name == None:
+                self.server_name = self.module_name()
+            self._key = c.get_key(self.server_name, create_if_not_exists=True)
+        return self._key
+
+    @key.setter
+    def key(self, key: 'Key'):
+        if key == None:
+            key = self.server_name
+        self._key = key if hasattr(key, 'ss58_address') else c.get_key(key, create_if_not_exists=True)
+        return self._key
+
+    @classmethod
+    async def async_call(cls, *args, **kwargs):
+        return c.call(*args, **kwargs)
+
+    def getattr(self, k:str) -> Any:
+        return getattr(self, k)
+
+    @classmethod
+    def getclassattr(cls, k:str) -> Any:
+        return getattr(cls, k)
+
+    @classmethod
+    def module_file(cls) -> 
str: + # get the file of the module + return inspect.getfile(cls) + @classmethod + def filepath(cls, obj=None) -> str: + ''' + removes the PWD with respect to where module.py is located + ''' + obj = cls.resolve_object(obj) + try: + module_path = inspect.getfile(obj) + except Exception as e: + c.print(f'Error: {e} {cls}', color='red') + module_path = inspect.getfile(cls) + return module_path + + pythonpath = pypath = file_path = filepath + + @classmethod + def dirpath(cls) -> str: + ''' + removes the PWD with respect to where module.py is located + ''' + return os.path.dirname(cls.filepath()) + folderpath = dirname = dir_path = dirpath + + @classmethod + def module_name(cls, obj=None): + if hasattr(cls, 'name') and isinstance(cls.name, str): + return cls.name + obj = cls.resolve_object(obj) + module_file = inspect.getfile(obj) + return c.path2simple(module_file) + + path = name = module_name + + @classmethod + def module_class(cls) -> str: + return cls.__name__ + @classmethod + def class_name(cls, obj= None) -> str: + obj = obj if obj != None else cls + return obj.__name__ + + classname = class_name + + @classmethod + def config_path(cls) -> str: + return cls.filepath().replace('.py', '.yaml') + + @classmethod + def sandbox(cls): + c.cmd(f'python3 {c.root_path}/sandbox.py', verbose=True) + return + + sand = sandbox + + module_cache = {} + _obj = None + + @classmethod + def obj2module(cls,obj): + import commune as c + class WrapperModule(c.Module): + _obj = obj + def __name__(self): + return obj.__name__ + def __class__(self): + return obj.__class__ + @classmethod + def filepath(cls) -> str: + return super().filepath(cls._obj) + + for fn in dir(WrapperModule): + try: + setattr(obj, fn, getattr(WrapperModule, fn)) + except: + pass + + return obj + + @classmethod + def storage_dir(cls): + return f'{c.cache_path}/{cls.module_name()}' + + @classmethod + def refresh_storage(cls): + cls.rm(cls.storage_dir()) + + @classmethod + def refresh_storage_dir(cls): + c.rm(cls.storage_dir()) + c.makedirs(cls.storage_dir()) + + ############ JSON LAND ############### + + @classmethod + def __str__(cls): + return cls.__name__ + + @classmethod + def root_address(cls, name:str='module', + network : str = 'local', + timeout:int = 100, + sleep_interval:int = 1, + **kwargs): + """ + Root module + """ + try: + if not c.server_exists(name, network=network): + c.serve(name, network=network, wait_for_server=True, **kwargs) + address = c.call('module/address', network=network, timeout=timeout) + ip = c.ip() + address = ip+':'+address.split(':')[-1] + except Exception as e: + c.print(f'Error: {e}', color='red') + address = None + return address + + addy = root_address + + @property + def key_address(self): + return self.key.ss58_address + + @classmethod + def is_module(cls, obj=None) -> bool: + + if obj is None: + obj = cls + if all([hasattr(obj, k) for k in ['info', 'schema', 'set_config', 'config']]): + return True + return False + + @classmethod + def root_functions(cls): + return c.fns() + + @classmethod + def is_root(cls, obj=None) -> bool: + required_features = ['module_class','root_module_class', 'module_name'] + if obj is None: + obj = cls + if all([hasattr(obj, k) for k in required_features]): + module_class = obj.module_class() + if module_class == cls.root_module_class: + return True + return False + is_module_root = is_root_module = is_root + + @classmethod + def serialize(cls, *args, **kwargs): + return c.module('serializer')().serialize(*args, **kwargs) + @classmethod + def deserialize(cls, *args, 
**kwargs): + return c.module('serializer')().deserialize(*args, **kwargs) + + @property + def server_name(self): + if not hasattr(self, '_server_name'): + self._server_name = self.module_name() + return self._server_name + + @server_name.setter + def server_name(self, name): + self._server_name = name + + @classmethod + def resolve_object(cls, obj:str = None, **kwargs): + if isinstance(obj, str): + obj = c.module(obj, **kwargs) + if cls._obj != None: + return cls._obj + else: + return obj or cls + + def self_destruct(self): + c.kill(self.server_name) + + def self_restart(self): + c.restart(self.server_name) + + @classmethod + def pm2_start(cls, *args, **kwargs): + return c.module('pm2').start(*args, **kwargs) + + @classmethod + def pm2_launch(cls, *args, **kwargs): + return c.module('pm2').launch(*args, **kwargs) + + @classmethod + def restart(cls, name:str, mode:str='pm2', verbose:bool = False, prefix_match:bool = True): + refreshed_modules = getattr(cls, f'{mode}_restart')(name, verbose=verbose, prefix_match=prefix_match) + return refreshed_modules + + def restart_self(self): + """ + Helper function to restart the server + """ + return c.restart(self.server_name) + + update_self = restart_self + + def kill_self(self): + """ + Helper function to kill the server + """ + return c.kill(self.server_name) + + refresh = reset = restart + + @classmethod + def argparse(cls): + parser = argparse.ArgumentParser(description='Argparse for the module') + parser.add_argument('-m', '--m', '--module', '-module', dest='function', help='The function', type=str, default=cls.module_name()) + parser.add_argument('-fn', '--fn', dest='function', help='The function', type=str, default="__init__") + parser.add_argument('-kw', '-kwargs', '--kwargs', dest='kwargs', help='key word arguments to the function', type=str, default="{}") + parser.add_argument('-p', '-params', '--params', dest='params', help='key word arguments to the function', type=str, default="{}") + parser.add_argument('-i','-input', '--input', dest='input', help='key word arguments to the function', type=str, default="{}") + parser.add_argument('-args', '--args', dest='args', help='arguments to the function', type=str, default="[]") + args = parser.parse_args() + args.kwargs = json.loads(args.kwargs.replace("'",'"')) + args.params = json.loads(args.params.replace("'",'"')) + args.inputs = json.loads(args.input.replace("'",'"')) + args.args = json.loads(args.args.replace("'",'"')) + args.fn = args.function + # if you pass in the params, it will override the kwargs + if len(args.params) > 0: + if isinstance(args.params, dict): + args.kwargs = args.params + elif isinstance(args.params, list): + args.args = args.params + else: + raise Exception('Invalid params', args.params) + return args + + @classmethod + def run(cls, name:str = None) -> Any: + is_main = name == '__main__' or name == None or name == cls.__name__ + if not is_main: + return {'success':False, 'message':f'Not main module {name}'} + args = cls.argparse() + if args.function == '__init__': + return cls(*args.args, **args.kwargs) + else: + fn = getattr(cls, args.function) + fn_type = cls.classify_fn(fn) + if fn_type == 'self': + module = cls(*args.args, **args.kwargs) + else: + module = cls + return getattr(module, args.function)(*args.args, **args.kwargs) + + @classmethod + def commit_hash(cls, libpath:str = None): + if libpath == None: + libpath = c.libpath + return c.cmd('git rev-parse HEAD', cwd=libpath, verbose=False).split('\n')[0].strip() + + @classmethod + def commit_ticket(cls, 
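A sketch of the quote-swapping trick the argparse handling above relies on: single-quoted pseudo-JSON arriving from the shell is coerced into real JSON before parsing. Note the limitation this inherits: a literal apostrophe inside a value breaks the replacement.

import json

raw = "{'fn': 'info', 'timeout': 10}"      # what the shell typically delivers
print(json.loads(raw.replace("'", '"')))   # {'fn': 'info', 'timeout': 10}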
**kwargs): + commit_hash = cls.commit_hash() + ticket = c.ticket(commit_hash, **kwargs) + assert c.verify(ticket) + return ticket + + @classmethod + def module_fn(cls, module:str, fn:str , args:list = None, kwargs:dict= None): + module = c.module(module) + is_self_method = bool(fn in module.self_functions()) + if is_self_method: + module = module() + fn = getattr(module, fn) + else: + fn = getattr(module, fn) + args = args or [] + kwargs = kwargs or {} + return fn(*args, **kwargs) + + fn = module_fn + + @classmethod + def info_hash(self): + return c.commit_hash() + + @classmethod + def module(cls,module: Any = 'module' , verbose=False, **kwargs): + ''' + Wraps a python class as a module + ''' + t0 = c.time() + module_class = c.get_module(module,**kwargs) + latency = c.time() - t0 + c.print(f'Loaded {module} in {latency} seconds', color='green', verbose=verbose) + return module_class + + + _module = m = mod = module + + # UNDER CONSTRUCTION (USE WITH CAUTION) + + def setattr(self, k, v): + setattr(self, k, v) + + @classmethod + def pip_exists(cls, lib:str, verbose:str=True): + return bool(lib in cls.pip_libs()) + + @classmethod + def version(cls, lib:str=libname): + lines = [l for l in cls.cmd(f'pip3 list', verbose=False).split('\n') if l.startswith(lib)] + if len(lines)>0: + return lines[0].split(' ')[-1].strip() + else: + return f'No Library Found {lib}' + + def forward(self, a=1, b=2): + return a+b + + ### DICT LAND ### + + def to_dict(self)-> Dict: + return self.__dict__ + + @classmethod + def from_dict(cls, input_dict:Dict[str, Any]) -> 'Module': + return cls(**input_dict) + + def to_json(self) -> str: + state_dict = self.to_dict() + assert isinstance(state_dict, dict), 'State dict must be a dictionary' + assert self.jsonable(state_dict), 'State dict must be jsonable' + return json.dumps(state_dict) + + @classmethod + def from_json(cls, json_str:str) -> 'Module': + import json + return cls.from_dict(json.loads(json_str)) + + @classmethod + def test_fns(cls, *args, **kwargs): + return [f for f in cls.functions(*args, **kwargs) if f.startswith('test_')] + + @classmethod + def argv(cls, include_script:bool = False): + import sys + args = sys.argv + if include_script: + return args + else: + return args[1:] + + @classmethod + def is_file_module(cls, module = None) -> bool: + if module != None: + cls = c.module(module) + dirpath = cls.dirpath() + filepath = cls.filepath() + return bool(dirpath.split('/')[-1] != filepath.split('/')[-1].split('.')[0]) + + @classmethod + def is_folder_module(cls, module = None) -> bool: + if module != None: + cls = c.module(module) + return not cls.is_file_module() + + is_module_folder = is_folder_module + + @classmethod + def get_key(cls,key:str = None ,mode='commune', **kwargs) -> None: + mode2module = { + 'commune': 'key', + 'subspace': 'subspace.key', + 'substrate': 'web3.account.substrate', + 'evm': 'web3.account.evm', + 'aes': 'key.aes', + } + + key = cls.resolve_keypath(key) + if 'Keypair' in c.type_str(key): + return key + module = c.module(mode2module[mode]) + if hasattr(module, 'get_key'): + key = module.get_key(key, **kwargs) + else: + key = module(key, **kwargs) + + return key + + @classmethod + def id(self): + return self.key.ss58_address + + @property + def ss58_address(self): + if not hasattr(self, '_ss58_address'): + self._ss58_address = self.key.ss58_address + return self._ss58_address + + @ss58_address.setter + def ss58_address(self, value): + self._ss58_address = value + return self._ss58_address + + @classmethod + def readme_paths(cls): + 
readme_paths = [f for f in c.ls(cls.dirpath()) if f.endswith('md')] + return readme_paths + + @classmethod + def has_readme(cls): + return len(cls.readme_paths()) > 0 + + @classmethod + def readme(cls) -> str: + readme_paths = cls.readme_paths() + if len(readme_paths) == 0: + return '' + return c.get_text(readme_paths[0]) + + @classmethod + def encrypt(cls, + data: Union[str, bytes], + key: str = None, + password: str = None, + **kwargs + ) -> bytes: + """ + encrypt data with key + """ + key = c.get_key(key) + return key.encrypt(data, password=password,**kwargs) + + @classmethod + def decrypt(cls, + data: Union[str, bytes], + key: str = None, + password : str = None, + **kwargs) -> bytes: + key = c.get_key(key) + return key.decrypt(data, password=password, **kwargs) + + @classmethod + def type_str(cls, x): + return type(x).__name__ + + @classmethod + def keys(cls, search = None, ss58=False,*args, **kwargs): + if search == None: + search = cls.module_name() + if search == 'module': + search = None + keys = c.module('key').keys(search, *args, **kwargs) + if ss58: + keys = [c.get_key_address(k) for k in keys] + return keys + + @classmethod + def get_mem(cls, *args, **kwargs): + return c.module('key').get_mem(*args, **kwargs) + + mem = get_mem + + @classmethod + def set_key(self, key:str, **kwargs) -> None: + key = self.get_key(key) + self.key = key + return key + + @classmethod + def resolve_keypath(cls, key = None): + if key == None: + key = cls.module_name() + return key + + def resolve_key(self, key: str = None) -> str: + if key == None: + if hasattr(self, 'key'): + key = self.key + key = self.resolve_keypath(key) + key = self.get_key(key) + return key + + def sign(self, data:dict = None, key: str = None, **kwargs) -> bool: + return self.resolve_key(key).sign(data, **kwargs) + + @classmethod + def verify(cls, auth, key=None, **kwargs ) -> bool: + return c.get_key(key).verify(auth, **kwargs) + + @classmethod + def verify_ticket(cls, auth, key=None, **kwargs ) -> bool: + return c.get_key(key).verify_ticket(auth, **kwargs) + + @classmethod + def start(cls, *args, **kwargs): + return cls(*args, **kwargs) + + def remove_user(self, key: str) -> None: + if not hasattr(self, 'users'): + self.users = [] + self.users.pop(key, None) + + @classmethod + def is_pwd(cls, module:str = None): + if module != None: + module = c.module(module) + else: + module = cls + return module.dirpath() == c.pwd() + + + @classmethod + def shortcuts(cls, cache=True) -> Dict[str, str]: + return c.get_yaml(f'{cls.dirpath()}/shortcuts.yaml') + + def __repr__(self) -> str: + return f'<{self.class_name()}' + def __str__(self) -> str: + return f'<{self.class_name()}' + + + @classmethod + def get_commune(cls): + from commune import c + return c + + def pull(self): + return c.cmd('git pull', verbose=True, cwd=c.libpath) + + def push(self, msg:str = 'update'): + c.cmd('git add .', verbose=True, cwd=c.libpath) + c.cmd(f'git commit -m "{msg}"', verbose=True, cwd=c.libpath) + return c.cmd('git push', verbose=True, cwd=c.libpath) + @classmethod + def base_config(cls, cache=True): + if cache and hasattr(cls, '_base_config'): + return cls._base_config + cls._base_config = cls.get_yaml(cls.config_path()) + return cls._base_config + + @classmethod + def local_config(cls, filename_options = ['module', 'commune', 'config', 'cfg'], cache=True): + if cache and hasattr(cls, '_local_config'): + return cls._local_config + local_config = {} + for filename in filename_options: + if os.path.exists(f'./{filename}.yaml'): + local_config = 
cls.get_yaml(f'./{filename}.yaml') + if local_config != None: + break + cls._local_config = local_config + return cls._local_config + + @classmethod + def local_module(cls, filename_options = ['module', 'agent', 'block'], cache=True): + for filename in filename_options: + path = os.path.dirname(f'./{filename}.py') + for filename in filename_options: + if os.path.exists(path): + classes = cls.find_classes(path) + if len(classes) > 0: + return classes[-1] + return None + + # local update + @classmethod + def update(cls, + module = None, + namespace: bool = False, + subspace: bool = False, + network: str = 'local', + **kwargs + ): + responses = [] + if module != None: + return c.module(module).update() + # update local namespace + if namespace: + responses.append(c.namespace(network=network, update=True)) + return {'success': True, 'responses': responses} + + @classmethod + def set_key(self, key:str, **kwargs) -> None: + key = self.get_key(key) + self.key = key + return key + + @classmethod + def resolve_keypath(cls, key = None): + if key == None: + key = cls.module_name() + return key + + def sign(self, data:dict = None, key: str = None, **kwargs) -> bool: + key = self.resolve_key(key) + signature = key.sign(data, **kwargs) + return signature + + def logs(self, name:str = None, verbose: bool = False): + return c.pm2_logs(name, verbose=verbose) + + def hardware(self, *args, **kwargs): + return c.obj('commune.utils.os.hardware')(*args, **kwargs) + + def set_params(self,*args, **kwargs): + return self.set_config(*args, **kwargs) + + def init_module(self,*args, **kwargs): + return self.set_config(*args, **kwargs) + + + + + helper_functions = ['info', + 'metadata', + 'schema', + 'server_name', + 'is_admin', + 'namespace', + 'whitelist', + 'endpoints', + 'forward', + 'module_name', + 'class_name', + 'name', + 'address', + 'fns'] # whitelist of helper functions to load + + def add_endpoint(self, name, fn): + setattr(self, name, fn) + self.endpoints.append(name) + assert hasattr(self, name), f'{name} not added to {self.__class__.__name__}' + return {'success':True, 'message':f'Added {fn} to {self.__class__.__name__}'} + + def is_endpoint(self, fn) -> bool: + if isinstance(fn, str): + fn = getattr(self, fn) + return hasattr(fn, '__metadata__') + + def get_endpoints(self, search: str =None , helper_fn_attributes = ['helper_functions', + 'whitelist', + '_endpoints', + '__endpoints___']): + endpoints = [] + for k in helper_fn_attributes: + if hasattr(self, k): + fn_obj = getattr(self, k) + if callable(fn_obj): + endpoints += fn_obj() + else: + endpoints += fn_obj + for f in dir(self): + try: + if not callable(getattr(self, f)) or (search != None and search not in f): + continue + fn_obj = getattr(self, f) # you need to watchout for properties + is_endpoint = hasattr(fn_obj, '__metadata__') + if is_endpoint: + endpoints.append(f) + except Exception as e: + print(f'Error in get_endpoints: {e} for {f}') + return sorted(list(set(endpoints))) + + endpoints = get_endpoints + + + def cost_fn(self, fn:str, args:list, kwargs:dict): + return 1 + + @classmethod + def endpoint(cls, + cost=1, # cost per call + user2rate : dict = None, + rate_limit : int = 100, # calls per minute + timestale : int = 60, + public:bool = False, + cost_keys = ['cost', 'w', 'weight'], + **kwargs): + + for k in cost_keys: + if k in kwargs: + cost = kwargs[k] + break + + def decorator_fn(fn): + metadata = { + **cls.fn_schema(fn), + 'cost': cost, + 'rate_limit': rate_limit, + 'user2rate': user2rate, + 'timestale': timestale, + 'public': 
public,
+            }
+            import commune as c
+            fn.__dict__['__metadata__'] = metadata
+
+            return fn
+
+        return decorator_fn
+
+    def metadata(self, to_string=False):
+        if hasattr(self, '_metadata'):
+            return self._metadata
+        metadata = {}
+        metadata['schema'] = self.schema()
+        metadata['description'] = self.description
+        metadata['urls'] = {k: v for k,v in self.urls.items() if v != None}
+        if to_string:
+            return self.python2str(metadata)
+        self._metadata = metadata
+        return metadata
+
+    def info(self,
+             module = None,
+             lite_features = ['name', 'address', 'schema', 'key', 'description'],
+             lite = True,
+             cost = False,
+             **kwargs
+             ) -> Dict[str, Any]:
+        '''
+        Returns the module's public info (name, address, key and schema).
+        '''
+        info = self.metadata()
+        info['name'] = self.server_name or self.module_name()
+        info['address'] = self.address
+        info['key'] = self.key.ss58_address
+        return info
+
+    @classmethod
+    def is_public(cls, fn):
+        if not cls.is_endpoint(fn):
+            return False
+        return getattr(fn, '__metadata__')['public']
+
+    urls = {'github': None,
+            'website': None,
+            'docs': None,
+            'twitter': None,
+            'discord': None,
+            'telegram': None,
+            'linkedin': None,
+            'email': None}
+
+    def schema(self,
+               search = None,
+               docs: bool = True,
+               defaults: bool = True,
+               cache = True) -> 'Schema':
+        if self.is_str_fn(search):
+            return self.fn_schema(search, docs=docs, defaults=defaults)
+        schema = {}
+        if cache and self._schema != None:
+            return self._schema
+        fns = self.get_endpoints()
+        for fn in fns:
+            if search != None and search not in fn:
+                continue
+            if callable(getattr(self, fn)):
+                schema[fn] = self.fn_schema(fn, defaults=defaults, docs=docs)
+        # sort by keys
+        schema = dict(sorted(schema.items()))
+        if cache:
+            self._schema = schema
+
+        return schema
+
+    @classmethod
+    def has_routes(cls):
+        return cls.config().get('routes') is not None
+
+    @classmethod
+    def util_functions(cls, search=None):
+        utils = c.find_functions(c.root_path + '/utils')
+        if search != None:
+            utils = [u for u in utils if search in u]
+        return utils
+
+    def util_modules(self, search=None):
+        return sorted(list(set([f.split('.')[-2] for f in self.util_functions(search)])))
+
+    utils = util_functions
+
+    @classmethod
+    def util2path(cls):
+        util_functions = cls.util_functions()
+        util2path = {}
+        for f in util_functions:
+            util2path[f.split('.')[-1]] = f
+        return util2path
+
+    @classmethod
+    def add_utils(cls, obj=None):
+        obj = obj or cls
+        from functools import partial
+        utils = obj.util2path()
+        def wrapper_fn2(fn, *args, **kwargs):
+            try:
+                fn = c.import_object(fn)
+                return fn(*args, **kwargs)
+            except:
+                fn = fn.split('.')[-1]
+                return getattr(c, fn)(*args, **kwargs)
+        for k, fn in utils.items():
+            setattr(obj, k, partial(wrapper_fn2, fn))
+        return {'success': True, 'message': 'added utils'}
+
+    route_cache = None
+
+    @classmethod
+    def routes(cls, cache=True):
+        if cls.route_cache is not None and cache:
+            return cls.route_cache
+        routes_path = os.path.dirname(__file__) + '/routes.yaml'
+        routes = cls.get_yaml(routes_path)
+        cls.route_cache = routes
+        return routes
+
+    #### THE FINAL TOUCH: ROUTE ALL OF THE MODULES TO THE CURRENT MODULE BASED ON THE routes CONFIG
+
+    @classmethod
+    def route_fns(cls):
+        routes = cls.routes()
+        route_fns = []
+        for module, fns in routes.items():
+            for fn in fns:
+                if isinstance(fn, dict):
+                    fn = fn['to']
+                elif isinstance(fn, list):
+                    fn = fn[1]
+                elif isinstance(fn, str):
+                    pass # a plain string is exposed under its own name
+                else:
+                    raise ValueError(f'Invalid route {fn}')
+                route_fns.append(fn)
+        return route_fns
+
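For reference, route_fns accepts three spellings per route; a minimal sketch of the flattening, with illustrative module and function names rather than the real routes.yaml contents:

import yaml

routes_yaml = """
key:
  - encrypt                          # plain string: exposed under its own name
  - [sign_data, sign]                # pair: from 'sign_data' to 'sign'
  - {from: verify_data, to: verify}  # dict: explicit from/to
"""

route_fns = []
for module, fns in yaml.safe_load(routes_yaml).items():
    for fn in fns:
        if isinstance(fn, dict):
            fn = fn['to']
        elif isinstance(fn, list):
            fn = fn[1]
        route_fns.append(fn)

print(route_fns)  # ['encrypt', 'sign', 'verify']

The binding itself (enable_routes, just below) attaches each resolved name as a partial function; a sketch of that placeholder pattern, where keyword-only placeholders keep the bound values from colliding with user kwargs (the registry here is illustrative):

from functools import partial

registry = {'math': {'add': lambda a, b: a + b}}

def fn_generator(*args, module_ph, fn_ph, **kwargs):
    return registry[module_ph][fn_ph](*args, **kwargs)

add = partial(fn_generator, module_ph='math', fn_ph='add')
add.__name__ = 'add'  # partials carry no __name__ until one is assigned
print(add(2, 3))      # 5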
+    @staticmethod
+    def resolve_to_from_fn_routes(fn):
+        '''
+        resolve the from and to function names from a route entry
+        option 1:
+            {fn: 'fn_name', name: 'name_in_current_module'}
+        option 2:
+            {from: 'fn_name', to: 'name_in_current_module'}
+        '''
+        if type(fn) in [list, set, tuple] and len(fn) == 2:
+            # option 1: ['fn_name', 'name_in_current_module']
+            from_fn = fn[0]
+            to_fn = fn[1]
+        elif isinstance(fn, dict):
+            if 'fn' in fn and 'name' in fn:
+                to_fn = fn['name']
+                from_fn = fn['fn']
+            elif 'from' in fn and 'to' in fn:
+                from_fn = fn['from']
+                to_fn = fn['to']
+            else:
+                raise ValueError(f'Invalid route {fn}')
+        else:
+            from_fn = fn
+            to_fn = fn
+
+        return from_fn, to_fn
+
+    @classmethod
+    def enable_routes(cls, routes:dict=None, verbose=False):
+        from functools import partial
+        """
+        This ties other modules into the current module.
+        It takes a module name and a function name and creates a partial
+        function bound to that module, which lets the function be called
+        as if it were a method of the current module.
+        """
+        my_path = cls.class_name()
+        if not hasattr(cls, 'routes_enabled'):
+            cls.routes_enabled = False
+
+        t0 = cls.time()
+
+        # WARNING: THE PLACEHOLDERS MUST NOT INTERFERE WITH THE KWARGS, OTHERWISE A COLLISION WILL CAUSE A BUG.
+        # THE PLACEHOLDERS ARE NAMED module_ph AND fn_ph AND ARE UNLIKELY TO INTERFERE WITH THE KWARGS.
+        def fn_generator(*args, module_ph, fn_ph, **kwargs):
+            module_ph = cls.module(module_ph)
+            fn_type = module_ph.classify_fn(fn_ph)
+            module_ph = module_ph() if fn_type == 'self' else module_ph
+            return getattr(module_ph, fn_ph)(*args, **kwargs)
+
+        if routes == None:
+            if not hasattr(cls, 'routes'):
+                return {'success': False, 'msg': 'routes not found'}
+            routes = cls.routes() if callable(cls.routes) else cls.routes
+        for m, fns in routes.items():
+            if fns in ['all', '*']:
+                fns = c.functions(m)
+            for fn in fns:
+                # resolve the from and to function names
+                from_fn, to_fn = cls.resolve_to_from_fn_routes(fn)
+                # create a partial function that is bound to the module
+                fn_obj = partial(fn_generator, fn_ph=from_fn, module_ph=m)
+                # keep the function as close to the original function as possible
+                fn_obj.__name__ = to_fn
+                # set the function on the current module
+                setattr(cls, to_fn, fn_obj)
+                cls.print(f'ROUTE({m}.{fn} -> {my_path}:{fn})', verbose=verbose)
+
+        t1 = cls.time()
+        cls.print(f'enabled routes in {t1-t0} seconds', verbose=verbose)
+        cls.routes_enabled = True
+        return {'success': True, 'msg': 'enabled routes'}
+
+    @classmethod
+    def fn2module(cls):
+        '''
+        get the module of a function
+        '''
+        routes = cls.routes()
+        fn2module = {}
+        for module, fn_routes in routes.items():
+            for fn_route in fn_routes:
+                if isinstance(fn_route, dict):
+                    fn_route = fn_route['to']
+                elif isinstance(fn_route, list):
+                    fn_route = fn_route[1]
+                fn2module[fn_route] = module
+        return fn2module
+
+    @classmethod
+    def is_route(cls, fn):
+        '''
+        check if a function is a route
+        '''
+        return fn in cls.fn2module()
+
+    @classmethod
+    def has_test_module(cls, module=None):
+        module = module or cls.module_name()
+        return cls.module_exists(module + '.test')
+
+    @classmethod
+    def test(cls,
+             module=None,
+             timeout=42,
+             trials=3,
+             parallel=True,
+             ):
+        module = module or cls.module_name()
+
+        if c.module_exists(module + '.test'):
+            module = module + '.test'
+        print(f'testing {module}')
+        module = c.module(module)()
+        test_fns = module.test_fns()
+
+        def trial_wrapper(fn, trials=trials):
+            def 
trial_fn(trials=trials): + + for i in range(trials): + try: + return fn() + except Exception as e: + print(f'Error: {e}, Retrying {i}/{trials}') + cls.sleep(1) + return False + return trial_fn + fn2result = {} + if parallel: + future2fn = {} + for fn in test_fns: + f = cls.submit(trial_wrapper(getattr(module, fn)), timeout=timeout) + future2fn[f] = fn + for f in cls.as_completed(future2fn, timeout=timeout): + fn = future2fn.pop(f) + fn2result[fn] = f.result() + else: + for fn in self.test_fns(): + print(f'testing {fn}') + fn2result[fn] = trial_wrapper(getattr(self, fn))() + return fn2result + + + @classmethod + def add_to_globals(cls, globals_input:dict = None): + from functools import partial + globals_input = globals_input or {} + for k,v in c.__dict__.items(): + globals_input[k] = v + + for f in c.class_functions() + c.static_functions(): + globals_input[f] = getattr(c, f) + + for f in c.self_functions(): + def wrapper_fn(f, *args, **kwargs): + try: + fn = getattr(Module(), f) + except: + fn = getattr(Module, f) + return fn(*args, **kwargs) + + globals_input[f] = partial(wrapper_fn, f) + + return globals_input + + + + @classmethod + def critical(cls, *args, **kwargs): + console = cls.resolve_console() + return console.critical(*args, **kwargs) + + @classmethod + def resolve_console(cls, console = None, **kwargs): + if hasattr(cls,'console'): + return cls.console + import logging + from rich.logging import RichHandler + from rich.console import Console + logging.basicConfig( handlers=[RichHandler()]) + # print the line number + console = Console() + cls.console = console + return console + + @classmethod + def print(cls, *text:str, + color:str=None, + verbose:bool = True, + console: 'Console' = None, + flush:bool = False, + buffer:str = None, + **kwargs): + + if not verbose: + return + if color == 'random': + color = cls.random_color() + if color: + kwargs['style'] = color + + if buffer != None: + text = [buffer] + list(text) + [buffer] + + console = cls.resolve_console(console) + try: + if flush: + console.print(**kwargs, end='\r') + console.print(*text, **kwargs) + except Exception as e: + print(e) + @classmethod + def success(cls, *args, **kwargs): + logger = cls.resolve_logger() + return logger.success(*args, **kwargs) + + @classmethod + def error(cls, *args, **kwargs): + logger = cls.resolve_logger() + return logger.error(*args, **kwargs) + + @classmethod + def debug(cls, *args, **kwargs): + logger = cls.resolve_logger() + return logger.debug(*args, **kwargs) + + @classmethod + def warning(cls, *args, **kwargs): + logger = cls.resolve_logger() + return logger.warning(*args, **kwargs) + @classmethod + def status(cls, *args, **kwargs): + console = cls.resolve_console() + return console.status(*args, **kwargs) + @classmethod + def log(cls, *args, **kwargs): + console = cls.resolve_console() + return console.log(*args, **kwargs) + + ### LOGGER LAND ### + @classmethod + def resolve_logger(cls, logger = None): + if not hasattr(cls,'logger'): + from loguru import logger + cls.logger = logger.opt(colors=True) + if logger is not None: + cls.logger = logger + return cls.logger + + @staticmethod + def echo(x): + return x + + + + @classmethod + def check_pid(cls, pid): + """ Check For the existence of a unix pid. 
""" + try: + os.kill(pid, 0) + except OSError: + return False + else: + return True + @staticmethod + def kill_process(pid): + import signal + if isinstance(pid, str): + pid = int(pid) + + os.kill(pid, signal.SIGKILL) + + @classmethod + def path_exists(cls, path:str): + return os.path.exists(path) + + @classmethod + def ensure_path(cls, path): + """ + ensures a dir_path exists, otherwise, it will create it + """ + + dir_path = os.path.dirname(path) + if not os.path.isdir(dir_path): + os.makedirs(dir_path, exist_ok=True) + + return path + + + @staticmethod + def seed_everything(seed: int) -> None: + import torch, random + import numpy as np + "seeding function for reproducibility" + random.seed(seed) + os.environ["PYTHONHASHSEED"] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + + @staticmethod + def cpu_count(): + return os.cpu_count() + + num_cpus = cpu_count + + @staticmethod + def get_env(key:str): + return os.environ.get(key) + + @staticmethod + def set_env(key:str, value:str): + os.environ[key] = value + return {'success': True, 'key': key, 'value': value} + + @staticmethod + def get_cwd(): + return os.getcwd() + + @staticmethod + def set_cwd(path:str): + return os.chdir(path) + + + @staticmethod + def get_pid(): + return os.getpid() + + @classmethod + def memory_usage_info(cls, fmt='gb'): + import psutil + process = psutil.Process(os.getpid()) + memory_info = process.memory_info() + response = { + 'rss': memory_info.rss, + 'vms': memory_info.vms, + 'pageins' : memory_info.pageins, + 'pfaults': memory_info.pfaults, + } + + + for key, value in response.items(): + response[key] = cls.format_data_size(value, fmt=fmt) + + return response + + + + @classmethod + def memory_info(cls, fmt='gb'): + import psutil + + """ + Returns the current memory usage and total memory of the system. 
+ """ + # Get memory statistics + memory_stats = psutil.virtual_memory() + + # Total memory in the system + response = { + 'total': memory_stats.total, + 'available': memory_stats.available, + 'used': memory_stats.total - memory_stats.available, + 'free': memory_stats.available, + 'active': memory_stats.active, + 'inactive': memory_stats.inactive, + 'percent': memory_stats.percent, + 'ratio': memory_stats.percent/100, + } + + for key, value in response.items(): + if key in ['percent', 'ratio']: + continue + response[key] = cls.format_data_size(value, fmt=fmt) + + return response + + @classmethod + def virtual_memory_available(cls): + import psutil + return psutil.virtual_memory().available + + @classmethod + def virtual_memory_total(cls): + import psutil + return psutil.virtual_memory().total + + @classmethod + def virtual_memory_percent(cls): + import psutil + return psutil.virtual_memory().percent + + @classmethod + def cpu_type(cls): + import platform + return platform.processor() + + @classmethod + def cpu_info(cls): + + return { + 'cpu_count': cls.cpu_count(), + 'cpu_type': cls.cpu_type(), + } + + + def cpu_usage(self): + import psutil + # get the system performance data for the cpu + cpu_usage = psutil.cpu_percent() + return cpu_usage + + + + @classmethod + def gpu_memory(cls): + import torch + return torch.cuda.memory_allocated() + + @classmethod + def num_gpus(cls): + import torch + return torch.cuda.device_count() + + + @classmethod + def gpus(cls): + return list(range(cls.num_gpus())) + + def add_rsa_key(cls, b=2048, t='rsa'): + return cls.cmd(f"ssh-keygen -b {b} -t {t}") + + + @classmethod + def stream_output(cls, process, verbose=False): + try: + modes = ['stdout', 'stderr'] + for mode in modes: + pipe = getattr(process, mode) + if pipe == None: + continue + for line in iter(pipe.readline, b''): + line = line.decode('utf-8') + if verbose: + cls.print(line[:-1]) + yield line + except Exception as e: + print(e) + pass + + cls.kill_process(process) + + @classmethod + def cmd(cls, + command:Union[str, list], + *args, + verbose:bool = False , + env:Dict[str, str] = {}, + sudo:bool = False, + password: bool = None, + bash : bool = False, + return_process: bool = False, + generator: bool = False, + color : str = 'white', + cwd : str = None, + **kwargs) -> 'subprocess.Popen': + + ''' + Runs a command in the shell. 
+ + ''' + + if len(args) > 0: + command = ' '.join([command] + list(args)) + + + if password != None: + sudo = True + + if sudo: + command = f'sudo {command}' + + + if bash: + command = f'bash -c "{command}"' + + cwd = cls.resolve_path(cwd) + + env = {**os.environ, **env} + + process = subprocess.Popen(shlex.split(command), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + cwd = cwd, + env=env, **kwargs) + if return_process: + return process + streamer = cls.stream_output(process, verbose=verbose) + if generator: + return streamer + else: + text = '' + for ch in streamer: + text += ch + return text + + @staticmethod + def kill_process(process): + import signal + process_id = process.pid + process.stdout.close() + process.send_signal(signal.SIGINT) + process.wait() + return {'success': True, 'msg': 'process killed', 'pid': process_id} + # sys.exit(0) + + @staticmethod + def format_data_size(x: Union[int, float], fmt:str='b', prettify:bool=False): + assert type(x) in [int, float, str], f'x must be int or float, not {type(x)}' + x = float(x) + fmt2scale = { + 'b': 1, + 'kb': 1000, + 'mb': 1000**2, + 'gb': 1000**3, + 'GiB': 1024**3, + 'tb': 1000**4, + } + + assert fmt in fmt2scale.keys(), f'fmt must be one of {fmt2scale.keys()}' + scale = fmt2scale[fmt] + x = x/scale + + return x + + + @classmethod + def disk_info(cls, path:str = '/', fmt:str='gb'): + path = cls.resolve_path(path) + import shutil + response = shutil.disk_usage(path) + response = { + 'total': response.total, + 'used': response.used, + 'free': response.free, + } + for key, value in response.items(): + response[key] = cls.format_data_size(value, fmt=fmt) + return response + + + @classmethod + def mv(cls, path1, path2): + + assert os.path.exists(path1), path1 + if not os.path.isdir(path2): + path2_dirpath = os.path.dirname(path2) + if not os.path.isdir(path2_dirpath): + os.makedirs(path2_dirpath, exist_ok=True) + shutil.move(path1, path2) + assert os.path.exists(path2), path2 + assert not os.path.exists(path1), path1 + return path2 + + + @classmethod + def cp(cls, path1:str, path2:str, refresh:bool = False): + import shutil + # what if its a folder? 
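A worked example of the scaling in format_data_size above: every unit is decimal (1 kb = 1000 b), with 'GiB' as the only binary unit, and the function simply divides by the chosen scale.

# format_data_size(2_500_000, 'mb') -> 2.5
assert 2_500_000 / 1000**2 == 2.5
# format_data_size(1024**3, 'GiB') -> 1.0
assert 1024**3 / 1024**3 == 1.0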
+ assert os.path.exists(path1), path1 + if refresh == False: + assert not os.path.exists(path2), path2 + + path2_dirpath = os.path.dirname(path2) + if not os.path.isdir(path2_dirpath): + os.makedirs(path2_dirpath, exist_ok=True) + assert os.path.isdir(path2_dirpath), f'Failed to create directory {path2_dirpath}' + + if os.path.isdir(path1): + shutil.copytree(path1, path2) + + + elif os.path.isfile(path1): + + shutil.copy(path1, path2) + else: + raise ValueError(f'path1 is not a file or a folder: {path1}') + return path2 + + + @classmethod + def cuda_available(cls) -> bool: + import torch + return torch.cuda.is_available() + + @classmethod + def free_gpu_memory(cls): + gpu_info = cls.gpu_info() + return {gpu_id: gpu_info['free'] for gpu_id, gpu_info in gpu_info.items()} + + def most_used_gpu(self): + most_used_gpu = max(self.free_gpu_memory().items(), key=lambda x: x[1])[0] + return most_used_gpu + + def most_used_gpu_memory(self): + most_used_gpu = max(self.free_gpu_memory().items(), key=lambda x: x[1])[1] + return most_used_gpu + + + def least_used_gpu(self): + least_used_gpu = min(self.free_gpu_memory().items(), key=lambda x: x[1])[0] + return least_used_gpu + + def least_used_gpu_memory(self): + least_used_gpu = min(self.free_gpu_memory().items(), key=lambda x: x[1])[1] + return least_used_gpu + + + + + @classmethod + def gpu_info(cls, fmt='gb') -> Dict[int, Dict[str, float]]: + import torch + gpu_info = {} + for gpu_id in cls.gpus(): + mem_info = torch.cuda.mem_get_info(gpu_id) + gpu_info[int(gpu_id)] = { + 'name': torch.cuda.get_device_name(gpu_id), + 'free': mem_info[0], + 'used': (mem_info[1]- mem_info[0]), + 'total': mem_info[1], + 'ratio': mem_info[0]/mem_info[1], + } + + gpu_info_map = {} + + skip_keys = ['ratio', 'total', 'name'] + + for gpu_id, gpu_info in gpu_info.items(): + for key, value in gpu_info.items(): + if key in skip_keys: + continue + gpu_info[key] = cls.format_data_size(value, fmt=fmt) + gpu_info_map[gpu_id] = gpu_info + return gpu_info_map + + + gpu_map =gpu_info + + @classmethod + def hardware(cls, fmt:str='gb'): + return { + 'cpu': cls.cpu_info(), + 'memory': cls.memory_info(fmt=fmt), + 'disk': cls.disk_info(fmt=fmt), + 'gpu': cls.gpu_info(fmt=fmt), + } + + + @classmethod + def get_folder_size(cls, folder_path:str='/'): + folder_path = cls.resolve_path(folder_path) + """Calculate the total size of all files in the folder.""" + total_size = 0 + for root, dirs, files in os.walk(folder_path): + for file in files: + file_path = os.path.join(root, file) + if not os.path.islink(file_path): + total_size += os.path.getsize(file_path) + return total_size + + @classmethod + def find_largest_folder(cls, directory: str = '~/'): + directory = cls.resolve_path(directory) + """Find the largest folder in the given directory.""" + largest_size = 0 + largest_folder = "" + + for folder_name in os.listdir(directory): + folder_path = os.path.join(directory, folder_name) + if os.path.isdir(folder_path): + folder_size = cls.get_folder_size(folder_path) + if folder_size > largest_size: + largest_size = folder_size + largest_folder = folder_path + + return largest_folder, largest_size + + + @classmethod + def getcwd(*args, **kwargs): + return os.getcwd(*args, **kwargs) + + + @classmethod + def argv(cls, include_script:bool = False): + args = sys.argv + if include_script: + return args + else: + return args[1:] + + @classmethod + def mv(cls, path1, path2): + assert os.path.exists(path1), path1 + if not os.path.isdir(path2): + path2_dirpath = os.path.dirname(path2) + if not 
os.path.isdir(path2_dirpath):
+                os.makedirs(path2_dirpath, exist_ok=True)
+        shutil.move(path1, path2)
+        assert os.path.exists(path2), path2
+        assert not os.path.exists(path1), path1
+        return {'success': True, 'msg': f'Moved {path1} to {path2}'}
+
+    @classmethod
+    def sys_path(cls):
+        return sys.path
+
+    @classmethod
+    def gc(cls):
+        gc.collect()
+        return {'success': True, 'msg': 'garbage collected'}
+
+    @staticmethod
+    def get_pid():
+        return os.getpid()
+
+    @classmethod
+    def nest_asyncio(cls):
+        import nest_asyncio
+        nest_asyncio.apply()
+
+    @staticmethod
+    def memory_usage(fmt='gb'):
+        import psutil
+        fmt2scale = {'b': 1, 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3}
+        assert fmt in fmt2scale, f'fmt must be one of {list(fmt2scale.keys())}'
+        process = psutil.Process()
+        # rss is reported in bytes, so divide by the requested scale
+        return process.memory_info().rss / fmt2scale[fmt]
+
+    @classmethod
+    def get_env(cls, key:str) -> str:
+        '''
+        Gets an environment variable, raising a KeyError if it is unset
+        '''
+        return os.environ[key]
+
+    env = get_env
+
+    def set_config(self, config:Optional[Union[str, dict]]=None ) -> 'Munch':
+        '''
+        Set the config as well as its local params
+        '''
+        # in case a locals() dict was passed in, resolve the kwargs to avoid ambiguous args
+        config = config or {}
+        config = {**self.config(), **config}
+        if isinstance(config, dict):
+            config = c.dict2munch(config)
+        self.config = config
+        return self.config
+
+    def config_exists(self, path:str=None) -> bool:
+        '''
+        Returns true if the config exists
+        '''
+        path = path if path else self.config_path()
+        return self.path_exists(path)
+
+    @classmethod
+    def config(cls) -> 'Munch':
+        '''
+        Returns the config
+        '''
+        config = cls.load_config()
+        if not config:
+            if hasattr(cls, 'init_kwargs'):
+                config = cls.init_kwargs() # from _schema.py
+            else:
+                config = {}
+        return config
+
+    @classmethod
+    def load_config(cls, path:str=None,
+                    default=None,
+                    to_munch:bool = True
+                    ) -> Union['Munch', Dict]:
+        '''
+        Args:
+            path: The path to the config file
+            to_munch: If true, then convert the config to a munch
+        '''
+        default = default or {}
+        path = path if path else cls.config_path()
+        if os.path.exists(path):
+            config = cls.load_yaml(path)
+        else:
+            config = default
+        config = config or {}
+        if to_munch:
+            config = cls.dict2munch(config)
+        return config
+
+    @classmethod
+    def save_config(cls, config:Union['Munch', Dict]= None, path:str=None) -> 'Munch':
+        from copy import deepcopy
+        from munch import Munch
+        '''
+        Saves the config to a yaml file
+        '''
+        if config == None:
+            config = cls.config()
+        if isinstance(config, Munch):
+            config = cls.munch2dict(deepcopy(config))
+        elif isinstance(config, dict):
+            config = deepcopy(config)
+        else:
+            raise ValueError(f'config must be a dict or munch, not {type(config)}')
+        assert isinstance(config, dict), f'config must be a dict, not {config}'
+        config = cls.save_yaml(data=config , path=path)
+        return config
+
+    @classmethod
+    def munch(cls, x:dict, recursive:bool=True)-> 'Munch':
+        from munch import Munch
+        '''
+        Turn dictionary into Munch
+        '''
+        if isinstance(x, dict):
+            for k,v in x.items():
+                if isinstance(v, dict) and recursive:
+                    x[k] = cls.dict2munch(v)
+            x = Munch(x)
+        return x
+
+    dict2munch = munch
+
+    @classmethod
+    def munch2dict(cls, x:'Munch', recursive:bool=True)-> dict:
+        from munch import Munch
+        '''
+        Turn munch object into dictionary
+        '''
+        if isinstance(x, Munch):
+            x = dict(x)
+            for k,v in x.items():
+                if isinstance(v, Munch) and recursive:
+                    x[k] = cls.munch2dict(v)
+        return x
+
+    to_dict = munch2dict
+
+    @classmethod
+    def 
has_config(cls) -> bool: + + try: + return os.path.exists(cls.config_path()) + except: + return False + + @classmethod + def config_path(cls) -> str: + return os.path.abspath('./config.yaml') + + def update_config(self, config): + self.config.update(config) + return self.config + + @classmethod + def base_config(cls, cache=True): + if cache and hasattr(cls, '_base_config'): + return cls._base_config + cls._base_config = cls.get_yaml(cls.config_path()) + return cls._base_config + + + default_port_range = [50050, 50150] # the port range between 50050 and 50150 + + @staticmethod + def int_to_ip(int_val: int) -> str: + r""" Maps an integer to a unique ip-string + Args: + int_val (:type:`int128`, `required`): + The integer representation of an ip. Must be in the range (0, 3.4028237e+38). + + Returns: + str_val (:tyep:`str`, `required): + The string representation of an ip. Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 + + Raises: + netaddr.core.AddrFormatError (Exception): + Raised when the passed int_vals is not a valid ip int value. + """ + import netaddr + return str(netaddr.IPAddress(int_val)) + + @staticmethod + def ip_to_int(str_val: str) -> int: + r""" Maps an ip-string to a unique integer. + arg: + str_val (:tyep:`str`, `required): + The string representation of an ip. Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 + + Returns: + int_val (:type:`int128`, `required`): + The integer representation of an ip. Must be in the range (0, 3.4028237e+38). + + Raises: + netaddr.core.AddrFormatError (Exception): + Raised when the passed str_val is not a valid ip string value. + """ + return int(netaddr.IPAddress(str_val)) + + @staticmethod + def ip_version(str_val: str) -> int: + r""" Returns the ip version (IPV4 or IPV6). + arg: + str_val (:tyep:`str`, `required): + The string representation of an ip. Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 + + Returns: + int_val (:type:`int128`, `required`): + The ip version (Either 4 or 6 for IPv4/IPv6) + + Raises: + netaddr.core.AddrFormatError (Exception): + Raised when the passed str_val is not a valid ip string value. + """ + return int(netaddr.IPAddress(str_val).version) + + @staticmethod + def ip__str__(ip_type:int, ip_str:str, port:int): + """ Return a formatted ip string + """ + return "/ipv%i/%s:%i" % (ip_type, ip_str, port) + + @classmethod + def is_valid_ip(cls, ip:str) -> bool: + r""" Checks if an ip is valid. + Args: + ip (:obj:`str` `required`): + The ip to check. + + Returns: + valid (:obj:`bool` `required`): + True if the ip is valid, False otherwise. + """ + try: + netaddr.IPAddress(ip) + return True + except Exception as e: + return False + + @classmethod + def external_ip(cls, default_ip='0.0.0.0') -> str: + r""" Checks CURL/URLLIB/IPIFY/AWS for your external ip. + Returns: + external_ip (:obj:`str` `required`): + Your routers external facing ip as a string. + + Raises: + Exception(Exception): + Raised if all external ip attempts fail. + """ + # --- Try curl. 
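+        # each lookup below is attempted in order; the first response that
+        # parses as a valid IP address wins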
+ + + + ip = None + try: + ip = cls.cmd('curl -s ifconfig.me') + assert isinstance(cls.ip_to_int(ip), int) + except Exception as e: + print(e) + + if cls.is_valid_ip(ip): + return ip + try: + ip = requests.get('https://api.ipify.org').text + assert isinstance(cls.ip_to_int(ip), int) + except Exception as e: + print(e) + + if cls.is_valid_ip(ip): + return ip + # --- Try AWS + try: + ip = requests.get('https://checkip.amazonaws.com').text.strip() + assert isinstance(cls.ip_to_int(ip), int) + except Exception as e: + print(e) + + if cls.is_valid_ip(ip): + return ip + # --- Try myip.dnsomatic + try: + process = os.popen('curl -s myip.dnsomatic.com') + ip = process.readline() + assert isinstance(cls.ip_to_int(ip), int) + process.close() + except Exception as e: + print(e) + + if cls.is_valid_ip(ip): + return ip + # --- Try urllib ipv6 + try: + ip = urllib.request.urlopen('https://ident.me').read().decode('utf8') + assert isinstance(cls.ip_to_int(ip), int) + except Exception as e: + print(e) + + if cls.is_valid_ip(ip): + return ip + # --- Try Wikipedia + try: + ip = requests.get('https://www.wikipedia.org').headers['X-Client-IP'] + assert isinstance(cls.ip_to_int(ip), int) + except Exception as e: + print(e) + + if cls.is_valid_ip(ip): + return ip + + return default_ip + + @classmethod + def unreserve_port(cls,port:int, + var_path='reserved_ports'): + reserved_ports = cls.get(var_path, {}, root=True) + + port_info = reserved_ports.pop(port,None) + if port_info == None: + port_info = reserved_ports.pop(str(port),None) + + output = {} + if port_info != None: + cls.put(var_path, reserved_ports, root=True) + output['msg'] = 'port removed' + else: + output['msg'] = f'port {port} doesnt exist, so your good' + + output['reserved'] = cls.reserved_ports() + return output + + + + + @classmethod + def unreserve_ports(cls,*ports, + var_path='reserved_ports' ): + reserved_ports = cls.get(var_path, {}) + if len(ports) == 0: + # if zero then do all fam, tehe + ports = list(reserved_ports.keys()) + elif len(ports) == 1 and isinstance(ports[0],list): + ports = ports[0] + ports = list(map(str, ports)) + reserved_ports = {rp:v for rp,v in reserved_ports.items() if not any([p in ports for p in [str(rp), int(rp)]] )} + cls.put(var_path, reserved_ports) + return cls.reserved_ports() + + + @classmethod + def check_used_ports(cls, start_port = 8501, end_port = 8600, timeout=5): + port_range = [start_port, end_port] + used_ports = {} + for port in range(*port_range): + used_ports[port] = cls.port_used(port) + return used_ports + + + @classmethod + def kill_port(cls, port:int): + r""" Kills a process running on the passed port. + Args: + port (:obj:`int` `required`): + The port to kill the process on. 
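+
+            Returns:
+                success (:obj:`bool`):
+                    True if the kill command was issued, False if it raised.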
+ """ + try: + os.system(f'kill -9 $(lsof -t -i:{port})') + except Exception as e: + print(e) + return False + return True + + def kill_ports(self, ports = None, *more_ports): + ports = ports or self.used_ports() + if isinstance(ports, int): + ports = [ports] + if '-' in ports: + ports = list(range([int(p) for p in ports.split('-')])) + ports = list(ports) + list(more_ports) + for port in ports: + self.kill_port(port) + return self.check_used_ports() + + def public_ports(self, timeout=1.0): + import commune as c + futures = [] + for port in self.free_ports(): + c.print(f'Checking port {port}') + futures += [c.submit(self.is_port_open, {'port':port}, timeout=timeout)] + results = c.wait(futures, timeout=timeout) + results = list(map(bool, results)) + return results + + + + def is_port_open(self, port:int, ip:str=None, timeout=0.5): + import commune as c + ip = ip or self.ip() + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + return s.connect_ex((ip, port)) == 0 + return False + + + + @classmethod + def free_ports(cls, n=10, random_selection:bool = False, **kwargs ) -> List[int]: + free_ports = [] + avoid_ports = kwargs.pop('avoid_ports', []) + for i in range(n): + try: + free_ports += [cls.free_port( random_selection=random_selection, + avoid_ports=avoid_ports, **kwargs)] + except Exception as e: + cls.print(f'Error: {e}', color='red') + break + avoid_ports += [free_ports[-1]] + + + return free_ports + + @classmethod + def random_port(cls, *args, **kwargs): + return cls.choice(cls.free_ports(*args, **kwargs)) + + + + + @classmethod + def free_port(cls, + ports = None, + port_range: List[int] = None , + ip:str =None, + avoid_ports = None, + random_selection:bool = True) -> int: + + ''' + + Get an availabldefe port within the {port_range} [start_port, end_poort] and {ip} + ''' + avoid_ports = avoid_ports if avoid_ports else [] + + if ports == None: + port_range = cls.get_port_range(port_range) + ports = list(range(*port_range)) + + ip = ip if ip else cls.default_ip + + if random_selection: + ports = cls.shuffle(ports) + port = None + for port in ports: + if port in avoid_ports: + continue + + if cls.port_available(port=port, ip=ip): + return port + + raise Exception(f'ports {port_range[0]} to {port_range[1]} are occupied, change the port_range to encompase more ports') + + get_available_port = free_port + + + + def check_used_ports(self, start_port = 8501, end_port = 8600, timeout=5): + port_range = [start_port, end_port] + used_ports = {} + for port in range(*port_range): + used_ports[port] = self.port_used(port) + return used_ports + + + + @classmethod + def resolve_port(cls, port:int=None, **kwargs): + + ''' + + Resolves the port and finds one that is available + ''' + if port == None or port == 0: + port = cls.free_port(port, **kwargs) + + if cls.port_used(port): + port = cls.free_port(port, **kwargs) + + return int(port) + + + + @classmethod + def port_available(cls, port:int, ip:str ='0.0.0.0'): + return not cls.port_used(port=port, ip=ip) + + + @classmethod + def port_used(cls, port: int, ip: str = '0.0.0.0', timeout: int = 1): + import socket + if not isinstance(port, int): + return False + + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + # Set the socket timeout + sock.settimeout(timeout) + + # Try to connect to the specified IP and port + try: + port=int(port) + sock.connect((ip, port)) + return True + except socket.error: + return False + + @classmethod + def port_free(cls, *args, **kwargs) -> bool: + return not cls.port_used(*args, **kwargs) + + 
@classmethod + def port_available(cls, port:int, ip:str ='0.0.0.0'): + return not cls.port_used(port=port, ip=ip) + + + + @classmethod + def used_ports(cls, ports:List[int] = None, ip:str = '0.0.0.0', port_range:Tuple[int, int] = None): + ''' + Get availabel ports out of port range + + Args: + ports: list of ports + ip: ip address + + ''' + port_range = cls.resolve_port_range(port_range=port_range) + if ports == None: + ports = list(range(*port_range)) + + async def check_port(port, ip): + return cls.port_used(port=port, ip=ip) + + used_ports = [] + jobs = [] + for port in ports: + jobs += [check_port(port=port, ip=ip)] + + results = cls.wait(jobs) + for port, result in zip(ports, results): + if isinstance(result, bool) and result: + used_ports += [port] + + return used_ports + + @classmethod + def scan_ports(cls,host=None, start_port=None, end_port=None, timeout=24): + if start_port == None and end_port == None: + start_port, end_port = cls.port_range() + if host == None: + host = cls.external_ip() + import socket + open_ports = [] + future2port = {} + for port in range(start_port, end_port + 1): # ports from start_port to end_port + future2port[cls.submit(cls.port_used, kwargs=dict(port=port, ip=host), timeout=timeout)] = port + port2open = {} + for future in cls.as_completed(future2port, timeout=timeout): + port = future2port[future] + port2open[port] = future.result() + # sort the ports + port2open = {k: v for k, v in sorted(port2open.items(), key=lambda item: item[1])} + + return port2open + + @classmethod + def resolve_port(cls, port:int=None, **kwargs): + ''' + Resolves the port and finds one that is available + ''' + if port == None or port == 0: + port = cls.free_port(port, **kwargs) + if cls.port_used(port): + port = cls.free_port(port, **kwargs) + return int(port) + + @classmethod + def has_free_ports(self, n:int = 1, **kwargs): + return len(self.free_ports(n=n, **kwargs)) > 0 + + + @classmethod + def get_port_range(cls, port_range: list = None) -> list: + port_range = cls.get('port_range', cls.default_port_range) + if isinstance(port_range, str): + port_range = list(map(int, port_range.split('-'))) + if len(port_range) == 0: + port_range = cls.default_port_range + port_range = list(port_range) + assert isinstance(port_range, list), 'Port range must be a list' + assert isinstance(port_range[0], int), 'Port range must be a list of integers' + assert isinstance(port_range[1], int), 'Port range must be a list of integers' + return port_range + + @classmethod + def port_range(cls): + return cls.get_port_range() + + @classmethod + def resolve_port_range(cls, port_range: list = None) -> list: + return cls.get_port_range(port_range) + + @classmethod + def set_port_range(cls, *port_range: list): + if '-' in port_range[0]: + port_range = list(map(int, port_range[0].split('-'))) + if len(port_range) ==0 : + port_range = cls.default_port_range + elif len(port_range) == 1: + if port_range[0] == None: + port_range = cls.default_port_range + assert len(port_range) == 2, 'Port range must be a list of two integers' + for port in port_range: + assert isinstance(port, int), f'Port {port} range must be a list of integers' + assert port_range[0] < port_range[1], 'Port range must be a list of integers' + cls.put('port_range', port_range) + return port_range + + @classmethod + def get_port(cls, port:int = None)->int: + port = port if port is not None and port != 0 else cls.free_port() + while cls.port_used(port): + port += 1 + return port + + @classmethod + def port_free(cls, *args, **kwargs) -> 
bool: + return not cls.port_used(*args, **kwargs) + + @classmethod + def port_available(cls, port:int, ip:str ='0.0.0.0'): + return not cls.port_used(port=port, ip=ip) + + @classmethod + def used_ports(cls, ports:List[int] = None, ip:str = '0.0.0.0', port_range:Tuple[int, int] = None): + ''' + Get availabel ports out of port range + + Args: + ports: list of ports + ip: ip address + + ''' + port_range = cls.resolve_port_range(port_range=port_range) + if ports == None: + ports = list(range(*port_range)) + + async def check_port(port, ip): + return cls.port_used(port=port, ip=ip) + + used_ports = [] + jobs = [] + for port in ports: + jobs += [check_port(port=port, ip=ip)] + + results = cls.gather(jobs) + for port, result in zip(ports, results): + if isinstance(result, bool) and result: + used_ports += [port] + + return used_ports + + + get_used_ports = used_ports + + @classmethod + def get_available_ports(cls, port_range: List[int] = None , ip:str =None) -> int: + port_range = cls.resolve_port_range(port_range) + ip = ip if ip else cls.default_ip + + available_ports = [] + # return only when the port is available + for port in range(*port_range): + if not cls.port_used(port=port, ip=ip): + available_ports.append(port) + + return available_ports + available_ports = get_available_ports + + @classmethod + def set_ip(cls, ip): + + cls.put('ip', ip) + return ip + + @classmethod + def ip(cls, max_age=None, update:bool = False, **kwargs) -> str: + ip = cls.get('ip', None, max_age=max_age, update=update) + if ip == None: + ip = cls.external_ip(**kwargs) + cls.put('ip', ip) + return ip + + @classmethod + def resolve_address(cls, address:str = None): + if address == None: + address = c.free_address() + assert isinstance(address, str), 'address must be a string' + return address + + @classmethod + def free_address(cls, **kwargs): + return f'{cls.ip()}:{cls.free_port(**kwargs)}' + + @classmethod + def check_used_ports(cls, start_port = 8501, end_port = 8600, timeout=5): + port_range = [start_port, end_port] + used_ports = {} + for port in range(*port_range): + used_ports[port] = cls.port_used(port) + return used_ports + + @classmethod + def resolve_ip(cls, ip=None, external:bool=True) -> str: + if ip == None: + if external: + ip = cls.external_ip() + else: + ip = '0.0.0.0' + assert isinstance(ip, str) + return ip + + + @classmethod + def put_json(cls, + path:str, + data:Dict, + meta = None, + verbose: bool = False, + **kwargs) -> str: + if meta != None: + data = {'data':data, 'meta':meta} + path = cls.resolve_path(path=path, extension='json') + # cls.lock_file(path) + if isinstance(data, dict): + data = json.dumps(data) + cls.put_text(path, data) + return path + + save_json = put_json + + @classmethod + def rm_json(cls, path=None): + from commune.utils.dict import rm_json + if path in ['all', '**']: + return [cls.rm_json(f) for f in cls.glob(files_only=False)] + path = cls.resolve_path(path=path, extension='json') + return rm_json(path ) + + @classmethod + def rmdir(cls, path): + return shutil.rmtree(path) + + @classmethod + def isdir(cls, path): + path = cls.resolve_path(path=path) + return os.path.isdir(path) + + @classmethod + def isfile(cls, path): + path = cls.resolve_path(path=path) + return os.path.isfile(path) + + @classmethod + def rm_all(cls): + for path in cls.ls(): + cls.rm(path) + return {'success':True, 'message':f'{cls.storage_dir()} removed'} + + @classmethod + def rm(cls, path, extension=None, mode = 'json'): + + assert isinstance(path, str), f'path must be a string, got {type(path)}' + 
path = cls.resolve_path(path=path, extension=extension)
+
+        # in case we want to remove the json file
+        mode_suffix = f'.{mode}'
+        if not os.path.exists(path) and os.path.exists(path + mode_suffix):
+            path += mode_suffix
+
+        if not os.path.exists(path):
+            return {'success':False, 'message':f'{path} does not exist'}
+        if os.path.isdir(path):
+            cls.rmdir(path)
+        if os.path.isfile(path):
+            os.remove(path)
+        assert not os.path.exists(path), f'{path} was not removed'
+        return {'success':True, 'message':f'{path} removed'}
+
+    @classmethod
+    def rm_all(cls):
+        storage_dir = cls.storage_dir()
+        if cls.exists(storage_dir):
+            cls.rm(storage_dir)
+        assert not cls.exists(storage_dir), f'{storage_dir} was not removed'
+        cls.makedirs(storage_dir)
+        assert cls.is_dir_empty(storage_dir), f'{storage_dir} was not removed'
+        return {'success':True, 'message':f'{storage_dir} removed'}
+
+    @classmethod
+    def glob(cls, path =None, files_only:bool = True, recursive:bool=True):
+        import glob
+        path = cls.resolve_path(path, extension=None)
+        if os.path.isdir(path):
+            path = os.path.join(path, '**')
+        paths = glob.glob(path, recursive=recursive)
+        if files_only:
+            paths = list(filter(lambda f: os.path.isfile(f), paths))
+        return paths
+
+    @classmethod
+    def put_cache(cls, k, v):
+        cls.cache[k] = v
+
+    @classmethod
+    def get_cache(cls, k, default=None, **kwargs):
+        v = cls.cache.get(k, default)
+        return v
+
+    @classmethod
+    def get_json(cls,
+                 path:str,
+                 default:Any=None,
+                 verbose: bool = False, **kwargs):
+        path = cls.resolve_path(path=path, extension='json')
+        cls.print(f'Loading json from {path}', verbose=verbose)
+        try:
+            data = cls.get_text(path, **kwargs)
+        except Exception as e:
+            return default
+        if isinstance(data, str):
+            try:
+                data = json.loads(data)
+            except Exception as e:
+                return default
+        if isinstance(data, dict):
+            if 'data' in data and 'meta' in data:
+                data = data['data']
+        return data
+
+    @classmethod
+    async def async_get_json(cls, *args, **kwargs):
+        return cls.get_json(*args, **kwargs)
+
+    load_json = get_json
+
+    @classmethod
+    def file_exists(cls, path:str) -> bool:
+        path = cls.resolve_path(path)
+        exists = os.path.exists(path)
+        return exists
+
+    exists = exists_json = file_exists
+
+    @classmethod
+    def makedirs(cls, *args, **kwargs):
+        return os.makedirs(*args, **kwargs)
+
+    @classmethod
+    def mv(cls, path1, path2):
+        path1 = cls.resolve_path(path1)
+        path2 = cls.resolve_path(path2)
+        assert os.path.exists(path1), path1
+        if not os.path.isdir(path2):
+            path2_dirpath = os.path.dirname(path2)
+            if not os.path.isdir(path2_dirpath):
+                os.makedirs(path2_dirpath, exist_ok=True)
+        shutil.move(path1, path2)
+        assert os.path.exists(path2), path2
+        assert not os.path.exists(path1), path1
+        return path2
+
+    @classmethod
+    def resolve_path(cls, path:str = None, extension=None):
+        '''
+        ### Documentation for `resolve_path` class method
+
+        #### Purpose:
+        The `resolve_path` method is a class method designed to process and resolve file and
+        directory paths based on various inputs and conditions. This method is useful for
+        preparing file paths for operations such as reading, writing, and manipulation.
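+        In short: absolute paths, '~/' paths, and './' paths are respected as given,
+        while bare relative paths are rebased into the module's storage directory.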
+
+        #### Parameters:
+        - `path` (str, optional): The initial path to be resolved. If not provided, the module's storage directory is returned.
+        - `extension` (str, optional): The file extension to append to the path if it is not already present. Defaults to None.
+
+        #### Behavior:
+        - If `path` is not provided, the method returns the module's storage directory.
+        - If `path` starts with '/', it is returned as is.
+        - If `path` starts with '~/', it is expanded to the user's home directory.
+        - If `path` starts with './', it is resolved to an absolute path.
+        - Any other value is treated as a relative path and, if it does not already contain the storage directory, is joined onto it.
+        - If `extension` is provided and `path` does not already end with it, the extension is appended.
+
+        #### Returns:
+        - `str`: The resolved path, ready for further file operations.
+
+        #### Example Usage:
+        ```python
+        # Resolve a path in relation to the class's storage directory
+        file_path = MyClassName.resolve_path('data/subfolder/file', extension='txt')
+        ```
+
+        #### Notes:
+        - This method relies on the `os` module to perform path manipulations and checks.
+        - It can handle various input path formats, simplifying file path resolution in the class's context.
+        '''
+
+        if path == None:
+            return cls.storage_dir()
+
+        if path.startswith('/'):
+            path = path
+        elif path.startswith('~'):
+            path = os.path.expanduser(path)
+        elif path.startswith('.'):
+            path = os.path.abspath(path)
+        else:
+            # if it is a relative path, then it is relative to the module path
+            # ex: 'data' -> '.commune/path_module/data'
+            storage_dir = cls.storage_dir()
+            if storage_dir not in path:
+                path = os.path.join(storage_dir, path)
+
+        if extension != None and not path.endswith(extension):
+            path = path + '.' 
+ extension + + return path + + + + @staticmethod + def ensure_path( path): + """ + ensures a dir_path exists, otherwise, it will create it + """ + + dir_path = os.path.dirname(path) + if not os.path.isdir(dir_path): + os.makedirs(dir_path, exist_ok=True) + + return path + + @staticmethod + async def async_write(path, data, mode ='w'): + import aiofiles + async with aiofiles.open(path, mode=mode) as f: + await f.write(data) + + @classmethod + def put_yaml(cls, path:str, data: dict) -> Dict: + from munch import Munch + from copy import deepcopy + ''' + Loads a yaml file + ''' + # Directly from dictionary + data_type = type(data) + if data_type in [pd.DataFrame]: + data = data.to_dict() + if data_type in [Munch]: + data = cls.munch2dict(deepcopy(data)) + if data_type in [dict, list, tuple, set, float, str, int]: + yaml_str = yaml.dump(data) + else: + raise NotImplementedError(f"{data_type}, is not supported") + with open(path, 'w') as file: + file.write(yaml_str) + return {'success': True, 'msg': f'Wrote yaml to {path}'} + + + + + + @classmethod + def get_yaml(cls, path:str=None, default={}, **kwargs) -> Dict: + '''f + Loads a yaml file + ''' + path = cls.resolve_path(path) + with open(path, 'r') as file: + data = yaml.load(file, Loader=yaml.FullLoader) + + return data + + + load_yaml = get_yaml + + save_yaml = put_yaml + + @classmethod + def filesize(cls, filepath:str): + filepath = cls.resolve_path(filepath) + return os.path.getsize(filepath) + + + def search_files(self, path:str='./', search:str='__pycache__') -> List[str]: + path = self.resolve_path(path) + files = self.glob(path) + return list(filter(lambda x: search in x, files)) + + def rm_pycache(self, path:str='./') -> List[str]: + files = self.search_files(path, search='__pycache__') + for file in files: + self.print(self.rm(file)) + return files + + def file2size(self, path='./', fmt='mb') -> int: + files = self.glob(path) + file2size = {} + pwd = self.pwd() + for file in files: + file2size[file.replace(pwd+'/','')] = self.format_data_size(self.filesize(file), fmt) + + # sort by size + file2size = dict(sorted(file2size.items(), key=lambda item: item[1])) + return file2size + + + @classmethod + def cp(cls, path1:str, path2:str, refresh:bool = False): + # what if its a folder? 
+ assert os.path.exists(path1), path1 + if refresh == False: + assert not os.path.exists(path2), path2 + + path2_dirpath = os.path.dirname(path2) + if not os.path.isdir(path2_dirpath): + os.makedirs(path2_dirpath, exist_ok=True) + assert os.path.isdir(path2_dirpath), f'Failed to create directory {path2_dirpath}' + + if os.path.isdir(path1): + shutil.copytree(path1, path2) + + + elif os.path.isfile(path1): + + shutil.copy(path1, path2) + else: + raise ValueError(f'path1 is not a file or a folder: {path1}') + return {'success': True, 'msg': f'Copied {path1} to {path2}'} + + @classmethod + def put_text(cls, path:str, text:str, key=None, bits_per_character=8) -> None: + # Get the absolute path of the file + path = cls.resolve_path(path) + dirpath = os.path.dirname(path) + if not os.path.exists(dirpath): + os.makedirs(dirpath, exist_ok=True) + if not isinstance(text, str): + text = cls.python2str(text) + if key != None: + text = cls.get_key(key).encrypt(text) + # Write the text to the file + with open(path, 'w') as file: + file.write(text) + # get size + text_size = len(text)*bits_per_character + + return {'success': True, 'msg': f'Wrote text to {path}', 'size': text_size} + + @classmethod + def lsdir(cls, path:str) -> List[str]: + path = os.path.abspath(path) + return os.listdir(path) + + @classmethod + def abspath(cls, path:str) -> str: + return os.path.abspath(path) + + + @classmethod + def ls(cls, path:str = '', + recursive:bool = False, + search = None, + return_full_path:bool = True): + """ + provides a list of files in the path + + this path is relative to the module path if you dont specifcy ./ or ~/ or / + which means its based on the module path + """ + path = cls.resolve_path(path) + try: + ls_files = cls.lsdir(path) if not recursive else cls.walk(path) + except FileNotFoundError: + return [] + if return_full_path: + ls_files = [os.path.abspath(os.path.join(path,f)) for f in ls_files] + + ls_files = sorted(ls_files) + if search != None: + ls_files = list(filter(lambda x: search in x, ls_files)) + return ls_files + + + + @classmethod + def put(cls, + k: str, + v: Any, + mode: bool = 'json', + encrypt: bool = False, + verbose: bool = False, + password: str = None, **kwargs) -> Any: + ''' + Puts a value in the config + ''' + encrypt = encrypt or password != None + + if encrypt or password != None: + v = cls.encrypt(v, password=password) + + if not cls.jsonable(v): + v = cls.serialize(v) + + data = {'data': v, 'encrypted': encrypt, 'timestamp': cls.timestamp()} + + # default json + getattr(cls,f'put_{mode}')(k, data) + + data_size = cls.sizeof(v) + + return {'k': k, 'data_size': data_size, 'encrypted': encrypt, 'timestamp': cls.timestamp()} + + @classmethod + def get(cls, + k:str, + default: Any=None, + mode:str = 'json', + max_age:str = None, + cache :bool = False, + full :bool = False, + key: 'Key' = None, + update :bool = False, + password : str = None, + verbose = True, + **kwargs) -> Any: + + ''' + Puts a value in sthe config, with the option to encrypt it + + Return the value + ''' + if cache: + if k in cls.cache: + return cls.cache[k] + data = getattr(cls, f'get_{mode}')(k,default=default, **kwargs) + + + if password != None: + assert data['encrypted'] , f'{k} is not encrypted' + data['data'] = cls.decrypt(data['data'], password=password, key=key) + + data = data or default + + if isinstance(data, dict): + if update: + max_age = 0 + if max_age != None: + timestamp = data.get('timestamp', None) + if timestamp != None: + age = int(time.time() - timestamp) + if age > max_age: # if 
the age is greater than the max age + cls.print(f'{k} is too old ({age} > {max_age})', verbose=verbose) + return default + else: + data = default + + if not full: + if isinstance(data, dict): + if 'data' in data: + data = data['data'] + + # local cache + if cache: + cls.cache[k] = data + return data + + def get_age(self, k:str) -> int: + data = self.get_json(k) + timestamp = data.get('timestamp', None) + if timestamp != None: + age = int(time.time() - timestamp) + return age + return -1 + + @classmethod + def get_text(cls, + path: str, + tail = None, + start_byte:int = 0, + end_byte:int = 0, + start_line :int= None, + end_line:int = None ) -> str: + # Get the absolute path of the file + path = cls.resolve_path(path) + + if not os.path.exists(path): + if os.path.exists(path + '.json'): + path = path + '.json' + + # Read the contents of the file + with open(path, 'rb') as file: + + file.seek(0, 2) # this is done to get the fiel size + file_size = file.tell() # Get the file size + if start_byte < 0: + start_byte = file_size - start_byte + if end_byte <= 0: + end_byte = file_size - end_byte + if end_byte < start_byte: + end_byte = start_byte + 100 + chunk_size = end_byte - start_byte + 1 + + file.seek(start_byte) + + content_bytes = file.read(chunk_size) + + # Convert the bytes to a string + try: + content = content_bytes.decode() + except UnicodeDecodeError as e: + if hasattr(content_bytes, 'hex'): + content = content_bytes.hex() + else: + raise e + + if tail != None: + content = content.split('\n') + content = '\n'.join(content[-tail:]) + + elif start_line != None or end_line != None: + + content = content.split('\n') + if end_line == None or end_line == 0 : + end_line = len(content) + if start_line == None: + start_line = 0 + if start_line < 0: + start_line = start_line + len(content) + if end_line < 0 : + end_line = end_line + len(content) + content = '\n'.join(content[start_line:end_line]) + else: + content = content_bytes.decode() + return content + + + def is_encrypted(self, path:str) -> bool: + try: + return self.get_json(path).get('encrypted', False) + except: + return False + + @classmethod + def storage_dir(cls): + return f'{cls.cache_path}/{cls.module_name()}' + + tmp_dir = cache_dir = storage_dir + + @classmethod + def refresh_storage(cls): + cls.rm(cls.storage_dir()) + + @classmethod + def refresh_storage_dir(cls): + cls.rm(cls.storage_dir()) + cls.makedirs(cls.storage_dir()) + + + @classmethod + def rm_lines(cls, path:str, start_line:int, end_line:int) -> None: + # Get the absolute path of the file + text = cls.get_text(path) + text = text.split('\n') + text = text[:start_line-1] + text[end_line:] + text = '\n'.join(text) + cls.put_text(path, text) + return {'success': True, 'msg': f'Removed lines {start_line} to {end_line} from {path}'} + @classmethod + def rm_line(cls, path:str, line:int, text=None) -> None: + # Get the absolute path of the file + text = cls.get_text(path) + text = text.split('\n') + text = text[:line-1] + text[line:] + text = '\n'.join(text) + cls.put_text(path, text) + return {'success': True, 'msg': f'Removed line {line} from {path}'} + # Write the text to the file + + @classmethod + def tilde_path(cls): + return os.path.expanduser('~') + + def is_dir_empty(self, path:str): + return len(self.ls(path)) == 0 + + @classmethod + def get_file_size(cls, path:str): + path = cls.resolve_path(path) + return os.path.getsize(path) + + @staticmethod + def jsonable( value): + import json + try: + json.dumps(value) + return True + except: + return False + + def 
file2text(self, path = './', relative=True, **kwargs):
+        path = os.path.abspath(path)
+        file2text = {}
+        for file in c.glob(path, recursive=True):
+            with open(file, 'r') as f:
+                content = f.read()
+            file2text[file] = content
+        if relative:
+            # strip the parent path so the keys are relative to `path`
+            return {k[len(path)+1:]: v for k, v in file2text.items()}
+        return file2text
+
+    def file2lines(self, path:str='./') -> List[str]:
+        file2text = self.file2text(path)
+        file2lines = {f: text.split('\n') for f, text in file2text.items()}
+        return file2lines
+
+    def num_files(self, path:str='./') -> int:
+        import commune as c
+        return len(c.glob(path))
+
+    def hidden_files(self, path:str='./') -> List[str]:
+        import commune as c
+        path = self.resolve_path(path)
+        files = [f[len(path)+1:] for f in c.glob(path)]
+        hidden_files = [f for f in files if f.startswith('.')]
+        return hidden_files
+
+    @staticmethod
+    def format_data_size(x: Union[int, float], fmt:str='b', prettify:bool=False):
+        assert type(x) in [int, float], f'x must be int or float, not {type(x)}'
+        fmt2scale = {
+            'b': 1,
+            'kb': 1000,
+            'mb': 1000**2,
+            'gb': 1000**3,
+            'GiB': 1024**3,
+            'tb': 1000**4,
+        }
+        assert fmt in fmt2scale.keys(), f'fmt must be one of {fmt2scale.keys()}'
+        scale = fmt2scale[fmt]
+        x = x/scale
+        if prettify:
+            return f'{x:.2f} {fmt}'
+        else:
+            return x
+
+    @classmethod
+    def get_schema(cls,
+                   module = None,
+                   search = None,
+                   whitelist = None,
+                   fn = None,
+                   docs: bool = True,
+                   include_parents:bool = False,
+                   defaults:bool = True, cache=False) -> 'Schema':
+
+        if '/' in str(search):
+            module, fn = search.split('/')
+            cls = cls.module(module)
+        if isinstance(module, str):
+            if '/' in module:
+                module, fn = module.split('/')
+            module = cls.module(module)
+        module = module or cls
+        schema = {}
+        fns = module.get_functions()
+        for fn in fns:
+            if search != None and search not in fn:
+                continue
+            if callable(getattr(module, fn)):
+                schema[fn] = cls.fn_schema(fn, defaults=defaults, docs=docs)
+        # sort by keys
+        schema = dict(sorted(schema.items()))
+        if whitelist != None:
+            schema = {k:v for k,v in schema.items() if k in whitelist}
+        return schema
+
+    @classmethod
+    def determine_type(cls, x):
+        if x.lower() == 'null' or x == 'None':
+            return None
+        elif x.lower() in ['true', 'false']:
+            return bool(x.lower() == 'true')
+        elif x.startswith('[') and x.endswith(']'):
+            # this is a list
+            try:
+                list_items = x[1:-1].split(',')
+                # try to convert each item to its actual type
+                x = [cls.determine_type(item.strip()) for item in list_items]
+                if len(x) == 1 and x[0] == '':
+                    x = []
+                return x
+            except:
+                # if conversion fails, return as string
+                return x
+        elif x.startswith('{') and x.endswith('}'):
+            # this is a dictionary
+            if len(x) == 2:
+                return {}
+            try:
+                dict_items = x[1:-1].split(',')
+                # try to convert each item to a key-value pair
+                return {key.strip(): cls.determine_type(value.strip()) for key, value in [item.split(':', 1) for item in dict_items]}
+            except:
+                # if conversion fails, return as string
+                return x
+        else:
+            # try to convert to int or float, otherwise return as string
+            try:
+                return int(x)
+            except ValueError:
+                try:
+                    return float(x)
+                except ValueError:
+                    return x
+
+    @classmethod
+    def fn2code(cls, search=None, module=None) -> Dict[str, str]:
+        module = module if module else cls
+        functions = module.fns(search)
+        fn_code_map = {}
+        for fn in functions:
+            try:
+                fn_code_map[fn] = module.fn_code(fn)
+            except Exception as e:
+                print(f'Error: {e}')
+        return fn_code_map
+
+    @classmethod
+    def fn_code(cls, fn:str,
+                detail:bool=False,
+                seperator: str = '/'
+                ) -> str:
+        '''
+        Returns the code of a function
+        '''
+        try:
+            fn = cls.get_fn(fn)
+            code_text = inspect.getsource(fn)
+            text_lines = code_text.split('\n')
+            if 'classmethod' in text_lines[0] or 'staticmethod' in text_lines[0] or '@' in text_lines[0]:
+                text_lines.pop(0)
+            # dedent the method source by one indentation level
+            fn_code = '\n'.join([l[len('    '):] for l in code_text.split('\n')])
+            assert 'def' in text_lines[0], 'Function not found in code'
+            if detail:
+                start_line = cls.find_code_line(search=text_lines[0])
+                fn_code = {
+                    'text': fn_code,
+                    'start_line': start_line,
+                    'end_line': start_line + len(text_lines)
+                }
+        except Exception as e:
+            print(f'Error: {e}')
+            fn_code = None
+        return fn_code
+
+    @classmethod
+    def fn_hash(cls, fn:str = 'subspace/ls', detail:bool=False, seperator: str = '/') -> str:
+        fn_code = cls.fn_code(fn, detail=detail, seperator=seperator)
+        return cls.hash(fn_code)
+
+    @classmethod
+    def is_generator(cls, obj):
+        """
+        Is the object a generator (or a generator function)?
+        """
+        if isinstance(obj, str):
+            if not hasattr(cls, obj):
+                return False
+            obj = getattr(cls, obj)
+        if not callable(obj):
+            result = inspect.isgenerator(obj)
+        else:
+            result = inspect.isgeneratorfunction(obj)
+        return result
+
+    @classmethod
+    def get_parents(cls, obj = None, recursive=True, avoid_classes=['object']) -> List[str]:
+        obj = cls.resolve_object(obj)
+        parents = list(obj.__bases__)
+        if recursive:
+            for parent in parents:
+                parent_parents = cls.get_parents(parent, recursive=recursive)
+                if len(parent_parents) > 0:
+                    for pp in parent_parents:
+                        if pp.__name__ not in avoid_classes:
+                            parents += [pp]
+        return parents
+
+    @classmethod
+    def get_class_name(cls, obj = None) -> str:
+        obj = cls or obj
+        if not cls.is_class(obj):
+            obj = type(obj)
+        return obj.__name__
+
+    @classmethod
+    def fn_signature_map(cls, obj=None, include_parents:bool = False):
+        obj = cls.resolve_object(obj)
+        function_signature_map = {}
+        for f in cls.get_functions(obj = obj, include_parents=include_parents):
+            if f.startswith('__') and f.endswith('__'):
+                if f in ['__init__']:
+                    pass
+                else:
+                    continue
+            if not hasattr(cls, f):
+                continue
+            if callable(getattr(cls, f)):
+                function_signature_map[f] = {k:str(v) for k,v in cls.get_function_signature(getattr(cls, f)).items()}
+        return function_signature_map
+
+    @classmethod
+    def fn_schema(cls, fn:str,
+                  defaults:bool=True,
+                  code:bool = False,
+                  docs:bool = True, **kwargs) -> dict:
+        '''
+        Get function schema of function in cls
+        '''
+        fn_schema = {}
+        fn = cls.get_fn(fn)
+        input_schema = cls.fn_signature(fn)
+        for k,v in input_schema.items():
+            v = str(v)
+            if v.startswith('<'):
+                # normalize type reprs such as "<class 'int'>" to "int"
+                v = v.split("'")[1]
+            input_schema[k] = v
+        fn_schema['input'] = input_schema
+        if defaults:
+            fn_schema['default'] = cls.fn_defaults(fn=fn)
+        if docs:
+            fn_schema['docs'] = fn.__doc__
+        if code:
+            fn_schema['code'] = cls.fn_code(fn)
+        return fn_schema
+
+    @classmethod
+    def fn_info(cls, fn:str) -> dict:
+        r = {}
+        code = cls.fn_code(fn)
+        lines = code.split('\n')
+        mode = 'self'
+        if '@classmethod' in lines[0]:
+            mode = 'class'
+        elif '@staticmethod' in lines[0]:
+            mode = 'static'
+
+        start_line_text = 0
+        lines_before_fn_def = 0
+        for l in lines:
+            if f'def {fn}('.replace(' ', '') in l.replace(' ', ''):
+                start_line_text = l
+                break
+            else:
+                lines_before_fn_def += 1
+        assert start_line_text != None, f'Could not find function {fn} in {cls.pypath()}'
+        module_code = cls.code()
+        start_line = cls.find_code_line(start_line_text, code=module_code) - 1
+        end_line = start_line + len(lines) # find the end line
+        has_docs = bool('"""' in code or "'''" in code)
+        filepath = cls.filepath()
+
+        # start code line
+        for i, line in enumerate(lines):
+            is_end = bool(')' in line and ':' in line)
+            if is_end:
+                start_code_line = i
+                break
+
+        return {
+            'start_line': start_line,
+            'end_line': 
end_line, + 'has_docs': has_docs, + 'code': code, + 'n_lines': len(lines), + 'hash': cls.hash(code), + 'path': filepath, + 'start_code_line': start_code_line + start_line , + 'mode': mode + + } + + + + @classmethod + def find_code_line(cls, search:str=None, code:str = None): + if code == None: + code = cls.code() # get the code + found_lines = [] # list of found lines + for i, line in enumerate(code.split('\n')): + if str(search) in line: + found_lines.append({'idx': i+1, 'text': line}) + if len(found_lines) == 0: + return None + elif len(found_lines) == 1: + return found_lines[0]['idx'] + return found_lines + + + + @classmethod + def attributes(cls): + return list(cls.__dict__.keys()) + + + @classmethod + def get_attributes(cls, search = None, obj=None): + if obj is None: + obj = cls + if isinstance(obj, str): + obj = c.module(obj) + # assert hasattr(obj, '__dict__'), f'{obj} has no __dict__' + attrs = dir(obj) + if search is not None: + attrs = [a for a in attrs if search in a and callable(a)] + return attrs + + + + def add_fn(self, fn, name=None): + if name == None: + name = fn.__name__ + assert not hasattr(self, name), f'{name} already exists' + + setattr(self, name, fn) + + return { + 'success':True , + 'message':f'Added {name} to {self.__class__.__name__}' + } + + + add_attribute = add_attr = add_function = add_fn + + @classmethod + def init_schema(cls): + return cls.fn_schema('__init__') + + + + @classmethod + def init_kwargs(cls): + kwargs = cls.fn_defaults('__init__') + kwargs.pop('self', None) + if 'config' in kwargs: + if kwargs['config'] != None: + kwargs.update(kwargs.pop('config')) + del kwargs['config'] + if 'kwargs' in kwargs: + if kwargs['kwargs'] != None: + kwargs = kwargs.pop('kwargs') + del kwargs['kwargs'] + + return kwargs + init_params = init_kwargs + + @classmethod + def lines_of_code(cls, code:str=None): + if code == None: + code = cls.code() + return len(code.split('\n')) + + @classmethod + def code(cls, module = None, search=None, *args, **kwargs): + if '/' in str(module) or module in cls.fns(): + return cls.fn_code(module) + module = cls.resolve_object(module) + print(module) + text = cls.get_text( module.filepath(), *args, **kwargs) + if search != None: + find_lines = cls.find_lines(text=text, search=search) + return find_lines + return text + pycode = code + @classmethod + def chash(cls, *args, **kwargs): + import commune as c + """ + The hash of the code, where the code is the code of the class (cls) + """ + code = cls.code(*args, **kwargs) + return c.hash(code) + + @classmethod + def find_code_line(cls, search:str, code:str = None): + if code == None: + code = cls.code() # get the code + found_lines = [] # list of found lines + for i, line in enumerate(code.split('\n')): + if search in line: + found_lines.append({'idx': i+1, 'text': line}) + if len(found_lines) == 0: + return None + elif len(found_lines) == 1: + return found_lines[0]['idx'] + return found_lines + + + def fn_code_first_line(self, fn): + code = self.fn_code(fn) + return code.split('):')[0] + '):' + + def fn_code_first_line_idx(self, fn): + code = self.fn_code(fn) + return self.find_code_line(self.fn_code_first_line(fn), code=code) + + + @classmethod + def fn_info(cls, fn:str='test_fn') -> dict: + r = {} + code = cls.fn_code(fn) + lines = code.split('\n') + mode = 'self' + if '@classmethod' in lines[0]: + mode = 'class' + elif '@staticmethod' in lines[0]: + mode = 'static' + module_code = cls.code() + in_fn = False + start_line = 0 + end_line = 0 + fn_code_lines = [] + for i, line in 
enumerate(module_code.split('\n')): + if f'def {fn}('.replace(' ', '') in line.replace(' ', ''): + in_fn = True + start_line = i + 1 + if in_fn: + fn_code_lines.append(line) + if ('def ' in line or '' == line) and len(fn_code_lines) > 1: + end_line = i - 1 + break + + if not in_fn: + end_line = start_line + len(fn_code_lines) # find the endline + # start code line + for i, line in enumerate(lines): + + is_end = bool(')' in line and ':' in line) + if is_end: + start_code_line = i + break + + return { + 'start_line': start_line, + 'end_line': end_line, + 'code': code, + 'n_lines': len(lines), + 'hash': cls.hash(code), + 'start_code_line': start_code_line + start_line , + 'mode': mode + + } + + + @classmethod + def set_line(cls, idx:int, text:str): + code = cls.code() + lines = code.split('\n') + if '\n' in text: + front_lines = lines[:idx] + back_lines = lines[idx:] + new_lines = text.split('\n') + lines = front_lines + new_lines + back_lines + else: + lines[idx-1] = text + new_code = '\n'.join(lines) + cls.put_text(cls.filepath(), new_code) + return {'success': True, 'msg': f'Set line {idx} to {text}'} + + @classmethod + def add_line(cls, idx=0, text:str = '', module=None ): + """ + add line to an index of the module code + """ + + code = cls.code() if module == None else c.module(module).code() + lines = code.split('\n') + new_lines = text.split('\n') if '\n' in text else [text] + lines = lines[:idx] + new_lines + lines[idx:] + new_code = '\n'.join(lines) + cls.put_text(cls.filepath(), new_code) + return {'success': True, 'msg': f'Added line {idx} to {text}'} + + @classmethod + def get_line(cls, idx): + code = cls.code() + lines = code.split('\n') + assert idx < len(lines), f'idx {idx} is out of range for {len(lines)}' + line = lines[max(idx, 0)] + print(len(line)) + return line + + @classmethod + def fn_defaults(cls, fn): + """ + Gets the function defaults + """ + fn = cls.get_fn(fn) + function_defaults = dict(inspect.signature(fn)._parameters) + for k,v in function_defaults.items(): + if v._default != inspect._empty and v._default != None: + function_defaults[k] = v._default + else: + function_defaults[k] = None + + return function_defaults + + @staticmethod + def is_class(obj): + ''' + is the object a class + ''' + return type(obj).__name__ == 'type' + + + @classmethod + def resolve_class(cls, obj): + ''' + resolve class of object or return class if it is a class + ''' + if cls.is_class(obj): + return obj + else: + return obj.__class__ + + + + @classmethod + def has_var_keyword(cls, fn='__init__', fn_signature=None): + if fn_signature == None: + fn_signature = cls.resolve_fn(fn) + for param_info in fn_signature.values(): + if param_info.kind._name_ == 'VAR_KEYWORD': + return True + return False + + @classmethod + def fn_signature(cls, fn) -> dict: + ''' + get the signature of a function + ''' + if isinstance(fn, str): + fn = getattr(cls, fn) + return dict(inspect.signature(fn)._parameters) + + get_function_signature = fn_signature + @classmethod + def is_arg_key_valid(cls, key='config', fn='__init__'): + fn_signature = cls.fn_signature(fn) + if key in fn_signature: + return True + else: + for param_info in fn_signature.values(): + if param_info.kind._name_ == 'VAR_KEYWORD': + return True + + return False + + + + @classmethod + def self_functions(cls: Union[str, type], obj=None, search=None): + ''' + Gets the self methods in a class + ''' + obj = cls.resolve_object(obj) + functions = cls.get_functions(obj) + signature_map = {f:cls.get_function_args(getattr(obj, f)) for f in functions} + 
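+        # signature_map: function name -> its argument names; a leading 'self'
+        # marks an instance method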
if search != None: + functions = [f for f in functions if search in f] + return [k for k, v in signature_map.items() if 'self' in v] + + @classmethod + def class_functions(cls: Union[str, type], obj=None): + ''' + Gets the self methods in a class + ''' + obj = cls.resolve_object(obj) + functions = cls.get_functions(obj) + signature_map = {f:cls.get_function_args(getattr(obj, f)) for f in functions} + return [k for k, v in signature_map.items() if 'cls' in v] + + class_methods = get_class_methods = class_fns = class_functions + + @classmethod + def static_functions(cls: Union[str, type], obj=None): + ''' + Gets the self methods in a class + ''' + obj = obj or cls + functions = cls.get_functions(obj) + signature_map = {f:cls.get_function_args(getattr(obj, f)) for f in functions} + return [k for k, v in signature_map.items() if not ('self' in v or 'cls' in v)] + + static_methods = static_fns = static_functions + + @classmethod + def property_fns(cls) -> bool: + ''' + Get a list of property functions in a class + ''' + return [fn for fn in dir(cls) if cls.is_property(fn)] + + parents = get_parents + + @classmethod + def parent2functions(cls, obj=None): + ''' + Get the parent classes of a class + ''' + obj = cls.resolve_object(obj) + parent_functions = {} + for parent in cls.parents(obj): + parent_functions[parent.__name__] = cls.get_functions(parent) + return parent_functions + + parent2fns = parent2functions + + @classmethod + def get_functions(cls, obj: Any = None, + search = None, + include_parents:bool=True, + include_hidden:bool = False) -> List[str]: + ''' + Get a list of functions in a class + + Args; + obj: the class to get the functions from + include_parents: whether to include the parent functions + include_hidden: whether to include hidden functions (starts and begins with "__") + ''' + is_root_module = cls.is_root_module() + obj = cls.resolve_object(obj) + if include_parents: + parent_functions = cls.parent_functions(obj) + else: + parent_functions = [] + avoid_functions = [] + if not is_root_module: + import commune as c + avoid_functions = c.functions() + else: + avoid_functions = [] + + functions = [] + child_functions = dir(obj) + function_names = [fn_name for fn_name in child_functions + parent_functions] + + for fn_name in function_names: + if fn_name in avoid_functions: + continue + if not include_hidden: + if ((fn_name.startswith('__') or fn_name.endswith('_'))): + if fn_name != '__init__': + continue + fn_obj = getattr(obj, fn_name) + # if the function is callable, include it + if callable(fn_obj): + functions.append(fn_name) + + text_derived_fns = cls.parse_functions_from_module_text() + + functions = sorted(list(set(functions + text_derived_fns))) + + if search != None: + functions = [f for f in functions if search in f] + return functions + + @classmethod + def functions(cls, search = None, include_parents = True): + return cls.get_functions(search=search, include_parents=include_parents) + + + @classmethod + def get_conflict_functions(cls, obj = None): + ''' + Does the object conflict with the current object + ''' + if isinstance(obj, str): + obj = cls.get_module(obj) + root_fns = cls.root_functions() + conflict_functions = [] + for fn in obj.functions(): + if fn in root_fns: + print(f'Conflict: {fn}') + conflict_functions.append(fn) + return conflict_functions + + @classmethod + def does_module_conflict(cls, obj): + return len(cls.get_conflict_functions(obj)) > 0 + + + + @classmethod + def parse_functions_from_module_text(cls, obj=None, splitter_options = [" def " , 
" def "]): + # reutrn only functions in this class + import inspect + obj = obj or cls + text = inspect.getsource(obj) + functions = [] + for splitter in splitter_options: + for line in text.split('\n'): + if f'"{splitter}"' in line: + continue + if line.startswith(splitter): + functions += [line.split(splitter)[1].split('(')[0]] + + return functions + + + def n_fns(self, search = None): + return len(self.fns(search=search)) + + fn_n = n_fns + @classmethod + def fns(self, search = None, include_parents = True): + return self.get_functions(search=search, include_parents=include_parents) + @classmethod + def is_property(cls, fn: 'Callable') -> bool: + ''' + is the function a property + ''' + try: + fn = cls.get_fn(fn, ignore_module_pattern=True) + except : + return False + + return isinstance(fn, property) + + def is_fn_self(self, fn): + fn = self.resolve_fn(fn) + return hasattr(fn, '__self__') and fn.__self__ == self + + + + @classmethod + def get_fn(cls, fn:str, init_kwargs = None): + """ + Gets the function from a string or if its an attribute + """ + if isinstance(fn, str): + is_object = cls.object_exists(fn) + if is_object: + return cls.get_object(fn) + elif '/' in fn: + module, fn = fn.split('/') + cls = cls.get_module(module) + try: + fn = getattr(cls, fn) + except: + init_kwargs = init_kwargs or {} + fn = getattr(cls(**init_kwargs), fn) + + if callable(fn) or isinstance(fn, property): + pass + + return fn + + @classmethod + def self_functions(cls, search = None): + fns = cls.classify_fns(cls)['self'] + if search != None: + fns = [f for f in fns if search in f] + return fns + + + @classmethod + def classify_fns(cls, obj= None, mode=None): + method_type_map = {} + obj = cls.resolve_object(obj) + for attr_name in dir(obj): + method_type = None + try: + method_type = cls.classify_fn(getattr(obj, attr_name)) + except Exception as e: + continue + + if method_type not in method_type_map: + method_type_map[method_type] = [] + method_type_map[method_type].append(attr_name) + if mode != None: + method_type_map = method_type_map[mode] + return method_type_map + + + @classmethod + def get_args(cls, fn) -> List[str]: + """ + get the arguments of a function + params: + fn: the function + + """ + # if fn is an object get the __ + + if not callable(fn): + fn = cls.get_fn(fn) + try: + args = inspect.getfullargspec(fn).args + except Exception as e: + args = [] + return args + + get_function_args = get_args + + + @classmethod + def has_function_arg(cls, fn, arg:str): + args = cls.get_function_args(fn) + return arg in args + + + fn_args = get_fn_args = get_function_args + + @classmethod + def classify_fn(cls, fn): + try: + if not callable(fn): + fn = cls.get_fn(fn) + if not callable(fn): + return 'cls' + args = cls.get_function_args(fn) + if args[0] == 'self': + return 'self' + elif args[0] == 'cls': + return 'class' + except Exception as e: + return 'property' + return 'static' + + + + @classmethod + def python2types(cls, d:dict)-> dict: + return {k:str(type(v)).split("'")[1] for k,v in d.items()} + + + + + @classmethod + def fn2str(cls,search = None, code = True, defaults = True, **kwargs): + fns = cls.fns(search=search) + fn2str = {} + for fn in fns: + fn2str[fn] = cls.fn_code(fn) + + return fn2str + @classmethod + def fn2hash(cls, fn=None , mode='sha256', **kwargs): + fn2hash = {} + for k,v in cls.fn2str(**kwargs).items(): + fn2hash[k] = c.hash(v,mode=mode) + if fn: + return fn2hash[fn] + return fn2hash + + # TAG CITY + @classmethod + def parent_functions(cls, obj = None, include_root = True): + 
functions = []
+        obj = obj or cls
+        parents = cls.get_parents(obj)
+        for parent in parents:
+            is_parent_root = cls.is_root_module(parent)
+            if is_parent_root:
+                continue
+            for name, member in parent.__dict__.items():
+                if not name.startswith('__'):
+                    functions.append(name)
+        return functions
+
+    @classmethod
+    def child_functions(cls, obj=None):
+        obj = cls.resolve_object(obj)
+        methods = []
+        for name, member in obj.__dict__.items():
+            if inspect.isfunction(member) and not name.startswith('__'):
+                methods.append(name)
+        return methods
+
+    @classmethod
+    def locals2kwargs(cls, locals_dict:dict, kwargs_keys=['kwargs']) -> dict:
+        locals_dict = locals_dict or {}
+        kwargs = locals_dict or {}
+        # remove the self and cls entries from the locals_dict
+        kwargs.pop('cls', None)
+        kwargs.pop('self', None)
+        assert isinstance(kwargs, dict), f'kwargs must be a dict, got {type(kwargs)}'
+        # fold any nested kwargs dicts into the result
+        for k in kwargs_keys:
+            kwargs.update(locals_dict.pop(k, {}) or {})
+        return kwargs
+
+    def kwargs2attributes(self, kwargs:dict, ignore_error:bool = False):
+        for k, v in kwargs.items():
+            if k != 'self': # skip the self
+                # we don't want to overwrite existing attributes
+                if not ignore_error:
+                    assert not hasattr(self, k)
+                setattr(self, k, v)
+
+    def num_fns(self):
+        return len(self.fns())
+
+    def fn2type(self):
+        fn2type = {}
+        fns = self.fns()
+        for f in fns:
+            if callable(getattr(self, f)):
+                fn2type[f] = self.classify_fn(getattr(self, f))
+        return fn2type
+
+    @classmethod
+    def is_dir_module(cls, path:str) -> bool:
+        """
+        determine if the path is a module
+        """
+        filepath = cls.simple2path(path)
+        if path.replace('.', '/') + '/' in filepath:
+            return True
+        if ('modules/' + path.replace('.', '/')) in filepath:
+            return True
+        return False
+
+    @classmethod
+    def add_line(cls, path:str, text:str, line=None) -> None:
+        # Get the absolute path of the file
+        path = cls.resolve_path(path)
+        text = str(text)
+        # Write the text to the file
+        if line != None:
+            line = int(line)
+            lines = cls.get_text(path).split('\n')
+            lines = lines[:line] + [text] + lines[line:]
+            text = '\n'.join(lines)
+        with open(path, 'w') as file:
+            file.write(text)
+        return {'success': True, 'msg': f'Added line to {path}'}
+
+    @classmethod
+    def readme(cls):
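+        # the README lives next to the module file: <module>.py -> <module>_docs.md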
+ path = cls.filepath().replace('.py', '_docs.md') + markdown_text = cls.get_text(path=path) + return markdown_text + + docs = readme + + + @staticmethod + def is_imported(package:str) : + return bool(package in sys.modules) + + @classmethod + def is_parent(cls, obj=None): + obj = obj or cls + return bool(obj in cls.get_parents()) + + @classmethod + def find_code_lines(cls, search:str = None , module=None) -> List[str]: + module_code = cls.get_module(module).code() + return cls.find_lines(search=search, text=module_code) + + @classmethod + def find_lines(self, text:str, search:str) -> List[str]: + """ + Finds the lines in text with search + """ + found_lines = [] + lines = text.split('\n') + for line in lines: + if search in line: + found_lines += [line] + + return found_lines + + + @classmethod + def params(cls, fn='__init__'): + params = cls.fn_defaults(fn) + params.pop('self', None) + return params + + + @classmethod + def is_str_fn(cls, fn): + if fn == None: + return False + if '/' in fn: + module, fn = fn.split('/') + module = cls.module(module) + else: + module = cls + + return hasattr(module, fn) + + + + @classmethod + def resolve_extension(cls, filename:str, extension = '.py') -> str: + if filename.endswith(extension): + return filename + return filename + extension + + @classmethod + def simple2path(cls, + simple:str, + extension = '.py', + avoid_dirnames = ['', 'src', + 'commune', + 'commune/module', + 'commune/modules', + 'modules', + 'blocks', + 'agents', + 'commune/agents'], + **kwargs) -> bool: + """ + converts the module path to a file path + + for example + + model.openai.gpt3 -> model/openai/gpt3.py, model/openai/gpt3_module.py, model/openai/__init__.py + model.openai -> model/openai.py or model/openai_module.py or model/__init__.py + + Parameters: + path (str): The module path + """ + # if cls.libname in simple and '/' not in simple and cls.can_import_module(simple): + # return simple + shortcuts = cls.shortcuts() + simple = shortcuts.get(simple, simple) + + if simple.endswith(extension): + simple = simple[:-len(extension)] + + path = None + pwd = cls.pwd() + path_options = [] + simple = simple.replace('/', '.') + + # create all of the possible paths by combining the avoid_dirnames with the simple path + dir_paths = list([pwd+ '/' + x for x in avoid_dirnames]) # local first + dir_paths += list([cls.libpath + '/' + x for x in avoid_dirnames]) # add libpath stuff + + for dir_path in dir_paths: + if dir_path.endswith('/'): + dir_path = dir_path[:-1] + # '/' count how many times the path has been split + module_dirpath = dir_path + '/' + simple.replace('.', '/') + if os.path.isdir(module_dirpath): + simple_filename = simple.replace('.', '_') + filename_options = [simple_filename, simple_filename + '_module', 'module_'+ simple_filename] + ['module'] + simple.split('.') + ['__init__'] + path_options += [module_dirpath + '/' + f for f in filename_options] + else: + module_filepath = dir_path + '/' + simple.replace('.', '/') + path_options += [module_filepath] + for p in path_options: + p = cls.resolve_extension(p) + if os.path.exists(p): + p_text = cls.get_text(p) + path = p + if 'commune' in p_text and 'class ' in p_text or ' def ' in p_text: + return p + if path != None: + break + return path + + + @classmethod + def is_repo(cls, libpath:str ): + # has the .git folder + return bool([f for f in cls.ls(libpath) if '.git' in f and os.path.isdir(f)]) + + + @classmethod + def path2simple(cls, + path:str, + tree = None, + ignore_prefixes = ['src', 'commune', 'modules', 
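+                                    # hedged example of these filters (hypothetical
+                                    # layout): commune/modules/model/openai/openai.py
+                                    # reduces to the simple name 'model.openai' once
+                                    # the scaffold prefixes/suffixes are stripped,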
'commune.modules', + 'commune.commune', + 'commune.module', 'module', 'router'], + module_folder_filnames = ['__init__', 'main', 'module'], + module_extension = 'py', + ignore_suffixes = ['module'], + name_map = {'commune': 'module'}, + compress_path = True, + verbose = False, + num_lines_to_read = 100, + ) -> str: + + path = os.path.abspath(path) + path_filename_with_extension = path.split('/')[-1] # get the filename with extension + path_extension = path_filename_with_extension.split('.')[-1] # get the extension + assert path_extension == module_extension, f'Invalid extension {path_extension} for path {path}' + path_filename = path_filename_with_extension[:-len(path_extension)-1] # remove the extension + path_filename_chunks = path_filename.split('_') + path_chunks = path.split('/') + + if path.startswith(cls.libpath): + path = path[len(cls.libpath):] + else: + # if the tree path is not in the path, we want to remove the root path + pwd = cls.pwd() + path = path[len(pwd):] + dir_chunks = path.split('/')[:-1] if '/' in path else [] + is_module_folder = all([bool(chunk in dir_chunks) for chunk in path_filename_chunks]) + is_module_folder = is_module_folder or (path_filename in module_folder_filnames) + if is_module_folder: + path = '/'.join(path.split('/')[:-1]) + path = path[1:] if path.startswith('/') else path + path = path.replace('/', '.') + module_extension = '.'+module_extension + if path.endswith(module_extension): + path = path[:-len(module_extension)] + if compress_path: + # we want to remove redundant chunks + # for example if the path is 'module/module' we want to remove the redundant module + path_chunks = path.split('.') + simple_path = [] + for chunk in path_chunks: + if chunk not in simple_path: + simple_path += [chunk] + simple_path = '.'.join(simple_path) + else: + simple_path = path + # FILTER PREFIXES + for prefix in ignore_prefixes: + prefix += '.' + if simple_path.startswith(prefix) and simple_path != prefix: + simple_path = simple_path[len(prefix):] + cls.print(f'Prefix {prefix} in path {simple_path}', color='yellow', verbose=verbose) + # FILTER SUFFIXES + for suffix in ignore_suffixes: + suffix = '.' 
+ suffix + if simple_path.endswith(suffix) and simple_path != suffix: + simple_path = simple_path[:-len(suffix)] + cls.print(f'Suffix {suffix} in path {simple_path}', color='yellow', verbose=verbose) + + # remove leading and trailing dots + if simple_path.startswith('.'): + simple_path = simple_path[1:] + if simple_path.endswith('.'): + simple_path = simple_path[:-1] + simple_path = name_map.get(simple_path, simple_path) + return simple_path + + @classmethod + def path_config_exists(cls, path:str, + config_files = ['config.yaml', 'config.yml'], + config_extensions=['.yaml', '.yml']) -> bool: + ''' + Checks if the path exists + ''' + config_files += [path.replace('.py', ext) for ext in config_extensions] + dirpath = os.path.dirname(path) + dir_files = os.listdir(dirpath) + if os.path.exists(dirpath) and any([[f.endswith(cf) for cf in config_files] for f in dir_files]): + return True + return False + + + @classmethod + def resolve_cache_path(self, path): + path = path.replace("/", "_") + if path.startswith('_'): + path = path[1:] + path = f'cached_path/{path}' + return path + + @classmethod + def cached_paths(cls): + return cls.ls('cached_paths') + + + @classmethod + def find_classes(cls, path='./', working=False): + + path = os.path.abspath(path) + if os.path.isdir(path): + classes = [] + generator = cls.glob(path+'/**/**.py', recursive=True) + for p in generator: + if p.endswith('.py'): + p_classes = cls.find_classes(p ) + if working: + for class_path in p_classes: + try: + cls.import_object(class_path) + classes += [class_path] + except Exception as e: + r = cls.detailed_error(e) + r['class'] = class_path + cls.print(r, color='red') + continue + else: + classes += p_classes + + return classes + + code = cls.get_text(path) + classes = [] + file_path = cls.path2objectpath(path) + + for line in code.split('\n'): + if all([s in line for s in ['class ', ':']]): + new_class = line.split('class ')[-1].split('(')[0].strip() + if new_class.endswith(':'): + new_class = new_class[:-1] + if ' ' in new_class: + continue + classes += [new_class] + classes = [file_path + '.' + c for c in classes] + + libpath_objpath_prefix = cls.libpath.replace('/', '.')[1:] + '.' + classes = [c.replace(libpath_objpath_prefix, '') for c in classes] + return classes + + + + + @classmethod + def find_class2functions(cls, path, working=False): + + path = os.path.abspath(path) + if os.path.isdir(path): + class2functions = {} + for p in cls.glob(path+'/**/**.py', recursive=True): + if p.endswith('.py'): + object_path = cls.path2objectpath(p) + response = cls.find_class2functions(p ) + for k,v in response.items(): + class2functions[object_path+ '.' 
+k] = v + return class2functions + + code = cls.get_text(path) + classes = [] + class2functions = {} + class_functions = [] + new_class = None + for line in code.split('\n'): + if all([s in line for s in ['class ', ':']]): + new_class = line.split('class ')[-1].split('(')[0].strip() + if new_class.endswith(':'): + new_class = new_class[:-1] + if ' ' in new_class: + continue + classes += [new_class] + if len(class_functions) > 0: + class2functions[new_class] = cls.copy(class_functions) + class_functions = [] + if all([s in line for s in [' def', '(']]): + fn = line.split(' def')[-1].split('(')[0].strip() + class_functions += [fn] + if new_class != None: + class2functions[new_class] = class_functions + + return class2functions + + @classmethod + def path2objectpath(cls, path:str, **kwargs) -> str: + libpath = cls.libpath + if path.startswith(libpath): + path = path.replace(libpath , '')[1:].replace('/', '.').replace('.py', '') + else: + pwd = cls.pwd() + if path.startswith(pwd): + path = path.replace(pwd, '')[1:].replace('/', '.').replace('.py', '') + + return path.replace('__init__.', '.') + + @classmethod + def objecpath2path(cls, objectpath:str, **kwargs) -> str: + options = [cls.libpath, cls.pwd()] + for option in options: + path = option + '/' + objectpath.replace('.', '/') + '.py' + if os.path.exists(path): + return path + raise ValueError(f'Path not found for objectpath {objectpath}') + + + + @classmethod + def find_functions(cls, path = './', working=False): + fns = [] + if os.path.isdir(path): + path = os.path.abspath(path) + for p in cls.glob(path+'/**/**.py', recursive=True): + p_fns = cls.find_functions(p) + file_object_path = cls.path2objectpath(p) + p_fns = [file_object_path + '.' + f for f in p_fns] + for fn in p_fns: + if working: + try: + cls.import_object(fn) + except Exception as e: + r = cls.detailed_error(e) + r['fn'] = fn + cls.print(r, color='red') + continue + fns += [fn] + + else: + code = cls.get_text(path) + for line in code.split('\n'): + if line.startswith('def ') or line.startswith('async def '): + fn = line.split('def ')[-1].split('(')[0].strip() + fns += [fn] + return fns + + + @classmethod + def find_async_functions(cls, path): + if os.path.isdir(path): + path2classes = {} + for p in cls.glob(path+'/**/**.py', recursive=True): + path2classes[p] = cls.find_functions(p) + return path2classes + code = cls.get_text(path) + fns = [] + for line in code.split('\n'): + if line.startswith('async def '): + fn = line.split('def ')[-1].split('(')[0].strip() + fns += [fn] + return [c for c in fns] + + @classmethod + def find_objects(cls, path:str = './', search=None, working=False, **kwargs): + classes = cls.find_classes(path, working=working) + functions = cls.find_functions(path, working=working) + + if search != None: + classes = [c for c in classes if search in c] + functions = [f for f in functions if search in f] + object_paths = functions + classes + return object_paths + objs = find_objects + + + + def find_working_objects(self, path:str = './', **kwargs): + objects = self.find_objects(path, **kwargs) + working_objects = [] + progress = self.tqdm(objects, desc='Progress') + error_progress = self.tqdm(objects, desc='Errors') + + for obj in objects: + + try: + self.import_object(obj) + working_objects += [obj] + progress.update(1) + except: + error_progress.update(1) + pass + return working_objects + + search = find_objects + + @classmethod + def simple2objectpath(cls, + simple_path:str, + cactch_exception = False, + **kwargs) -> str: + + object_path = 
cls.simple2path(simple_path, **kwargs) + classes = cls.find_classes(object_path) + return classes[-1] + + @classmethod + def simple2object(cls, path:str, **kwargs) -> str: + path = cls.simple2objectpath(path, **kwargs) + try: + return cls.import_object(path) + except: + path = cls.tree().get(path) + return cls.import_object(path) + + included_pwd_in_path = False + @classmethod + def import_module(cls, + import_path:str, + included_pwd_in_path=True, + try_prefixes = ['commune','commune.modules', 'modules', 'commune.subspace', 'subspace'] + ) -> 'Object': + from importlib import import_module + if included_pwd_in_path and not cls.included_pwd_in_path: + import sys + pwd = cls.pwd() + sys.path.append(pwd) + sys.path = list(set(sys.path)) + cls.included_pwd_in_path = True + # if commune is in the path more than once, we want to remove the duplicates + if cls.libname in import_path: + import_path = cls.libname + import_path.split(cls.libname)[-1] + pwd = cls.pwd() + try: + return import_module(import_path) + except Exception as _e: + for prefix in try_prefixes: + try: + return import_module(f'{prefix}.{import_path}') + except Exception as e: + pass + raise _e + + @classmethod + def can_import_module(cls, module:str) -> bool: + ''' + Returns true if the module is valid + ''' + try: + cls.import_module(module) + return True + except: + return False + + + + def get_module_objects(self, path:str, **kwargs): + if self.can_import_module(path): + return self.find_objects(path.replace('.', '/'), **kwargs) + return self.find_objects(path, **kwargs) + @classmethod + def can_import_object(cls, module:str) -> bool: + ''' + Returns true if the module is valid + ''' + try: + cls.import_object(module) + return True + except: + return False + + @classmethod + def import_object(cls, key:str, verbose: bool = 0, trials=3)-> Any: + ''' + Import an object from a string with the format of {module_path}.{object} + Examples: import_object("torch.nn"): imports nn from torch + ''' + module = '.'.join(key.split('.')[:-1]) + object_name = key.split('.')[-1] + if verbose: + cls.print(f'Importing {object_name} from {module}') + obj = getattr(cls.import_module(module), object_name) + return obj + + obj = get_obj = import_object + + + @classmethod + def object_exists(cls, path:str, verbose=False)-> Any: + try: + cls.import_object(path, verbose=verbose) + return True + except Exception as e: + return False + + imp = get_object = importobj = import_object + + @classmethod + def module_exists(cls, module:str, **kwargs) -> bool: + ''' + Returns true if the module exists + ''' + try: + module_path = c.simple2path(module) + print(module_path) + module_exists = c.exists(module_path) + except: + module_exists = False + # if not module_exists: + # module_exists = module in c.modules() + return module_exists + + @classmethod + def has_app(cls, module:str, **kwargs) -> bool: + return cls.module_exists(module + '.app', **kwargs) + + @classmethod + def simplify_paths(cls, paths): + paths = [cls.simplify_path(p) for p in paths] + paths = [p for p in paths if p] + return paths + + @classmethod + def simplify_path(cls, p, avoid_terms=['modules', 'agents']): + chunks = p.split('.') + if len(chunks) < 2: + return None + file_name = chunks[-2] + chunks = chunks[:-1] + path = '' + for chunk in chunks: + if chunk in path: + continue + path += chunk + '.' 
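+        # A hedged example of the compression above (hypothetical object path):
+        #   'commune.modules.storage.storage.Storage'
+        #   -> duplicate chunks are skipped and the filename/'_module' suffixes
+        #      below are dropped, leaving the simple name 'storage'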
+ if file_name.endswith('_module'): + path = '.'.join(path.split('.')[:-1]) + + if path.startswith(cls.libname + '.'): + path = path[len(cls.libname)+1:] + + if path.endswith('.'): + path = path[:-1] + + if '_' in file_name: + file_chunks = file_name.split('_') + if all([c in path for c in file_chunks]): + path = '.'.join(path.split('.')[:-1]) + for avoid in avoid_terms: + avoid = f'{avoid}.' + if avoid in path: + path = path.replace(avoid, '') + return path + + @classmethod + def local_modules(cls, search=None): + object_paths = cls.find_classes(cls.pwd()) + object_paths = cls.simplify_paths(object_paths) + if search != None: + object_paths = [p for p in object_paths if search in p] + return sorted(list(set(object_paths))) + @classmethod + def lib_tree(cls, ): + return cls.get_tree(cls.libpath) + @classmethod + def local_tree(cls ): + return cls.get_tree(cls.pwd()) + + @classmethod + def get_tree(cls, path): + class_paths = cls.find_classes(path) + simple_paths = cls.simplify_paths(class_paths) + return dict(zip(simple_paths, class_paths)) + + @classmethod + def get_module(cls, + path:str = 'module', + cache=True, + verbose = False, + update_tree_if_fail = True, + init_kwargs = None, + catch_error = False, + ) -> str: + import commune as c + path = path or 'module' + if catch_error: + try: + return cls.get_module(path=path, cache=cache, + verbose=verbose, + update_tree_if_fail=update_tree_if_fail, + init_kwargs=init_kwargs, + catch_error=False) + except Exception as e: + return c.detailed_error(e) + if path in ['module', 'c']: + return c.Module + # if the module is a valid import path + shortcuts = c.shortcuts() + if path in shortcuts: + path = shortcuts[path] + module = None + cache_key = path + t0 = c.time() + if cache and cache_key in c.module_cache: + module = c.module_cache[cache_key] + return module + module = c.simple2object(path) + # ensure module + if verbose: + c.print(f'Loaded {path} in {c.time() - t0} seconds', color='green') + + if init_kwargs != None: + module = module(**init_kwargs) + is_module = c.is_module(module) + if not is_module: + module = cls.obj2module(module) + if cache: + c.module_cache[cache_key] = module + return module + + + _tree = None + @classmethod + def tree(cls, search=None, cache=True): + if cls._tree != None and cache: + return cls._tree + local_tree = cls.local_tree() + lib_tree = cls.lib_tree() + tree = {**local_tree, **lib_tree} + if cache: + cls._tree = tree + if search != None: + tree = {k:v for k,v in tree.items() if search in k} + return tree + + return tree + + + def overlapping_modules(self, search:str=None, **kwargs): + local_modules = self.local_modules(search=search) + lib_modules = self.lib_modules(search=search) + return [m for m in local_modules if m in lib_modules] + + + @classmethod + def lib_modules(cls, search=None): + object_paths = cls.find_classes(cls.libpath ) + object_paths = cls.simplify_paths(object_paths) + if search != None: + object_paths = [p for p in object_paths if search in p] + return sorted(list(set(object_paths))) + + @classmethod + def find_modules(cls, search=None, **kwargs): + local_modules = cls.local_modules(search=search) + lib_modules = cls.lib_modules(search=search) + return sorted(list(set(local_modules + lib_modules))) + + _modules = None + @classmethod + def modules(cls, search=None, cache=True, **kwargs)-> List[str]: + modules = cls._modules + if not cache or modules == None: + modules = cls.find_modules(search=None, **kwargs) + if search != None: + modules = [m for m in modules if search in m] + return 
modules + get_modules = modules + + @classmethod + def has_module(cls, module): + return module in cls.modules() + + + + + + def new_modules(self, *modules, **kwargs): + for module in modules: + self.new_module(module=module, **kwargs) + + + + @classmethod + def new_module( cls, + module : str , + base_module : str = 'demo', + folder_module : bool = False, + update=1 + ): + + import commune as c + base_module = c.module(base_module) + module_class_name = ''.join([m[0].capitalize() + m[1:] for m in module.split('.')]) + base_module_class_name = base_module.class_name() + base_module_code = base_module.code().replace(base_module_class_name, module_class_name) + pwd = c.pwd() + path = os.path.join(pwd, module.replace('.', '/')) + if folder_module: + dirpath = path + filename = module.replace('.', '_') + path = os.path.join(path, filename) + + path = path + '.py' + dirpath = os.path.dirname(path) + if os.path.exists(path) and not update: + return {'success': True, 'msg': f'Module {module} already exists', 'path': path} + if not os.path.exists(dirpath): + os.makedirs(dirpath, exist_ok=True) + + c.put_text(path, base_module_code) + + return {'success': True, 'msg': f'Created module {module}', 'path': path} + + add_module = new_module + + + @classmethod + def has_local_module(cls, path=None): + import commune as c + path = '.' if path == None else path + if os.path.exists(f'{path}/module.py'): + text = c.get_text(f'{path}/module.py') + if 'class ' in text: + return True + return False + + + + def path2functions(self, path=None): + path = path or (self.root_path + '/utils') + paths = self.ls(path) + path2functions = {} + print(paths) + + for p in paths: + + functions = [] + if os.path.isfile(p) == False: + continue + text = self.get_text(p) + if len(text) == 0: + continue + + for line in text.split('\n'): + print(line) + if 'def ' in line and '(' in line: + functions.append(line.split('def ')[1].split('(')[0]) + replative_path = p[len(path)+1:] + path2functions[replative_path] = functions + return path2functions + + @staticmethod + def chunk(sequence:list = [0,2,3,4,5,6,6,7], + chunk_size:int=4, + num_chunks:int= None): + assert chunk_size != None or num_chunks != None, 'must specify chunk_size or num_chunks' + if chunk_size == None: + chunk_size = len(sequence) / num_chunks + if chunk_size > len(sequence): + return [sequence] + if num_chunks == None: + num_chunks = int(len(sequence) / chunk_size) + if num_chunks == 0: + num_chunks = 1 + chunks = [[] for i in range(num_chunks)] + for i, element in enumerate(sequence): + idx = i % num_chunks + chunks[idx].append(element) + return chunks + + @classmethod + def batch(cls, x: list, batch_size:int=8): + return cls.chunk(x, chunk_size=batch_size) + + def cancel(self, futures): + for f in futures: + f.cancel() + return {'success': True, 'msg': 'cancelled futures'} + + + @classmethod + def cachefn(cls, func, max_age=60, update=False, cache=True, cache_folder='cachefn'): + import functools + path_name = cache_folder+'/'+func.__name__ + def wrapper(*args, **kwargs): + fn_name = func.__name__ + cache_params = {'max_age': max_age, 'cache': cache} + for k, v in cache_params.items(): + cache_params[k] = kwargs.pop(k, v) + + + if not update: + result = cls.get(fn_name, **cache_params) + if result != None: + return result + + result = func(*args, **kwargs) + + if cache: + cls.put(fn_name, result, cache=cache) + return result + return wrapper + + + @staticmethod + def round(x:Union[float, int], sig: int=6, small_value: float=1.0e-9): + from commune.utils.math 
import round_sig + return round_sig(x, sig=sig, small_value=small_value) + + @classmethod + def round_decimals(cls, x:Union[float, int], decimals: int=6, small_value: float=1.0e-9): + + import math + """ + Rounds x to the number of {sig} digits + :param x: + :param sig: signifant digit + :param small_value: smallest possible value + :return: + """ + x = float(x) + return round(x, decimals) + + + + + @staticmethod + def num_words( text): + return len(text.split(' ')) + + @classmethod + def random_word(cls, *args, n=1, seperator='_', **kwargs): + import commune as c + random_words = cls.module('key').generate_mnemonic(*args, **kwargs).split(' ')[0] + random_words = random_words.split(' ')[:n] + if n == 1: + return random_words[0] + else: + return seperator.join(random_words.split(' ')[:n]) + + @classmethod + def filter(cls, text_list: List[str], filter_text: str) -> List[str]: + return [text for text in text_list if filter_text in text] + + + + @staticmethod + def tqdm(*args, **kwargs): + from tqdm import tqdm + return tqdm(*args, **kwargs) + + progress = tqdm + + emojis = { + 'smile': '😊', + 'sad': '😞', + 'heart': '❤️', + 'star': '⭐', + 'fire': '🔥', + 'check': '✅', + 'cross': '❌', + 'warning': '⚠️', + 'info': 'ℹ️', + 'question': '❓', + 'exclamation': '❗', + 'plus': '➕', + 'minus': '➖', + + } + + + @classmethod + def emoji(cls, name:str): + return cls.emojis.get(name, '❓') + + @staticmethod + def tqdm(*args, **kwargs): + from tqdm import tqdm + return tqdm(*args, **kwargs) + progress = tqdm + + + + + @classmethod + def jload(cls, json_string): + import json + return json.loads(json_string.replace("'", '"')) + + @classmethod + def partial(cls, fn, *args, **kwargs): + return partial(fn, *args, **kwargs) + + + @classmethod + def sizeof(cls, obj): + import sys + sizeof = 0 + if isinstance(obj, dict): + for k,v in obj.items(): + sizeof += cls.sizeof(k) + cls.sizeof(v) + elif isinstance(obj, list): + for v in obj: + sizeof += cls.sizeof(v) + elif any([k.lower() in cls.type_str(obj).lower() for k in ['torch', 'Tensor'] ]): + + sizeof += cls.get_tensor_size(obj) + else: + sizeof += sys.getsizeof(obj) + + return sizeof + + + @classmethod + def put_torch(cls, path:str, data:Dict, **kwargs): + import torch + path = cls.resolve_path(path=path, extension='pt') + torch.save(data, path) + return path + + def init_nn(self): + import torch + torch.nn.Module.__init__(self) + + + @classmethod + def check_word(cls, word:str)-> str: + import commune as c + files = c.glob('./') + progress = c.tqdm(len(files)) + for f in files: + try: + text = c.get_text(f) + except Exception as e: + continue + if word in text: + return True + progress.update(1) + return False + + @classmethod + def wordinfolder(cls, word:str, path:str='./')-> bool: + import commune as c + path = c.resolve_path(path) + files = c.glob(path) + progress = c.tqdm(len(files)) + for f in files: + try: + text = c.get_text(f) + except Exception as e: + continue + if word in text: + return True + progress.update(1) + return False + + + def locals2hash(self, kwargs:dict = {'a': 1}, keys=['kwargs']) -> str: + kwargs.pop('cls', None) + kwargs.pop('self', None) + return self.dict2hash(kwargs) + + @classmethod + def dict2hash(cls, d:dict) -> str: + for k in d.keys(): + assert cls.jsonable(d[k]), f'{k} is not jsonable' + return cls.hash(d) + + @classmethod + def dict_put(cls, *args, **kwargs): + from commune.utils.dict import dict_put + return dict_put(*args, **kwargs) + + @classmethod + def dict_get(cls, *args, **kwargs): + from commune.utils.dict import 
dict_get + return dict_get(*args, **kwargs) + + + @classmethod + def is_address(cls, address:str) -> bool: + if not isinstance(address, str): + return False + if '://' in address: + return True + conds = [] + conds.append(len(address.split('.')) >= 3) + conds.append(isinstance(address, str)) + conds.append(':' in address) + conds.append(cls.is_int(address.split(':')[-1])) + return all(conds) + + + @classmethod + def new_event_loop(cls, nest_asyncio:bool = True) -> 'asyncio.AbstractEventLoop': + import asyncio + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + if nest_asyncio: + cls.nest_asyncio() + + return loop + + + def set_event_loop(self, loop=None, new_loop:bool = False) -> 'asyncio.AbstractEventLoop': + import asyncio + try: + if new_loop: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + else: + loop = loop if loop else asyncio.get_event_loop() + except RuntimeError as e: + self.new_event_loop() + + self.loop = loop + return self.loop + + @classmethod + def get_event_loop(cls, nest_asyncio:bool = True) -> 'asyncio.AbstractEventLoop': + try: + loop = asyncio.get_event_loop() + except Exception as e: + loop = cls.new_event_loop(nest_asyncio=nest_asyncio) + return loop + + + + + @classmethod + def merge(cls, from_obj= None, + to_obj = None, + include_hidden:bool=True, + allow_conflicts:bool=True, + verbose: bool = False): + + ''' + Merge the functions of a python object into the current object (a) + ''' + from_obj = from_obj or cls + to_obj = to_obj or cls + + for fn in dir(from_obj): + if fn.startswith('_') and not include_hidden: + continue + if hasattr(to_obj, fn) and not allow_conflicts: + continue + if verbose: + cls.print(f'Adding {fn}') + setattr(to_obj, fn, getattr(from_obj, fn)) + + return to_obj + + + # JUPYTER NOTEBOOKS + @classmethod + def enable_jupyter(cls): + cls.nest_asyncio() + + + + jupyter = enable_jupyter + + + @classmethod + def pip_list(cls, lib=None): + pip_list = cls.cmd(f'pip list', verbose=False, bash=True).split('\n') + if lib != None: + pip_list = [l for l in pip_list if l.startswith(lib)] + return pip_list + + + @classmethod + def pip_libs(cls): + return list(cls.lib2version().values()) + + @classmethod + def ensure_lib(cls, lib:str, verbose:bool=False): + if cls.pip_exists(lib): + return {'lib':lib, 'version':cls.version(lib), 'status':'exists'} + elif cls.pip_exists(lib) == False: + cls.pip_install(lib, verbose=verbose) + return {'lib':lib, 'version':cls.version(lib), 'status':'installed'} + + required_libs = [] + @classmethod + def ensure_libs(cls, libs: List[str] = None, verbose:bool=False): + if hasattr(cls, 'libs'): + libs = cls.libs + results = [] + for lib in libs: + results.append(cls.ensure_lib(lib, verbose=verbose)) + return results + + @classmethod + def install(cls, libs: List[str] = None, verbose:bool=False): + return cls.ensure_libs(libs, verbose=verbose) + + @classmethod + def ensure_env(cls): + cls.ensure_libs(cls.libs) + + ensure_package = ensure_lib + + @classmethod + def queue(cls, size:str=-1, *args, mode='queue', **kwargs): + if mode == 'queue': + return cls.import_object('queue.Queue')(size, *args, **kwargs) + elif mode in ['multiprocessing', 'mp', 'process']: + return cls.module('process')(size, *args, **kwargs) + elif mode == 'ray': + return cls.import_object('ray.util.queue.Queue')(size, *args, **kwargs) + elif mode == 'redis': + return cls.import_object('redis.Queue')(size, *args, **kwargs) + elif mode == 'rabbitmq': + return cls.import_object('pika.Queue')(size, *args, **kwargs) + else: + raise 
NotImplementedError(f'mode {mode} not implemented')
+
+    @staticmethod
+    def is_class(module: Any) -> bool:
+        return type(module).__name__ == 'type'
+
+    @classmethod
+    def param_keys(cls, model:'nn.Module' = None) -> List[str]:
+        model = cls.resolve_model(model)
+        return list(model.state_dict().keys())
+
+    @classmethod
+    def params_map(cls, model, fmt='b'):
+        params_map = {}
+        state_dict = cls.resolve_model(model).state_dict()
+        for k,v in state_dict.items():
+            params_map[k] = {'shape': list(v.shape),
+                             'size': cls.get_tensor_size(v, fmt=fmt),
+                             'dtype': str(v.dtype),
+                             'requires_grad': v.requires_grad,
+                             'device': v.device,
+                             'numel': v.numel(),
+                             }
+        return params_map
+
+    @classmethod
+    def get_shortcut(cls, shortcut:str) -> dict:
+        return cls.shortcuts().get(shortcut)
+
+    @classmethod
+    def rm_shortcut(cls, shortcut) -> str:
+        shortcuts = cls.shortcuts()
+        if shortcut in shortcuts:
+            shortcuts.pop(shortcut)
+            cls.put_json('shortcuts', shortcuts)
+        return shortcut
+
+    @classmethod
+    def repo_url(cls, *args, **kwargs):
+        return cls.module('git').repo_url(*args, **kwargs)
+
+    @classmethod
+    def compose(cls, *args, **kwargs):
+        return cls.module('docker').compose(*args, **kwargs)
+
+    @classmethod
+    def ps(cls, *args, **kwargs):
+        return cls.get_module('docker').ps(*args, **kwargs)
+
+    @classmethod
+    def has_gpus(cls):
+        return bool(len(cls.gpus()) > 0)
+
+    @classmethod
+    def split_gather(cls, jobs:list, n=3, **kwargs) -> list:
+        if len(jobs) < n:
+            return cls.gather(jobs, **kwargs)
+        gather_jobs = [asyncio.gather(*job_chunk) for job_chunk in cls.chunk(jobs, num_chunks=n)]
+        gather_results = cls.gather(gather_jobs, **kwargs)
+        results = []
+        for gather_result in gather_results:
+            results += gather_result
+        return results
+
+    @classmethod
+    def addresses(cls, *args, **kwargs) -> List[str]:
+        return list(cls.namespace(*args, **kwargs).values())
+
+    @classmethod
+    def address_exists(cls, address:str) -> bool:
+        addresses = cls.addresses()
+        return address in addresses
+
+    @classmethod
+    def task(cls, fn, timeout=1, mode='asyncio'):
+        if mode == 'asyncio':
+            assert callable(fn)
+            future = asyncio.wait_for(fn, timeout=timeout)
+            return future
+        else:
+            raise NotImplementedError(f'mode {mode} not implemented')
+
+    @classmethod
+    def shuffle(cls, x:list) -> list:
+        if len(x) == 0:
+            return x
+        random.shuffle(x)
+        return x
+
+    @classmethod
+    def retry(cls, fn, trials:int = 3, verbose:bool = True):
+        # if fn is a bound method, unwrap it to the underlying function
+        if hasattr(fn, '__self__'):
+            fn = fn.__func__
+        def wrapper(*args, **kwargs):
+            for i in range(trials):
+                try:
+                    return fn(*args, **kwargs)
+                except Exception as e:
+                    if verbose:
+                        cls.print(cls.detailed_error(e), color='red')
+                        cls.print(f'Retrying {fn.__name__} {i+1}/{trials}', color='red')
+                    # surface the error instead of silently returning None
+                    if i == trials - 1:
+                        raise e
+        return wrapper
+
+    @staticmethod
+    def reverse_map(x:dict) -> dict:
+        '''
+        reverse a dictionary
+        '''
+        return {v:k for k,v in x.items()}
+
+    @classmethod
+    def df(cls, x, **kwargs):
+        return cls.import_object('pandas.DataFrame')(x, **kwargs)
+
+    @classmethod
+    def torch(cls):
+        return cls.import_module('torch')
+
+    @classmethod
+    def tensor(cls, *args, **kwargs):
+        return cls.import_object('torch.tensor')(*args, **kwargs)
+
+    @staticmethod
+    def random_int(start_value=100, end_value=None):
+        if end_value == None:
+            # a single argument means sample from [0, start_value]
+            start_value, end_value = 0, start_value
+        assert start_value != None, 'start_value must be provided'
+        assert end_value != None, 'end_value must be provided'
+        return random.randint(start_value, end_value)
+
+    @staticmethod
+    def mean(x:list=[0,1,2,3,4,5,6,7,8,9,10]):
+        if not isinstance(x, list):
+            x = list(x)
+        return sum(x) / len(x)
+
+    @staticmethod
+    def median(x:list=[0,1,2,3,4,5,6,7,8,9,10]):
+        if not isinstance(x, list):
+            x = list(x)
+        x = sorted(x)
+        n = len(x)
+        if n % 2 == 0:
+            return (x[n//2] + x[n//2 - 1]) / 2
+        else:
+            return x[n//2]
+
+    @classmethod
+    def stdev(cls, x:list=[0,1,2,3,4,5,6,7,8,9,10], p=2):
+        if not isinstance(x, list):
+            x = list(x)
+        mean = cls.mean(x)
+        return (sum([(i - mean)**p for i in x]) / len(x))**(1/p)
+    std = stdev
+
+    @classmethod
+    def set_env(cls, key:str, value:str) -> str:
+        '''
+        Sets an environment variable for this process and returns the value
+        '''
+        os.environ[key] = value
+        return value
+
+    @classmethod
+    def pwd(cls):
+        pwd = os.getenv('PWD', cls.libpath) # the working directory the process started from
+        return pwd
+
+    @classmethod
+    def choice(cls, options:Union[list, dict]) -> Any:
+        options = deepcopy(options) # copy to avoid mutating the original
+        if len(options) == 0:
+            return None
+        if isinstance(options, dict):
+            options = list(options.values())
+        assert isinstance(options, list), 'options must be a list'
+        return random.choice(options)
+
+    @classmethod
+    def sample(cls, options:list, n=2):
+        if isinstance(options, int):
+            options = list(range(options))
+        options = cls.shuffle(options)
+        return options[:n]
+
+    @classmethod
+    def chown(cls, path:str = None, sudo:bool = True):
+        path = cls.resolve_path(path)
+        user = cls.env('USER')
+        cmd = f'chown -R {user}:{user} {path}'
+        cls.cmd(cmd, sudo=sudo, verbose=True)
+        return {'success': True, 'message': f'chown cache {path}'}
+
+    @classmethod
+    def chown_cache(cls, sudo:bool = True):
+        return cls.chown(cls.cache_path, sudo=sudo)
+
+    @classmethod
+    def colors(cls):
+        return ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white', 'bright_black', 'bright_red', 'bright_green', 'bright_yellow', 'bright_blue', 'bright_magenta', 'bright_cyan', 'bright_white']
+    colours = colors
+
+    @classmethod
+    def random_color(cls):
+        return random.choice(cls.colors())
+    randcolor = randcolour = colour = color = random_colour = random_color
+
+    def get_util(self, util:str):
+        return self.get_module(util)
+
+    @classmethod
+    def random_float(cls, min=0, max=1):
+        return random.uniform(min, max)
+
+    @classmethod
+    def random_ratio_selection(cls, x:list, ratio:float = 0.5) -> list:
+        if type(x) in [float, int]:
+            x = list(range(int(x)))
+        assert len(x) > 0
+        if ratio == 1:
+            return x
+        assert ratio > 0 and ratio <= 1
+        random.shuffle(x)
+        k = max(int(len(x) * ratio), 1)
+        return x[:k]
+
+    @classmethod
+    def link_cmd(cls, old, new):
+        link_cmd = cls.get('link_cmd', {})
+        assert isinstance(old, str), old
+        assert isinstance(new, str), new
+        link_cmd[new] = old
+        cls.put('link_cmd', link_cmd)
+
+    @classmethod
+    def resolve_memory(cls, memory: Union[str, int, float]) -> Union[int, float]:
+        scale_map = {
+            'kb': 1e3,
+            'mb': 1e6,
+            'gb': 1e9,
+            'b': 1,
+        }
+        if isinstance(memory, str):
+            # strip a known unit suffix (e.g. '10gb') and scale to bytes
+            for scale_key, scale_value in scale_map.items():
+                if memory.lower().endswith(scale_key):
+                    memory = int(float(memory[:-len(scale_key)].strip()) * scale_value)
+                    break
+        assert type(memory) in [float, int], f'memory must be a float or int, got {type(memory)}'
+        return memory
+
+    @classmethod
+    def filter(cls, text_list: List[str], filter_text: str) -> List[str]:
+        return
[text for text in text_list if filter_text in text] + + + @classmethod + def is_success(cls, x): + # assume that if the result is a dictionary, and it has an error key, then it is an error + if isinstance(x, dict): + if 'error' in x: + return False + if 'success' in x and x['success'] == False: + return False + + return True + + @classmethod + def is_error(cls, x:Any): + """ + The function checks if the result is an error + The error is a dictionary with an error key set to True + """ + if isinstance(x, dict): + if 'error' in x and x['error'] == True: + return True + if 'success' in x and x['success'] == False: + return True + return False + + @classmethod + def is_int(cls, value) -> bool: + o = False + try : + int(value) + if '.' not in str(value): + o = True + except: + pass + return o + + + @classmethod + def is_float(cls, value) -> bool: + o = False + try : + float(value) + if '.' in str(value): + o = True + except: + pass + + return o + + + + @classmethod + def timer(cls, *args, **kwargs): + from commune.utils.time import Timer + return Timer(*args, **kwargs) + + @classmethod + def timeit(cls, fn, *args, include_result=False, **kwargs): + + t = cls.time() + if isinstance(fn, str): + fn = cls.get_fn(fn) + result = fn(*args, **kwargs) + response = { + 'latency': cls.time() - t, + 'fn': fn.__name__, + + } + if include_result: + print(response) + return result + return response + + @staticmethod + def remotewrap(fn, remote_key:str = 'remote'): + ''' + calls your function if you wrap it as such + + @c.remotewrap + def fn(): + pass + + # deploy it as a remote function + fn(remote=True) + ''' + + def remotewrap(self, *args, **kwargs): + remote = kwargs.pop(remote_key, False) + if remote: + return self.remote_fn(module=self, fn=fn.__name__, args=args, kwargs=kwargs) + else: + return fn(self, *args, **kwargs) + + return remotewrap + + + @staticmethod + def is_mnemonic(s: str) -> bool: + import re + # Match 12 or 24 words separated by spaces + return bool(re.match(r'^(\w+ ){11}\w+$', s)) or bool(re.match(r'^(\w+ ){23}\w+$', s)) + + @staticmethod + def is_private_key(s: str) -> bool: + import re + # Match a 64-character hexadecimal string + pattern = r'^[0-9a-fA-F]{64}$' + return bool(re.match(pattern, s)) + + + + @staticmethod + def address2ip(address:str) -> str: + return str('.'.join(address.split(':')[:-1])) + + @staticmethod + def as_completed( futures, timeout=10, **kwargs): + return concurrent.futures.as_completed(futures, timeout=timeout, **kwargs) + + + @classmethod + def dict2munch(cls, x:dict, recursive:bool=True)-> 'Munch': + from munch import Munch + ''' + Turn dictionary into Munch + ''' + if isinstance(x, dict): + for k,v in x.items(): + if isinstance(v, dict) and recursive: + x[k] = cls.dict2munch(v) + x = Munch(x) + return x + + @classmethod + def munch2dict(cls, x:'Munch', recursive:bool=True)-> dict: + from munch import Munch + ''' + Turn munch object into dictionary + ''' + if isinstance(x, Munch): + x = dict(x) + for k,v in x.items(): + if isinstance(v, Munch) and recursive: + x[k] = cls.munch2dict(v) + + return x + + + @classmethod + def munch(cls, x:Dict) -> 'Munch': + ''' + Converts a dict to a munch + ''' + return cls.dict2munch(x) + + + @classmethod + def time( cls, t=None) -> float: + import time + if t is not None: + return time.time() - t + else: + return time.time() + + @classmethod + def datetime(cls): + import datetime + # UTC + return datetime.datetime.utcnow().strftime("%Y-%m-%d_%H:%M:%S") + + @classmethod + def time2datetime(cls, t:float): + import datetime + 
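+        # Round-trip sketch (assuming the '_'-separated format used below):
+        #   c.time2datetime(0)                      # -> e.g. '1970-01-01_00:00:00'
+        #   c.datetime2time('1970-01-01_00:00:00')  # -> 0.0 (modulo local timezone)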
return datetime.datetime.fromtimestamp(t).strftime("%Y-%m-%d_%H:%M:%S")
+
+    time2date = time2datetime
+
+    @classmethod
+    def datetime2time(cls, x:str):
+        import datetime
+        # uses the same '_'-separated format as time2datetime so the two invert each other
+        return datetime.datetime.strptime(x, "%Y-%m-%d_%H:%M:%S").timestamp()
+
+    date2time = datetime2time
+
+    @classmethod
+    def delta_t(cls, t):
+        return t - cls.time()
+
+    @classmethod
+    def timestamp(cls) -> float:
+        return int(cls.time())
+
+    @classmethod
+    def sleep(cls, seconds:float) -> None:
+        import time
+        time.sleep(seconds)
+        return None
+
+    def search_dict(self, d:dict, search:str) -> dict:
+        # keep the entries whose key matches any comma-separated search term
+        search = search.split(',')
+        new_d = {}
+        for k,v in d.items():
+            if any(s in k.lower() for s in search):
+                new_d[k] = v
+        return new_d
+
+    @classmethod
+    def path2text(cls, path:str, relative=False):
+        path = cls.resolve_path(path)
+        assert os.path.exists(path), f'path {path} does not exist'
+        if os.path.isdir(path):
+            filepath_list = cls.glob(path + '/**')
+        else:
+            filepath_list = [path]
+        path2text = {}
+        for filepath in filepath_list:
+            try:
+                path2text[filepath] = cls.get_text(filepath)
+            except Exception:
+                pass
+        if relative:
+            pwd = cls.pwd()
+            path2text = {os.path.relpath(k, pwd):v for k,v in path2text.items()}
+        return path2text
+
+    @classmethod
+    def root_key(cls):
+        return cls.get_key()
+
+    @classmethod
+    def root_key_address(cls) -> str:
+        return cls.root_key().ss58_address
+
+    @classmethod
+    def is_root_key(cls, address:str) -> bool:
+        return address == cls.root_key().ss58_address
+
+    # time within the context
+    @classmethod
+    def context_timer(cls, *args, **kwargs):
+        return cls.timer(*args, **kwargs)
+
+    @classmethod
+    def folder_structure(cls, path:str='./', search='py', max_depth:int=5, depth:int=0) -> list:
+        files = cls.glob(path + '/**')
+        results = []
+        for file in files:
+            if os.path.isdir(file):
+                if depth < max_depth:
+                    # recurse and keep the results (they were previously discarded)
+                    results += cls.folder_structure(file, search=search, max_depth=max_depth, depth=depth+1)
+            elif search in file:
+                results.append(file)
+        return results
+
+    @classmethod
+    def copy(cls, data: Any) -> Any:
+        import copy
+        return copy.deepcopy(data)
+
+    @classmethod
+    def find_word(cls, word:str, path='./') -> dict:
+        import commune as c
+        path = c.resolve_path(path)
+        files = c.glob(path)
+        progress = c.tqdm(len(files))
+        found_files = {}
+        for f in files:
+            try:
+                text = c.get_text(f)
+                if word not in text:
+                    continue
+                lines = text.split('\n')
+            except Exception:
+                continue
+            line2text = {i:line for i, line in enumerate(lines) if word in line}
+            found_files[f[len(path)+1:]] = line2text
+            progress.update(1)
+        return found_files
+
+    @classmethod
+    def pip_install(cls,
+                    lib:str = None,
+                    upgrade:bool = True,
+                    verbose:str = True,
+                    ):
+        import commune as c
+        if lib in c.modules():
+            c.print(f'Installing {lib} Module from local directory')
+            lib = c.resolve_object(lib).dirpath()
+        if lib == None:
+            lib = c.libpath
+        # editable install for local paths, otherwise a normal pip install
+        if c.exists(lib):
+            cmd = f'pip install -e {lib}'
+        else:
+            cmd = f'pip install {lib}'
+        if upgrade:
+            cmd += ' --upgrade'
+        return cls.cmd(cmd, verbose=verbose)
+
+    @classmethod
+    def pip_exists(cls, lib:str, verbose:str=True):
+        return bool(lib in cls.pip_libs())
+
+    @classmethod
+    def hash(cls, x, mode: str='sha256', *args, **kwargs) -> str:
+        import hashlib
+        x = cls.python2str(x)
+        if mode == 'keccak':
+            return cls.import_object('web3.main.Web3').keccak(text=x, *args, **kwargs).hex()
+        elif mode == 'ss58':
+            return cls.import_object('scalecodec.utils.ss58.ss58_encode')(x, *args, **kwargs)
+        elif mode ==
'python': + return hash(x) + elif mode == 'md5': + return hashlib.md5(x.encode()).hexdigest() + elif mode == 'sha256': + return hashlib.sha256(x.encode()).hexdigest() + elif mode == 'sha512': + return hashlib.sha512(x.encode()).hexdigest() + elif mode =='sha3_512': + return hashlib.sha3_512(x.encode()).hexdigest() + else: + raise ValueError(f'unknown mode {mode}') + + @classmethod + def hash_modes(cls): + return ['keccak', 'ss58', 'python', 'md5', 'sha256', 'sha512', 'sha3_512'] + + str2hash = hash + + + def set_api_key(self, api_key:str, cache:bool = True): + api_key = os.getenv(str(api_key), None) + if api_key == None: + api_key = self.get_api_key() + self.api_key = api_key + if cache: + self.add_api_key(api_key) + assert isinstance(api_key, str) + + + def add_api_key(self, api_key:str, path=None): + assert isinstance(api_key, str) + path = self.resolve_path(path or 'api_keys') + api_keys = self.get(path, []) + api_keys.append(api_key) + api_keys = list(set(api_keys)) + self.put(path, api_keys) + return {'api_keys': api_keys} + + def set_api_keys(self, api_keys:str): + api_keys = list(set(api_keys)) + self.put('api_keys', api_keys) + return {'api_keys': api_keys} + + def rm_api_key(self, api_key:str): + assert isinstance(api_key, str) + api_keys = self.get(self.resolve_path('api_keys'), []) + for i in range(len(api_keys)): + if api_key == api_keys[i]: + api_keys.pop(i) + break + path = self.resolve_path('api_keys') + self.put(path, api_keys) + return {'api_keys': api_keys} + + def get_api_key(self, module=None): + if module != None: + self = self.module(module) + api_keys = self.api_keys() + if len(api_keys) == 0: + raise + else: + return self.choice(api_keys) + + def api_keys(self): + return self.get(self.resolve_path('api_keys'), []) + + + def rm_api_keys(self): + self.put(self.resolve_path('api_keys'), []) + return {'api_keys': []} + + + + + thread_map = {} + + @classmethod + def wait(cls, futures:list, timeout:int = None, generator:bool=False, return_dict:bool = True) -> list: + is_singleton = bool(not isinstance(futures, list)) + + futures = [futures] if is_singleton else futures + # if type(futures[0]) in [asyncio.Task, asyncio.Future]: + # return cls.gather(futures, timeout=timeout) + + if len(futures) == 0: + return [] + if cls.is_coroutine(futures[0]): + return cls.gather(futures, timeout=timeout) + + future2idx = {future:i for i,future in enumerate(futures)} + + if timeout == None: + if hasattr(futures[0], 'timeout'): + timeout = futures[0].timeout + else: + timeout = 30 + + if generator: + def get_results(futures): + try: + for future in concurrent.futures.as_completed(futures, timeout=timeout): + if return_dict: + idx = future2idx[future] + yield {'idx': idx, 'result': future.result()} + else: + yield future.result() + except Exception as e: + yield None + + else: + def get_results(futures): + results = [None]*len(futures) + try: + for future in concurrent.futures.as_completed(futures, timeout=timeout): + idx = future2idx[future] + results[idx] = future.result() + del future2idx[future] + if is_singleton: + results = results[0] + except Exception as e: + unfinished_futures = [future for future in futures if future in future2idx] + cls.print(f'Error: {e}, {len(unfinished_futures)} unfinished futures with timeout {timeout} seconds') + return results + + return get_results(futures) + + + + @classmethod + def gather(cls,jobs:list, timeout:int = 20, loop=None)-> list: + + if loop == None: + loop = cls.get_event_loop() + + if not isinstance(jobs, list): + singleton = True + jobs = 
[jobs] + else: + singleton = False + + assert isinstance(jobs, list) and len(jobs) > 0, f'Invalid jobs: {jobs}' + # determine if we are using asyncio or multiprocessing + + # wait until they finish, and if they dont, give them none + + # return the futures that done timeout or not + async def wait_for(future, timeout): + try: + result = await asyncio.wait_for(future, timeout=timeout) + except asyncio.TimeoutError: + result = {'error': f'TimeoutError: {timeout} seconds'} + + return result + + jobs = [wait_for(job, timeout=timeout) for job in jobs] + future = asyncio.gather(*jobs) + results = loop.run_until_complete(future) + + if singleton: + return results[0] + return results + + + + + @classmethod + def submit(cls, + fn, + params = None, + kwargs: dict = None, + args:list = None, + timeout:int = 40, + return_future:bool=True, + init_args : list = [], + init_kwargs:dict= {}, + executor = None, + module: str = None, + mode:str='thread', + max_workers : int = 100, + ): + kwargs = {} if kwargs == None else kwargs + args = [] if args == None else args + if params != None: + if isinstance(params, dict): + kwargs = {**kwargs, **params} + elif isinstance(params, list): + args = [*args, *params] + else: + raise ValueError('params must be a list or a dictionary') + + fn = cls.get_fn(fn) + executor = cls.executor(max_workers=max_workers, mode=mode) if executor == None else executor + args = cls.copy(args) + kwargs = cls.copy(kwargs) + init_kwargs = cls.copy(init_kwargs) + init_args = cls.copy(init_args) + if module == None: + module = cls + else: + module = cls.module(module) + if isinstance(fn, str): + method_type = cls.classify_fn(getattr(module, fn)) + elif callable(fn): + method_type = cls.classify_fn(fn) + else: + raise ValueError('fn must be a string or a callable') + + if method_type == 'self': + module = module(*init_args, **init_kwargs) + + future = executor.submit(fn=fn, args=args, kwargs=kwargs, timeout=timeout) + + if not hasattr(cls, 'futures'): + cls.futures = [] + + cls.futures.append(future) + + + if return_future: + return future + else: + return cls.wait(future, timeout=timeout) + + @classmethod + def submit_batch(cls, fn:str, batch_kwargs: List[Dict[str, Any]], return_future:bool=False, timeout:int=10, module = None, *args, **kwargs): + n = len(batch_kwargs) + module = cls if module == None else module + executor = cls.executor(max_workers=n) + futures = [ executor.submit(fn=getattr(module, fn), kwargs=batch_kwargs[i], timeout=timeout) for i in range(n)] + if return_future: + return futures + return cls.wait(futures) + + + executor_cache = {} + @classmethod + def executor(cls, max_workers:int=None, mode:str="thread", maxsize=200, **kwargs): + return c.module(f'executor')(max_workers=max_workers, maxsize=maxsize ,mode=mode, **kwargs) + + @staticmethod + def detailed_error(e) -> dict: + import traceback + tb = traceback.extract_tb(e.__traceback__) + file_name = tb[-1].filename + line_no = tb[-1].lineno + line_text = tb[-1].line + response = { + 'success': False, + 'error': str(e), + 'file_name': file_name, + 'line_no': line_no, + 'line_text': line_text + } + return response + + + @classmethod + def as_completed(cls , futures:list, timeout:int=10, **kwargs): + return concurrent.futures.as_completed(futures, timeout=timeout) + + @classmethod + def is_coroutine(cls, future): + """ + returns True if future is a coroutine + """ + return cls.obj2typestr(future) == 'coroutine' + + + @classmethod + def obj2typestr(cls, obj): + return str(type(obj)).split("'")[1] + + @classmethod + def 
tasks(cls, task = None, mode='pm2',**kwargs) -> List[str]: + kwargs['network'] = 'local' + kwargs['update'] = False + modules = cls.servers( **kwargs) + tasks = getattr(cls, f'{mode}_list')(task) + tasks = list(filter(lambda x: x not in modules, tasks)) + return tasks + + + @classmethod + def asubmit(cls, fn:str, *args, **kwargs): + + async def _asubmit(): + kwargs.update(kwargs.pop('kwargs',{})) + return fn(*args, **kwargs) + return _asubmit() + + + + thread_map = {} + + @classmethod + def thread(cls,fn: Union['callable', str], + args:list = None, + kwargs:dict = None, + daemon:bool = True, + name = None, + tag = None, + start:bool = True, + tag_seperator:str='::', + **extra_kwargs): + + if isinstance(fn, str): + fn = cls.get_fn(fn) + if args == None: + args = [] + if kwargs == None: + kwargs = {} + + assert callable(fn), f'target must be callable, got {fn}' + assert isinstance(args, list), f'args must be a list, got {args}' + assert isinstance(kwargs, dict), f'kwargs must be a dict, got {kwargs}' + + # unique thread name + if name == None: + name = fn.__name__ + cnt = 0 + while name in cls.thread_map: + cnt += 1 + if tag == None: + tag = '' + name = name + tag_seperator + tag + str(cnt) + + if name in cls.thread_map: + cls.thread_map[name].join() + + t = threading.Thread(target=fn, args=args, kwargs=kwargs, **extra_kwargs) + # set the time it starts + setattr(t, 'start_time', cls.time()) + t.daemon = daemon + if start: + t.start() + cls.thread_map[name] = t + return t + + @classmethod + def threads(cls, search:str = None): + threads = list(cls.thread_map.keys()) + if search != None: + threads = [t for t in threads if search in t] + return threads + + + + + +c.enable_routes() +# c.add_utils() + +Module = c # Module is alias of c +Module.run(__name__) + + diff --git a/commune/module/__init__.py b/commune/module/__init__.py deleted file mode 100644 index 8d74e5fb..00000000 --- a/commune/module/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .module import Module \ No newline at end of file diff --git a/commune/module/_config.py b/commune/module/_config.py deleted file mode 100644 index 09ea71f6..00000000 --- a/commune/module/_config.py +++ /dev/null @@ -1,162 +0,0 @@ -from typing import * -import os - -class Config: - def __init__(self, *args, **kwargs): - self.set_config(*args, **kwargs) - - def set_config(self, - config:Optional[Union[str, dict]]=None, - kwargs:dict=None, - add_attributes: bool = False, - **extra_kwargs - ) -> 'Munch': - ''' - Set the config as well as its local params - ''' - kwargs = kwargs or {} - kwargs.update(extra_kwargs) - # in case they passed in a locals() dict, we want to resolve the kwargs and avoid ambiguous args - config = config or {} - config.update(kwargs) - default_config = self.config if not callable(self.config) else self.config() - config = {**default_config, **config} - if 'kwargs' in config: - config.update(config.pop('kwargs')) - if isinstance(config, dict): - config = self.dict2munch(config) - # get the config - # add the config attributes to the class (via munch -> dict -> class ) - if add_attributes: - self.__dict__.update(self.munch2dict(config)) - - self.config = config - self.kwargs = kwargs - - - return self.config - - @classmethod - def config(cls) -> 'Munch': - ''' - Returns the config - ''' - config = cls.load_config() - if not config: - if hasattr(cls, 'init_kwargs'): - config = cls.init_kwargs() # from _schema.py - else: - config = {} - return config - - - @classmethod - def load_config(cls, path:str=None, - default=None, - to_munch:bool = 
True - ) -> Union['Munch', Dict]: - ''' - Args: - path: The path to the config file - to_munch: If true, then convert the config to a munch - ''' - - default = default or {} - path = path if path else cls.config_path() - - if os.path.exists(path): - config = cls.load_yaml(path) - else: - config = default - config = config or {} - if to_munch: - config = cls.dict2munch(config) - return config - - @classmethod - def save_config(cls, config:Union['Munch', Dict]= None, path:str=None) -> 'Munch': - from copy import deepcopy - from munch import Munch - - ''' - Saves the config to a yaml file - ''' - if config == None: - config = cls.config() - - if isinstance(config, Munch): - config = cls.munch2dict(deepcopy(config)) - elif isinstance(config, dict): - config = deepcopy(config) - else: - raise ValueError(f'config must be a dict or munch, not {type(config)}') - - assert isinstance(config, dict), f'config must be a dict, not {config}' - - config = cls.save_yaml(data=config , path=path) - - return config - - - def config_exists(self, path:str=None) -> bool: - ''' - Returns true if the config exists - ''' - path = path if path else self.config_path() - return self.path_exists(path) - - - - @classmethod - def munch(cls, x:dict, recursive:bool=True)-> 'Munch': - from munch import Munch - ''' - Turn dictionary into Munch - ''' - if isinstance(x, dict): - for k,v in x.items(): - if isinstance(v, dict) and recursive: - x[k] = cls.dict2munch(v) - x = Munch(x) - return x - dict2munch = munch - - @classmethod - def munch2dict(cls, x:'Munch', recursive:bool=True)-> dict: - from munch import Munch - - ''' - Turn munch object into dictionary - ''' - if isinstance(x, Munch): - x = dict(x) - for k,v in x.items(): - if isinstance(v, Munch) and recursive: - x[k] = cls.munch2dict(v) - return x - to_dict = munch2dict - - - @classmethod - def has_config(cls) -> bool: - - try: - return os.path.exists(cls.config_path()) - except: - return False - - @classmethod - def config_path(cls) -> str: - return os.path.abspath('./config.yaml') - - def update_config(self, config): - self.config.update(config) - return self.config - - - @classmethod - def base_config(cls, cache=True): - if cache and hasattr(cls, '_base_config'): - return cls._base_config - cls._base_config = cls.get_yaml(cls.config_path()) - return cls._base_config \ No newline at end of file diff --git a/commune/module/_logger.py b/commune/module/_logger.py deleted file mode 100644 index d0a99d33..00000000 --- a/commune/module/_logger.py +++ /dev/null @@ -1,89 +0,0 @@ -from rich.console import Console -class Logger: - - @classmethod - def critical(cls, *args, **kwargs): - console = cls.resolve_console() - return console.critical(*args, **kwargs) - - @classmethod - def resolve_console(cls, console = None, **kwargs): - if hasattr(cls,'console'): - return cls.console - import logging - from rich.logging import RichHandler - from rich.console import Console - logging.basicConfig( handlers=[RichHandler()]) - # print the line number - console = Console() - cls.console = console - return console - - @classmethod - def print(cls, *text:str, - color:str=None, - verbose:bool = True, - console: Console = None, - flush:bool = False, - buffer:str = None, - **kwargs): - - if not verbose: - return - if color == 'random': - color = cls.random_color() - if color: - kwargs['style'] = color - - if buffer != None: - text = [buffer] + list(text) + [buffer] - - console = cls.resolve_console(console) - try: - if flush: - console.print(**kwargs, end='\r') - console.print(*text, **kwargs) 
- except Exception as e: - print(e) - @classmethod - def success(cls, *args, **kwargs): - logger = cls.resolve_logger() - return logger.success(*args, **kwargs) - - @classmethod - def error(cls, *args, **kwargs): - logger = cls.resolve_logger() - return logger.error(*args, **kwargs) - - @classmethod - def debug(cls, *args, **kwargs): - logger = cls.resolve_logger() - return logger.debug(*args, **kwargs) - - @classmethod - def warning(cls, *args, **kwargs): - logger = cls.resolve_logger() - return logger.warning(*args, **kwargs) - @classmethod - def status(cls, *args, **kwargs): - console = cls.resolve_console() - return console.status(*args, **kwargs) - @classmethod - def log(cls, *args, **kwargs): - console = cls.resolve_console() - return console.log(*args, **kwargs) - - ### LOGGER LAND ### - @classmethod - def resolve_logger(cls, logger = None): - if not hasattr(cls,'logger'): - from loguru import logger - cls.logger = logger.opt(colors=True) - if logger is not None: - cls.logger = logger - return cls.logger - - @staticmethod - def echo(x): - return x - diff --git a/commune/module/_manager.py b/commune/module/_manager.py deleted file mode 100644 index a71b776a..00000000 --- a/commune/module/_manager.py +++ /dev/null @@ -1,680 +0,0 @@ - -from typing import * -import os - -class Manager: - - @classmethod - def resolve_extension(cls, filename:str, extension = '.py') -> str: - if filename.endswith(extension): - return filename - return filename + extension - - @classmethod - def simple2path(cls, - simple:str, - extension = '.py', - avoid_dirnames = ['', 'src', - 'commune', - 'commune/module', - 'commune/modules', - 'modules', - 'blocks', - 'agents', - 'commune/agents'], - **kwargs) -> bool: - """ - converts the module path to a file path - - for example - - model.openai.gpt3 -> model/openai/gpt3.py, model/openai/gpt3_module.py, model/openai/__init__.py - model.openai -> model/openai.py or model/openai_module.py or model/__init__.py - - Parameters: - path (str): The module path - """ - # if cls.libname in simple and '/' not in simple and cls.can_import_module(simple): - # return simple - shortcuts = cls.shortcuts() - simple = shortcuts.get(simple, simple) - - if simple.endswith(extension): - simple = simple[:-len(extension)] - - path = None - pwd = cls.pwd() - path_options = [] - simple = simple.replace('/', '.') - - # create all of the possible paths by combining the avoid_dirnames with the simple path - dir_paths = list([pwd+ '/' + x for x in avoid_dirnames]) # local first - dir_paths += list([cls.libpath + '/' + x for x in avoid_dirnames]) # add libpath stuff - - for dir_path in dir_paths: - if dir_path.endswith('/'): - dir_path = dir_path[:-1] - # '/' count how many times the path has been split - module_dirpath = dir_path + '/' + simple.replace('.', '/') - if os.path.isdir(module_dirpath): - simple_filename = simple.replace('.', '_') - filename_options = [simple_filename, simple_filename + '_module', 'module_'+ simple_filename] + ['module'] + simple.split('.') + ['__init__'] - path_options += [module_dirpath + '/' + f for f in filename_options] - else: - module_filepath = dir_path + '/' + simple.replace('.', '/') - path_options += [module_filepath] - for p in path_options: - p = cls.resolve_extension(p) - if os.path.exists(p): - p_text = cls.get_text(p) - path = p - if 'commune' in p_text and 'class ' in p_text or ' def ' in p_text: - return p - if path != None: - break - return path - - - @classmethod - def is_repo(cls, libpath:str ): - # has the .git folder - return bool([f for f in 
cls.ls(libpath) if '.git' in f and os.path.isdir(f)]) - - - @classmethod - def path2simple(cls, - path:str, - tree = None, - ignore_prefixes = ['src', 'commune', 'modules', 'commune.modules', - 'commune.commune', - 'commune.module', 'module', 'router'], - module_folder_filnames = ['__init__', 'main', 'module'], - module_extension = 'py', - ignore_suffixes = ['module'], - name_map = {'commune': 'module'}, - compress_path = True, - verbose = False, - num_lines_to_read = 100, - ) -> str: - - path = os.path.abspath(path) - path_filename_with_extension = path.split('/')[-1] # get the filename with extension - path_extension = path_filename_with_extension.split('.')[-1] # get the extension - assert path_extension == module_extension, f'Invalid extension {path_extension} for path {path}' - path_filename = path_filename_with_extension[:-len(path_extension)-1] # remove the extension - path_filename_chunks = path_filename.split('_') - path_chunks = path.split('/') - - if path.startswith(cls.libpath): - path = path[len(cls.libpath):] - else: - # if the tree path is not in the path, we want to remove the root path - pwd = cls.pwd() - path = path[len(pwd):] - dir_chunks = path.split('/')[:-1] if '/' in path else [] - is_module_folder = all([bool(chunk in dir_chunks) for chunk in path_filename_chunks]) - is_module_folder = is_module_folder or (path_filename in module_folder_filnames) - if is_module_folder: - path = '/'.join(path.split('/')[:-1]) - path = path[1:] if path.startswith('/') else path - path = path.replace('/', '.') - module_extension = '.'+module_extension - if path.endswith(module_extension): - path = path[:-len(module_extension)] - if compress_path: - # we want to remove redundant chunks - # for example if the path is 'module/module' we want to remove the redundant module - path_chunks = path.split('.') - simple_path = [] - for chunk in path_chunks: - if chunk not in simple_path: - simple_path += [chunk] - simple_path = '.'.join(simple_path) - else: - simple_path = path - # FILTER PREFIXES - for prefix in ignore_prefixes: - prefix += '.' - if simple_path.startswith(prefix) and simple_path != prefix: - simple_path = simple_path[len(prefix):] - cls.print(f'Prefix {prefix} in path {simple_path}', color='yellow', verbose=verbose) - # FILTER SUFFIXES - for suffix in ignore_suffixes: - suffix = '.' 
+ suffix - if simple_path.endswith(suffix) and simple_path != suffix: - simple_path = simple_path[:-len(suffix)] - cls.print(f'Suffix {suffix} in path {simple_path}', color='yellow', verbose=verbose) - - # remove leading and trailing dots - if simple_path.startswith('.'): - simple_path = simple_path[1:] - if simple_path.endswith('.'): - simple_path = simple_path[:-1] - simple_path = name_map.get(simple_path, simple_path) - return simple_path - - @classmethod - def path_config_exists(cls, path:str, - config_files = ['config.yaml', 'config.yml'], - config_extensions=['.yaml', '.yml']) -> bool: - ''' - Checks if the path exists - ''' - config_files += [path.replace('.py', ext) for ext in config_extensions] - dirpath = os.path.dirname(path) - dir_files = os.listdir(dirpath) - if os.path.exists(dirpath) and any([[f.endswith(cf) for cf in config_files] for f in dir_files]): - return True - return False - - - @classmethod - def resolve_cache_path(self, path): - path = path.replace("/", "_") - if path.startswith('_'): - path = path[1:] - path = f'cached_path/{path}' - return path - - @classmethod - def cached_paths(cls): - return cls.ls('cached_paths') - - - @classmethod - def find_classes(cls, path='./', working=False): - - path = os.path.abspath(path) - if os.path.isdir(path): - classes = [] - generator = cls.glob(path+'/**/**.py', recursive=True) - for p in generator: - if p.endswith('.py'): - p_classes = cls.find_classes(p ) - if working: - for class_path in p_classes: - try: - cls.import_object(class_path) - classes += [class_path] - except Exception as e: - r = cls.detailed_error(e) - r['class'] = class_path - cls.print(r, color='red') - continue - else: - classes += p_classes - - return classes - - code = cls.get_text(path) - classes = [] - file_path = cls.path2objectpath(path) - - for line in code.split('\n'): - if all([s in line for s in ['class ', ':']]): - new_class = line.split('class ')[-1].split('(')[0].strip() - if new_class.endswith(':'): - new_class = new_class[:-1] - if ' ' in new_class: - continue - classes += [new_class] - classes = [file_path + '.' + c for c in classes] - - libpath_objpath_prefix = cls.libpath.replace('/', '.')[1:] + '.' - classes = [c.replace(libpath_objpath_prefix, '') for c in classes] - return classes - - - - - @classmethod - def find_class2functions(cls, path, working=False): - - path = os.path.abspath(path) - if os.path.isdir(path): - class2functions = {} - for p in cls.glob(path+'/**/**.py', recursive=True): - if p.endswith('.py'): - object_path = cls.path2objectpath(p) - response = cls.find_class2functions(p ) - for k,v in response.items(): - class2functions[object_path+ '.' 
+k] = v - return class2functions - - code = cls.get_text(path) - classes = [] - class2functions = {} - class_functions = [] - new_class = None - for line in code.split('\n'): - if all([s in line for s in ['class ', ':']]): - new_class = line.split('class ')[-1].split('(')[0].strip() - if new_class.endswith(':'): - new_class = new_class[:-1] - if ' ' in new_class: - continue - classes += [new_class] - if len(class_functions) > 0: - class2functions[new_class] = cls.copy(class_functions) - class_functions = [] - if all([s in line for s in [' def', '(']]): - fn = line.split(' def')[-1].split('(')[0].strip() - class_functions += [fn] - if new_class != None: - class2functions[new_class] = class_functions - - return class2functions - - @classmethod - def path2objectpath(cls, path:str, **kwargs) -> str: - libpath = cls.libpath - path.replace - if path.startswith(libpath): - path = path.replace(libpath , '')[1:].replace('/', '.').replace('.py', '') - else: - pwd = cls.pwd() - if path.startswith(pwd): - path = path.replace(pwd, '')[1:].replace('/', '.').replace('.py', '') - - return path.replace('__init__.', '.') - - - @classmethod - def find_functions(cls, path = './', working=False): - fns = [] - if os.path.isdir(path): - path = os.path.abspath(path) - for p in cls.glob(path+'/**/**.py', recursive=True): - p_fns = cls.find_functions(p) - file_object_path = cls.path2objectpath(p) - p_fns = [file_object_path + '.' + f for f in p_fns] - for fn in p_fns: - if working: - try: - cls.import_object(fn) - except Exception as e: - r = cls.detailed_error(e) - r['fn'] = fn - cls.print(r, color='red') - continue - fns += [fn] - - else: - code = cls.get_text(path) - for line in code.split('\n'): - if line.startswith('def ') or line.startswith('async def '): - fn = line.split('def ')[-1].split('(')[0].strip() - fns += [fn] - return fns - - - @classmethod - def find_async_functions(cls, path): - if os.path.isdir(path): - path2classes = {} - for p in cls.glob(path+'/**/**.py', recursive=True): - path2classes[p] = cls.find_functions(p) - return path2classes - code = cls.get_text(path) - fns = [] - for line in code.split('\n'): - if line.startswith('async def '): - fn = line.split('def ')[-1].split('(')[0].strip() - fns += [fn] - return [c for c in fns] - - @classmethod - def find_objects(cls, path:str = './', search=None, working=False, **kwargs): - classes = cls.find_classes(path, working=working) - functions = cls.find_functions(path, working=working) - - if search != None: - classes = [c for c in classes if search in c] - functions = [f for f in functions if search in f] - object_paths = functions + classes - return object_paths - objs = find_objects - - - - def find_working_objects(self, path:str = './', **kwargs): - objects = self.find_objects(path, **kwargs) - working_objects = [] - progress = self.tqdm(objects, desc='Progress') - error_progress = self.tqdm(objects, desc='Errors') - - for obj in objects: - - try: - self.import_object(obj) - working_objects += [obj] - progress.update(1) - except: - error_progress.update(1) - pass - return working_objects - - search = find_objects - - @classmethod - def simple2objectpath(cls, - simple_path:str, - cactch_exception = False, - **kwargs) -> str: - - object_path = cls.simple2path(simple_path, **kwargs) - classes = cls.find_classes(object_path) - return classes[-1] - - @classmethod - def simple2object(cls, path:str, **kwargs) -> str: - path = cls.simple2objectpath(path, **kwargs) - try: - return cls.import_object(path) - except: - path = cls.tree().get(path) - return 
cls.import_object(path) - - included_pwd_in_path = False - @classmethod - def import_module(cls, - import_path:str, - included_pwd_in_path=True, - try_prefixes = ['commune','commune.modules', 'modules', 'commune.subspace', 'subspace'] - ) -> 'Object': - from importlib import import_module - if included_pwd_in_path and not cls.included_pwd_in_path: - import sys - pwd = cls.pwd() - sys.path.append(pwd) - sys.path = list(set(sys.path)) - cls.included_pwd_in_path = True - - # if commune is in the path more than once, we want to remove the duplicates - if cls.libname in import_path: - import_path = cls.libname + import_path.split(cls.libname)[-1] - pwd = cls.pwd() - try: - return import_module(import_path) - except Exception as _e: - for prefix in try_prefixes: - try: - return import_module(f'{prefix}.{import_path}') - except Exception as e: - pass - raise _e - - @classmethod - def can_import_module(cls, module:str) -> bool: - ''' - Returns true if the module is valid - ''' - try: - cls.import_module(module) - return True - except: - return False - @classmethod - def can_import_object(cls, module:str) -> bool: - ''' - Returns true if the module is valid - ''' - try: - cls.import_object(module) - return True - except: - return False - - @classmethod - def import_object(cls, key:str, verbose: bool = 0, trials=3)-> Any: - ''' - Import an object from a string with the format of {module_path}.{object} - Examples: import_object("torch.nn"): imports nn from torch - ''' - module = '.'.join(key.split('.')[:-1]) - object_name = key.split('.')[-1] - if verbose: - cls.print(f'Importing {object_name} from {module}') - obj = getattr(cls.import_module(module), object_name) - return obj - - obj = get_obj = import_object - - - @classmethod - def object_exists(cls, path:str, verbose=False)-> Any: - try: - cls.import_object(path, verbose=verbose) - return True - except Exception as e: - return False - - imp = get_object = importobj = import_object - - @classmethod - def module_exists(cls, module:str, **kwargs) -> bool: - ''' - Returns true if the module exists - ''' - module_exists = module in cls.modules(**kwargs) - if not module_exists: - try: - module_path = cls.simple2path(module) - module_exists = cls.exists(module_path) - except: - pass - return module_exists - - @classmethod - def has_app(cls, module:str, **kwargs) -> bool: - return cls.module_exists(module + '.app', **kwargs) - - @classmethod - def simplify_paths(cls, paths): - paths = [cls.simplify_path(p) for p in paths] - paths = [p for p in paths if p] - return paths - - @classmethod - def simplify_path(cls, p, avoid_terms=['modules', 'agents']): - chunks = p.split('.') - if len(chunks) < 2: - return None - file_name = chunks[-2] - chunks = chunks[:-1] - path = '' - for chunk in chunks: - if chunk in path: - continue - path += chunk + '.' - if file_name.endswith('_module'): - path = '.'.join(path.split('.')[:-1]) - - if path.startswith(cls.libname + '.'): - path = path[len(cls.libname)+1:] - - if path.endswith('.'): - path = path[:-1] - - if '_' in file_name: - file_chunks = file_name.split('_') - if all([c in path for c in file_chunks]): - path = '.'.join(path.split('.')[:-1]) - for avoid in avoid_terms: - avoid = f'{avoid}.' 
- if avoid in path: - path = path.replace(avoid, '') - return path - - @classmethod - def local_modules(cls, search=None): - object_paths = cls.find_classes(cls.pwd()) - object_paths = cls.simplify_paths(object_paths) - if search != None: - object_paths = [p for p in object_paths if search in p] - return sorted(list(set(object_paths))) - @classmethod - def lib_tree(cls, ): - return cls.get_tree(cls.libpath) - @classmethod - def local_tree(cls ): - return cls.get_tree(cls.pwd()) - - @classmethod - def get_tree(cls, path): - class_paths = cls.find_classes(path) - simple_paths = cls.simplify_paths(class_paths) - return dict(zip(simple_paths, class_paths)) - - @classmethod - def get_module(cls, - path:str = 'module', - cache=True, - verbose = False, - update_tree_if_fail = True, - init_kwargs = None, - catch_error = False, - ) -> str: - import commune as c - path = path or 'module' - if catch_error: - try: - return cls.get_module(path=path, cache=cache, - verbose=verbose, - update_tree_if_fail=update_tree_if_fail, - init_kwargs=init_kwargs, - catch_error=False) - except Exception as e: - return c.detailed_error(e) - if path in ['module', 'c']: - return c.Module - # if the module is a valid import path - shortcuts = c.shortcuts() - if path in shortcuts: - path = shortcuts[path] - module = None - cache_key = path - t0 = c.time() - if cache and cache_key in c.module_cache: - module = c.module_cache[cache_key] - return module - module = c.simple2object(path) - # ensure module - if verbose: - c.print(f'Loaded {path} in {c.time() - t0} seconds', color='green') - - if init_kwargs != None: - module = module(**init_kwargs) - is_module = c.is_module(module) - if not is_module: - module = cls.obj2module(module) - if cache: - c.module_cache[cache_key] = module - return module - - - _tree = None - @classmethod - def tree(cls, search=None, cache=True): - if cls._tree != None and cache: - return cls._tree - local_tree = cls.local_tree() - lib_tree = cls.lib_tree() - tree = {**local_tree, **lib_tree} - if cache: - cls._tree = tree - if search != None: - tree = {k:v for k,v in tree.items() if search in k} - return tree - - return tree - - - def overlapping_modules(self, search:str=None, **kwargs): - local_modules = self.local_modules(search=search) - lib_modules = self.lib_modules(search=search) - return [m for m in local_modules if m in lib_modules] - - - @classmethod - def lib_modules(cls, search=None): - object_paths = cls.find_classes(cls.libpath ) - object_paths = cls.simplify_paths(object_paths) - if search != None: - object_paths = [p for p in object_paths if search in p] - return sorted(list(set(object_paths))) - - @classmethod - def find_modules(cls, search=None, **kwargs): - local_modules = cls.local_modules(search=search) - lib_modules = cls.lib_modules(search=search) - return sorted(list(set(local_modules + lib_modules))) - - _modules = None - @classmethod - def modules(cls, search=None, cache=True, **kwargs)-> List[str]: - modules = cls._modules - if not cache or modules == None: - modules = cls.find_modules(search=None, **kwargs) - if search != None: - modules = [m for m in modules if search in m] - return modules - get_modules = modules - - @classmethod - def has_module(cls, module): - return module in cls.modules() - - - - - - def new_modules(self, *modules, **kwargs): - for module in modules: - self.new_module(module=module, **kwargs) - - - - @classmethod - def new_module( cls, - module : str , - base_module : str = 'demo', - folder_module : bool = False, - update=1 - ): - - import commune as 
c - base_module = c.module(base_module) - module_class_name = ''.join([m[0].capitalize() + m[1:] for m in module.split('.')]) - base_module_class_name = base_module.class_name() - base_module_code = base_module.code().replace(base_module_class_name, module_class_name) - pwd = c.pwd() - path = os.path.join(pwd, module.replace('.', '/')) - if folder_module: - dirpath = path - filename = module.replace('.', '_') - path = os.path.join(path, filename) - - path = path + '.py' - dirpath = os.path.dirname(path) - if os.path.exists(path) and not update: - return {'success': True, 'msg': f'Module {module} already exists', 'path': path} - if not os.path.exists(dirpath): - os.makedirs(dirpath, exist_ok=True) - - c.put_text(path, base_module_code) - - return {'success': True, 'msg': f'Created module {module}', 'path': path} - - add_module = new_module - - - @classmethod - def has_local_module(cls, path=None): - import commune as c - path = '.' if path == None else path - if os.path.exists(f'{path}/module.py'): - text = c.get_text(f'{path}/module.py') - if 'class ' in text: - return True - return False diff --git a/commune/module/_misc.py b/commune/module/_misc.py deleted file mode 100644 index 605910f9..00000000 --- a/commune/module/_misc.py +++ /dev/null @@ -1,1102 +0,0 @@ - -from typing import * -import asyncio -from functools import partial -import random -import os -from copy import deepcopy -import concurrent - -class Misc: - - def path2functions(self, path=None): - path = path or (self.root_path + '/utils') - paths = self.ls(path) - path2functions = {} - print(paths) - - for p in paths: - - functions = [] - if os.path.isfile(p) == False: - continue - text = self.get_text(p) - if len(text) == 0: - continue - - for line in text.split('\n'): - print(line) - if 'def ' in line and '(' in line: - functions.append(line.split('def ')[1].split('(')[0]) - replative_path = p[len(path)+1:] - path2functions[replative_path] = functions - return path2functions - - @staticmethod - def chunk(sequence:list = [0,2,3,4,5,6,6,7], - chunk_size:int=4, - num_chunks:int= None): - assert chunk_size != None or num_chunks != None, 'must specify chunk_size or num_chunks' - if chunk_size == None: - chunk_size = len(sequence) / num_chunks - if chunk_size > len(sequence): - return [sequence] - if num_chunks == None: - num_chunks = int(len(sequence) / chunk_size) - if num_chunks == 0: - num_chunks = 1 - chunks = [[] for i in range(num_chunks)] - for i, element in enumerate(sequence): - idx = i % num_chunks - chunks[idx].append(element) - return chunks - - @classmethod - def batch(cls, x: list, batch_size:int=8): - return cls.chunk(x, chunk_size=batch_size) - - def cancel(self, futures): - for f in futures: - f.cancel() - return {'success': True, 'msg': 'cancelled futures'} - - - @classmethod - def cachefn(cls, func, max_age=60, update=False, cache=True, cache_folder='cachefn'): - import functools - path_name = cache_folder+'/'+func.__name__ - def wrapper(*args, **kwargs): - fn_name = func.__name__ - cache_params = {'max_age': max_age, 'cache': cache} - for k, v in cache_params.items(): - cache_params[k] = kwargs.pop(k, v) - - - if not update: - result = cls.get(fn_name, **cache_params) - if result != None: - return result - - result = func(*args, **kwargs) - - if cache: - cls.put(fn_name, result, cache=cache) - return result - return wrapper - - - @staticmethod - def round(x:Union[float, int], sig: int=6, small_value: float=1.0e-9): - from commune.utils.math import round_sig - return round_sig(x, sig=sig, 
small_value=small_value) - - @classmethod - def round_decimals(cls, x:Union[float, int], decimals: int=6, small_value: float=1.0e-9): - - import math - """ - Rounds x to the number of {sig} digits - :param x: - :param sig: signifant digit - :param small_value: smallest possible value - :return: - """ - x = float(x) - return round(x, decimals) - - - - - @staticmethod - def num_words( text): - return len(text.split(' ')) - - @classmethod - def random_word(cls, *args, n=1, seperator='_', **kwargs): - import commune as c - random_words = cls.module('key').generate_mnemonic(*args, **kwargs).split(' ')[0] - random_words = random_words.split(' ')[:n] - if n == 1: - return random_words[0] - else: - return seperator.join(random_words.split(' ')[:n]) - - @classmethod - def filter(cls, text_list: List[str], filter_text: str) -> List[str]: - return [text for text in text_list if filter_text in text] - - - - @staticmethod - def tqdm(*args, **kwargs): - from tqdm import tqdm - return tqdm(*args, **kwargs) - - progress = tqdm - - emojis = { - 'smile': '😊', - 'sad': '😞', - 'heart': '❤️', - 'star': '⭐', - 'fire': '🔥', - 'check': '✅', - 'cross': '❌', - 'warning': '⚠️', - 'info': 'ℹ️', - 'question': '❓', - 'exclamation': '❗', - 'plus': '➕', - 'minus': '➖', - - } - - - @classmethod - def emoji(cls, name:str): - return cls.emojis.get(name, '❓') - - @staticmethod - def tqdm(*args, **kwargs): - from tqdm import tqdm - return tqdm(*args, **kwargs) - progress = tqdm - - - - - @classmethod - def jload(cls, json_string): - import json - return json.loads(json_string.replace("'", '"')) - - @classmethod - def partial(cls, fn, *args, **kwargs): - return partial(fn, *args, **kwargs) - - - @classmethod - def sizeof(cls, obj): - import sys - sizeof = 0 - if isinstance(obj, dict): - for k,v in obj.items(): - sizeof += cls.sizeof(k) + cls.sizeof(v) - elif isinstance(obj, list): - for v in obj: - sizeof += cls.sizeof(v) - elif any([k.lower() in cls.type_str(obj).lower() for k in ['torch', 'Tensor'] ]): - - sizeof += cls.get_tensor_size(obj) - else: - sizeof += sys.getsizeof(obj) - - return sizeof - - - @classmethod - def put_torch(cls, path:str, data:Dict, **kwargs): - import torch - path = cls.resolve_path(path=path, extension='pt') - torch.save(data, path) - return path - - def init_nn(self): - import torch - torch.nn.Module.__init__(self) - - - @classmethod - def check_word(cls, word:str)-> str: - import commune as c - files = c.glob('./') - progress = c.tqdm(len(files)) - for f in files: - try: - text = c.get_text(f) - except Exception as e: - continue - if word in text: - return True - progress.update(1) - return False - - @classmethod - def wordinfolder(cls, word:str, path:str='./')-> bool: - import commune as c - path = c.resolve_path(path) - files = c.glob(path) - progress = c.tqdm(len(files)) - for f in files: - try: - text = c.get_text(f) - except Exception as e: - continue - if word in text: - return True - progress.update(1) - return False - - - def locals2hash(self, kwargs:dict = {'a': 1}, keys=['kwargs']) -> str: - kwargs.pop('cls', None) - kwargs.pop('self', None) - return self.dict2hash(kwargs) - - @classmethod - def dict2hash(cls, d:dict) -> str: - for k in d.keys(): - assert cls.jsonable(d[k]), f'{k} is not jsonable' - return cls.hash(d) - - @classmethod - def dict_put(cls, *args, **kwargs): - from commune.utils.dict import dict_put - return dict_put(*args, **kwargs) - - @classmethod - def dict_get(cls, *args, **kwargs): - from commune.utils.dict import dict_get - return dict_get(*args, **kwargs) - - - 
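# A minimal sketch of the behaviour of the chunk() helper deleted above:
# elements are dealt out round-robin (index i lands in bucket i % num_chunks)
# rather than sliced into contiguous runs. chunk_round_robin is an
# illustrative name, not part of the library.
def chunk_round_robin(sequence, num_chunks):
    buckets = [[] for _ in range(num_chunks)]
    for i, element in enumerate(sequence):
        buckets[i % num_chunks].append(element)
    return buckets

assert chunk_round_robin([0, 1, 2, 3, 4, 5], 2) == [[0, 2, 4], [1, 3, 5]]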
@classmethod - def is_address(cls, address:str) -> bool: - if not isinstance(address, str): - return False - if '://' in address: - return True - conds = [] - conds.append(len(address.split('.')) >= 3) - conds.append(isinstance(address, str)) - conds.append(':' in address) - conds.append(cls.is_int(address.split(':')[-1])) - return all(conds) - - - @classmethod - def new_event_loop(cls, nest_asyncio:bool = True) -> 'asyncio.AbstractEventLoop': - import asyncio - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - if nest_asyncio: - cls.nest_asyncio() - - return loop - - - def set_event_loop(self, loop=None, new_loop:bool = False) -> 'asyncio.AbstractEventLoop': - import asyncio - try: - if new_loop: - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - else: - loop = loop if loop else asyncio.get_event_loop() - except RuntimeError as e: - self.new_event_loop() - - self.loop = loop - return self.loop - - @classmethod - def get_event_loop(cls, nest_asyncio:bool = True) -> 'asyncio.AbstractEventLoop': - try: - loop = asyncio.get_event_loop() - except Exception as e: - loop = cls.new_event_loop(nest_asyncio=nest_asyncio) - return loop - - - - - @classmethod - def merge(cls, from_obj= None, - to_obj = None, - include_hidden:bool=True, - allow_conflicts:bool=True, - verbose: bool = False): - - ''' - Merge the functions of a python object into the current object (a) - ''' - from_obj = from_obj or cls - to_obj = to_obj or cls - - for fn in dir(from_obj): - if fn.startswith('_') and not include_hidden: - continue - if hasattr(to_obj, fn) and not allow_conflicts: - continue - if verbose: - cls.print(f'Adding {fn}') - setattr(to_obj, fn, getattr(from_obj, fn)) - - return to_obj - - - # JUPYTER NOTEBOOKS - @classmethod - def enable_jupyter(cls): - cls.nest_asyncio() - - - - jupyter = enable_jupyter - - - @classmethod - def pip_list(cls, lib=None): - pip_list = cls.cmd(f'pip list', verbose=False, bash=True).split('\n') - if lib != None: - pip_list = [l for l in pip_list if l.startswith(lib)] - return pip_list - - - @classmethod - def pip_libs(cls): - return list(cls.lib2version().values()) - - @classmethod - def ensure_lib(cls, lib:str, verbose:bool=False): - if cls.pip_exists(lib): - return {'lib':lib, 'version':cls.version(lib), 'status':'exists'} - elif cls.pip_exists(lib) == False: - cls.pip_install(lib, verbose=verbose) - return {'lib':lib, 'version':cls.version(lib), 'status':'installed'} - - required_libs = [] - @classmethod - def ensure_libs(cls, libs: List[str] = None, verbose:bool=False): - if hasattr(cls, 'libs'): - libs = cls.libs - results = [] - for lib in libs: - results.append(cls.ensure_lib(lib, verbose=verbose)) - return results - - @classmethod - def install(cls, libs: List[str] = None, verbose:bool=False): - return cls.ensure_libs(libs, verbose=verbose) - - @classmethod - def ensure_env(cls): - cls.ensure_libs(cls.libs) - - ensure_package = ensure_lib - - @classmethod - def queue(cls, size:str=-1, *args, mode='queue', **kwargs): - if mode == 'queue': - return cls.import_object('queue.Queue')(size, *args, **kwargs) - elif mode in ['multiprocessing', 'mp', 'process']: - return cls.module('process')(size, *args, **kwargs) - elif mode == 'ray': - return cls.import_object('ray.util.queue.Queue')(size, *args, **kwargs) - elif mode == 'redis': - return cls.import_object('redis.Queue')(size, *args, **kwargs) - elif mode == 'rabbitmq': - return cls.import_object('pika.Queue')(size, *args, **kwargs) - else: - raise NotImplementedError(f'mode {mode} not implemented') 
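# A hedged sketch of the merge() helper deleted above: it walks dir(from_obj)
# and copies attributes onto to_obj, optionally skipping underscore-prefixed
# names and pre-existing attributes. Source and Target are illustrative names.
class Source:
    def greet(self):
        return 'hello'

class Target:
    pass

def merge(from_obj, to_obj, include_hidden=False, allow_conflicts=True):
    for name in dir(from_obj):
        if name.startswith('_') and not include_hidden:
            continue  # skip hidden attributes unless explicitly included
        if hasattr(to_obj, name) and not allow_conflicts:
            continue  # keep to_obj's own attribute when conflicts are disallowed
        setattr(to_obj, name, getattr(from_obj, name))
    return to_obj

merge(Source, Target)
assert Target().greet() == 'hello'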
- - - - - @staticmethod - def is_class(module: Any) -> bool: - return type(module).__name__ == 'type' - - - - - - @classmethod - def param_keys(cls, model:'nn.Module' = None)->List[str]: - model = cls.resolve_model(model) - return list(model.state_dict().keys()) - - @classmethod - def params_map(cls, model, fmt='b'): - params_map = {} - state_dict = cls.resolve_model(model).state_dict() - for k,v in state_dict.items(): - params_map[k] = {'shape': list(v.shape) , - 'size': cls.get_tensor_size(v, fmt=fmt), - 'dtype': str(v.dtype), - 'requires_grad': v.requires_grad, - 'device': v.device, - 'numel': v.numel(), - - } - - return params_map - - - - @classmethod - def get_shortcut(cls, shortcut:str) -> dict: - return cls.shortcuts().get(shortcut) - - @classmethod - def rm_shortcut(cls, shortcut) -> str: - shortcuts = cls.shortcuts() - if shortcut in shortcuts: - cls.shortcuts.pop(shortcut) - cls.put_json('shortcuts', cls.shortcuts) - return shortcut - - - - @classmethod - def repo_url(cls, *args, **kwargs): - return cls.module('git').repo_url(*args, **kwargs) - - - - - - @classmethod - def compose(cls, *args, **kwargs): - return cls.module('docker').compose(*args, **kwargs) - - - @classmethod - def ps(cls, *args, **kwargs): - return cls.get_module('docker').ps(*args, **kwargs) - - @classmethod - def has_gpus(cls): - return bool(len(cls.gpus())>0) - - - @classmethod - def split_gather(cls,jobs:list, n=3, **kwargs)-> list: - if len(jobs) < n: - return cls.gather(jobs, **kwargs) - gather_jobs = [asyncio.gather(*job_chunk) for job_chunk in cls.chunk(jobs, num_chunks=n)] - gather_results = cls.gather(gather_jobs, **kwargs) - results = [] - for gather_result in gather_results: - results += gather_result - return results - - @classmethod - def addresses(cls, *args, **kwargs) -> List[str]: - return list(cls.namespace(*args,**kwargs).values()) - - @classmethod - def address_exists(cls, address:str) -> List[str]: - addresses = cls.addresses() - return address in addresses - - - - @classmethod - def task(cls, fn, timeout=1, mode='asyncio'): - - if mode == 'asyncio': - assert callable(fn) - future = asyncio.wait_for(fn, timeout=timeout) - return future - else: - raise NotImplemented - - - @classmethod - def shuffle(cls, x:list)->list: - if len(x) == 0: - return x - random.shuffle(x) - return x - - - @staticmethod - def retry(fn, trials:int = 3, verbose:bool = True): - # if fn is a self method, then it will be a bound method, and we need to get the function - if hasattr(fn, '__self__'): - fn = fn.__func__ - def wrapper(*args, **kwargs): - for i in range(trials): - try: - cls.print(fn) - return fn(*args, **kwargs) - except Exception as e: - if verbose: - cls.print(cls.detailed_error(e), color='red') - cls.print(f'Retrying {fn.__name__} {i+1}/{trials}', color='red') - - return wrapper - - - @staticmethod - def reverse_map(x:dict)->dict: - ''' - reverse a dictionary - ''' - return {v:k for k,v in x.items()} - - @classmethod - def df(cls, x, **kwargs): - return cls.import_object('pandas.DataFrame')(x, **kwargs) - - @classmethod - def torch(cls): - return cls.import_module('torch') - - @classmethod - def tensor(cls, *args, **kwargs): - return cls.import_object('torch.tensor')(*args, **kwargs) - - - @staticmethod - def random_int(start_value=100, end_value=None): - if end_value == None: - end_value = start_value - start_value, end_value = 0 , start_value - - assert start_value != None, 'start_value must be provided' - assert end_value != None, 'end_value must be provided' - return random.randint(start_value, 
end_value) - - - - def mean(self, x:list=[0,1,2,3,4,5,6,7,8,9,10]): - if not isinstance(x, list): - x = list(x) - return sum(x) / len(x) - - def median(self, x:list=[0,1,2,3,4,5,6,7,8,9,10]): - if not isinstance(x, list): - x = list(x) - x = sorted(x) - n = len(x) - if n % 2 == 0: - return (x[n//2] + x[n//2 - 1]) / 2 - else: - return x[n//2] - - @classmethod - def stdev(cls, x:list= [0,1,2,3,4,5,6,7,8,9,10], p=2): - if not isinstance(x, list): - x = list(x) - mean = cls.mean(x) - return (sum([(i - mean)**p for i in x]) / len(x))**(1/p) - std = stdev - - @classmethod - def set_env(cls, key:str, value:str)-> None: - ''' - Pay attention to this function. It sets the environment variable - ''' - os.environ[key] = value - return value - - - @classmethod - def pwd(cls): - pwd = os.getenv('PWD', cls.libpath) # the current wor king directory from the process starts - return pwd - - @classmethod - def choice(cls, options:Union[list, dict])->list: - options = deepcopy(options) # copy to avoid changing the original - if len(options) == 0: - return None - if isinstance(options, dict): - options = list(options.values()) - assert isinstance(options, list),'options must be a list' - return random.choice(options) - - @classmethod - def sample(cls, options:list, n=2): - if isinstance(options, int): - options = list(range(options)) - options = cls.shuffle(options) - return options[:n] - - @classmethod - def chown(cls, path:str = None, sudo:bool =True): - path = cls.resolve_path(path) - user = cls.env('USER') - cmd = f'chown -R {user}:{user} {path}' - cls.cmd(cmd , sudo=sudo, verbose=True) - return {'success':True, 'message':f'chown cache {path}'} - - @classmethod - def chown_cache(cls, sudo:bool = True): - return cls.chown(cls.cache_path, sudo=sudo) - - @classmethod - def colors(cls): - return ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white', 'bright_black', 'bright_red', 'bright_green', 'bright_yellow', 'bright_blue', 'bright_magenta', 'bright_cyan', 'bright_white'] - colours = colors - @classmethod - def random_color(cls): - return random.choice(cls.colors()) - randcolor = randcolour = colour = color = random_colour = random_color - - - def get_util(self, util:str): - return self.get_module(util) - - @classmethod - def random_float(cls, min=0, max=1): - return random.uniform(min, max) - - @classmethod - def random_ratio_selection(cls, x:list, ratio:float = 0.5)->list: - if type(x) in [float, int]: - x = list(range(int(x))) - assert len(x)>0 - if ratio == 1: - return x - assert ratio > 0 and ratio <= 1 - random.shuffle(x) - k = max(int(len(x) * ratio),1) - return x[:k] - - - def link_cmd(cls, old, new): - - link_cmd = cls.get('link_cmd', {}) - assert isinstance(old, str), old - assert isinstance(new, str), new - link_cmd[new] = old - - cls.put('link_cmd', link_cmd) - - - - - @classmethod - def resolve_memory(cls, memory: Union[str, int, float]) -> str: - - scale_map = { - 'kb': 1e3, - 'mb': 1e6, - 'gb': 1e9, - 'b': 1, - } - if isinstance(memory, str): - scale_found = False - for scale_key, scale_value in scale_map.items(): - - - if isinstance(memory, str) and memory.lower().endswith(scale_key): - memory = int(int(memory[:-len(scale_key)].strip())*scale_value) - - - if type(memory) in [float, int]: - scale_found = True - break - - assert type(memory) in [float, int], f'memory must be a float or int, got {type(memory)}' - return memory - - - - @classmethod - def filter(cls, text_list: List[str], filter_text: str) -> List[str]: - return [text for text in text_list if filter_text in text] 
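# A self-contained sketch of the parsing resolve_memory() above applies: a
# '10gb'-style string is matched against the scale map and converted to an
# integer byte count, while numeric inputs pass through unchanged.
# parse_memory is an illustrative name for the same logic.
def parse_memory(memory):
    scale_map = {'kb': 1e3, 'mb': 1e6, 'gb': 1e9, 'b': 1}
    if isinstance(memory, str):
        for suffix, scale in scale_map.items():
            if memory.lower().endswith(suffix):
                return int(float(memory[:-len(suffix)].strip()) * scale)
    return memory

assert parse_memory('10gb') == 10_000_000_000
assert parse_memory('512 mb') == 512_000_000
assert parse_memory(1024) == 1024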
- - - @classmethod - def is_success(cls, x): - # assume that if the result is a dictionary, and it has an error key, then it is an error - if isinstance(x, dict): - if 'error' in x: - return False - if 'success' in x and x['success'] == False: - return False - - return True - - @classmethod - def is_error(cls, x:Any): - """ - The function checks if the result is an error - The error is a dictionary with an error key set to True - """ - if isinstance(x, dict): - if 'error' in x and x['error'] == True: - return True - if 'success' in x and x['success'] == False: - return True - return False - - @classmethod - def is_int(cls, value) -> bool: - o = False - try : - int(value) - if '.' not in str(value): - o = True - except: - pass - return o - - - @classmethod - def is_float(cls, value) -> bool: - o = False - try : - float(value) - if '.' in str(value): - o = True - except: - pass - - return o - - - - @classmethod - def timer(cls, *args, **kwargs): - from commune.utils.time import Timer - return Timer(*args, **kwargs) - - @classmethod - def timeit(cls, fn, *args, include_result=False, **kwargs): - - t = cls.time() - if isinstance(fn, str): - fn = cls.get_fn(fn) - result = fn(*args, **kwargs) - response = { - 'latency': cls.time() - t, - 'fn': fn.__name__, - - } - if include_result: - print(response) - return result - return response - - @staticmethod - def remotewrap(fn, remote_key:str = 'remote'): - ''' - calls your function if you wrap it as such - - @c.remotewrap - def fn(): - pass - - # deploy it as a remote function - fn(remote=True) - ''' - - def remotewrap(self, *args, **kwargs): - remote = kwargs.pop(remote_key, False) - if remote: - return self.remote_fn(module=self, fn=fn.__name__, args=args, kwargs=kwargs) - else: - return fn(self, *args, **kwargs) - - return remotewrap - - - @staticmethod - def is_mnemonic(s: str) -> bool: - import re - # Match 12 or 24 words separated by spaces - return bool(re.match(r'^(\w+ ){11}\w+$', s)) or bool(re.match(r'^(\w+ ){23}\w+$', s)) - - @staticmethod - def is_private_key(s: str) -> bool: - import re - # Match a 64-character hexadecimal string - pattern = r'^[0-9a-fA-F]{64}$' - return bool(re.match(pattern, s)) - - - - @staticmethod - def address2ip(address:str) -> str: - return str('.'.join(address.split(':')[:-1])) - - @staticmethod - def as_completed( futures, timeout=10, **kwargs): - return concurrent.futures.as_completed(futures, timeout=timeout, **kwargs) - - - @classmethod - def dict2munch(cls, x:dict, recursive:bool=True)-> 'Munch': - from munch import Munch - ''' - Turn dictionary into Munch - ''' - if isinstance(x, dict): - for k,v in x.items(): - if isinstance(v, dict) and recursive: - x[k] = cls.dict2munch(v) - x = Munch(x) - return x - - @classmethod - def munch2dict(cls, x:'Munch', recursive:bool=True)-> dict: - from munch import Munch - ''' - Turn munch object into dictionary - ''' - if isinstance(x, Munch): - x = dict(x) - for k,v in x.items(): - if isinstance(v, Munch) and recursive: - x[k] = cls.munch2dict(v) - - return x - - - @classmethod - def munch(cls, x:Dict) -> 'Munch': - ''' - Converts a dict to a munch - ''' - return cls.dict2munch(x) - - - @classmethod - def time( cls, t=None) -> float: - import time - if t is not None: - return time.time() - t - else: - return time.time() - - @classmethod - def datetime(cls): - import datetime - # UTC - return datetime.datetime.utcnow().strftime("%Y-%m-%d_%H:%M:%S") - - @classmethod - def time2datetime(cls, t:float): - import datetime - return 
datetime.datetime.fromtimestamp(t).strftime("%Y-%m-%d_%H:%M:%S") - - time2date = time2datetime - - @classmethod - def datetime2time(cls, x:str): - import datetime - return datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S").timestamp() - - date2time = datetime2time - - @classmethod - def delta_t(cls, t): - return t - cls.time() - @classmethod - def timestamp(cls) -> float: - return int(cls.time()) - @classmethod - def sleep(cls, seconds:float) -> None: - import time - time.sleep(seconds) - return None - - - def search_dict(self, d:dict = 'k,d', search:str = {'k.d': 1}) -> dict: - search = search.split(',') - new_d = {} - - for k,v in d.items(): - if search in k.lower(): - new_d[k] = v - - return new_d - - @classmethod - def path2text(cls, path:str, relative=False): - - path = cls.resolve_path(path) - assert os.path.exists(path), f'path {path} does not exist' - if os.path.isdir(path): - filepath_list = cls.glob(path + '/**') - else: - assert os.path.exists(path), f'path {path} does not exist' - filepath_list = [path] - path2text = {} - for filepath in filepath_list: - try: - path2text[filepath] = cls.get_text(filepath) - except Exception as e: - pass - if relative: - pwd = cls.pwd() - path2text = {os.path.relpath(k, pwd):v for k,v in path2text.items()} - return path2text - - @classmethod - def root_key(cls): - return cls.get_key() - - @classmethod - def root_key_address(cls) -> str: - return cls.root_key().ss58_address - - - @classmethod - def is_root_key(cls, address:str)-> str: - return address == cls.root_key().ss58_address - - # time within the context - @classmethod - def context_timer(cls, *args, **kwargs): - return cls.timer(*args, **kwargs) - - - @classmethod - def folder_structure(cls, path:str='./', search='py', max_depth:int=5, depth:int=0)-> dict: - import glob - files = cls.glob(path + '/**') - results = [] - for file in files: - if os.path.isdir(file): - cls.folder_structure(file, search=search, max_depth=max_depth, depth=depth+1) - else: - if search in file: - results.append(file) - - return results - - - @classmethod - def copy(cls, data: Any) -> Any: - import copy - return copy.deepcopy(data) - - - @classmethod - def find_word(cls, word:str, path='./')-> str: - import commune as c - path = c.resolve_path(path) - files = c.glob(path) - progress = c.tqdm(len(files)) - found_files = {} - for f in files: - try: - text = c.get_text(f) - if word not in text: - continue - lines = text.split('\n') - except Exception as e: - continue - - line2text = {i:line for i, line in enumerate(lines) if word in line} - found_files[f[len(path)+1:]] = line2text - progress.update(1) - return found_files - - - - @classmethod - def pip_install(cls, - lib:str= None, - upgrade:bool=True , - verbose:str=True, - ): - import commune as c - - if lib in c.modules(): - c.print(f'Installing {lib} Module from local directory') - lib = c.resolve_object(lib).dirpath() - if lib == None: - lib = c.libpath - - if c.exists(lib): - cmd = f'pip install -e' - else: - cmd = f'pip install' - if upgrade: - cmd += ' --upgrade' - return cls.cmd(cmd, verbose=verbose) - - - @classmethod - def pip_exists(cls, lib:str, verbose:str=True): - return bool(lib in cls.pip_libs()) - - - @classmethod - def hash(cls, x, mode: str='sha256',*args,**kwargs) -> str: - import hashlib - x = cls.python2str(x) - if mode == 'keccak': - return cls.import_object('web3.main.Web3').keccak(text=x, *args, **kwargs).hex() - elif mode == 'ss58': - return cls.import_object('scalecodec.utils.ss58.ss58_encode')(x, *args,**kwargs) - elif mode == 'python': - 
return hash(x) - elif mode == 'md5': - return hashlib.md5(x.encode()).hexdigest() - elif mode == 'sha256': - return hashlib.sha256(x.encode()).hexdigest() - elif mode == 'sha512': - return hashlib.sha512(x.encode()).hexdigest() - elif mode =='sha3_512': - return hashlib.sha3_512(x.encode()).hexdigest() - else: - raise ValueError(f'unknown mode {mode}') - - @classmethod - def hash_modes(cls): - return ['keccak', 'ss58', 'python', 'md5', 'sha256', 'sha512', 'sha3_512'] - - str2hash = hash - - - def set_api_key(self, api_key:str, cache:bool = True): - api_key = os.getenv(str(api_key), None) - if api_key == None: - api_key = self.get_api_key() - self.api_key = api_key - if cache: - self.add_api_key(api_key) - assert isinstance(api_key, str) - - - def add_api_key(self, api_key:str, path=None): - assert isinstance(api_key, str) - path = self.resolve_path(path or 'api_keys') - api_keys = self.get(path, []) - api_keys.append(api_key) - api_keys = list(set(api_keys)) - self.put(path, api_keys) - return {'api_keys': api_keys} - - def set_api_keys(self, api_keys:str): - api_keys = list(set(api_keys)) - self.put('api_keys', api_keys) - return {'api_keys': api_keys} - - def rm_api_key(self, api_key:str): - assert isinstance(api_key, str) - api_keys = self.get(self.resolve_path('api_keys'), []) - for i in range(len(api_keys)): - if api_key == api_keys[i]: - api_keys.pop(i) - break - path = self.resolve_path('api_keys') - self.put(path, api_keys) - return {'api_keys': api_keys} - - def get_api_key(self, module=None): - if module != None: - self = self.module(module) - api_keys = self.api_keys() - if len(api_keys) == 0: - raise - else: - return self.choice(api_keys) - - def api_keys(self): - return self.get(self.resolve_path('api_keys'), []) - - - def rm_api_keys(self): - self.put(self.resolve_path('api_keys'), []) - return {'api_keys': []} - - diff --git a/commune/module/_network.py b/commune/module/_network.py deleted file mode 100644 index 9461d651..00000000 --- a/commune/module/_network.py +++ /dev/null @@ -1,581 +0,0 @@ -import os -import urllib -import requests -import netaddr -from typing import * -import socket - -class Network: - - default_port_range = [50050, 50150] # the port range between 50050 and 50150 - - @staticmethod - def int_to_ip(int_val: int) -> str: - r""" Maps an integer to a unique ip-string - Args: - int_val (:type:`int128`, `required`): - The integer representation of an ip. Must be in the range (0, 3.4028237e+38). - - Returns: - str_val (:tyep:`str`, `required): - The string representation of an ip. Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 - - Raises: - netaddr.core.AddrFormatError (Exception): - Raised when the passed int_vals is not a valid ip int value. - """ - import netaddr - return str(netaddr.IPAddress(int_val)) - - @staticmethod - def ip_to_int(str_val: str) -> int: - r""" Maps an ip-string to a unique integer. - arg: - str_val (:tyep:`str`, `required): - The string representation of an ip. Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 - - Returns: - int_val (:type:`int128`, `required`): - The integer representation of an ip. Must be in the range (0, 3.4028237e+38). - - Raises: - netaddr.core.AddrFormatError (Exception): - Raised when the passed str_val is not a valid ip string value. - """ - return int(netaddr.IPAddress(str_val)) - - @staticmethod - def ip_version(str_val: str) -> int: - r""" Returns the ip version (IPV4 or IPV6). - arg: - str_val (:tyep:`str`, `required): - The string representation of an ip. 
Of form *.*.*.* for ipv4 or *::*:*:*:* for ipv6 - - Returns: - int_val (:type:`int128`, `required`): - The ip version (Either 4 or 6 for IPv4/IPv6) - - Raises: - netaddr.core.AddrFormatError (Exception): - Raised when the passed str_val is not a valid ip string value. - """ - return int(netaddr.IPAddress(str_val).version) - - @staticmethod - def ip__str__(ip_type:int, ip_str:str, port:int): - """ Return a formatted ip string - """ - return "/ipv%i/%s:%i" % (ip_type, ip_str, port) - - @classmethod - def is_valid_ip(cls, ip:str) -> bool: - r""" Checks if an ip is valid. - Args: - ip (:obj:`str` `required`): - The ip to check. - - Returns: - valid (:obj:`bool` `required`): - True if the ip is valid, False otherwise. - """ - try: - netaddr.IPAddress(ip) - return True - except Exception as e: - return False - - @classmethod - def external_ip(cls, default_ip='0.0.0.0') -> str: - r""" Checks CURL/URLLIB/IPIFY/AWS for your external ip. - Returns: - external_ip (:obj:`str` `required`): - Your routers external facing ip as a string. - - Raises: - Exception(Exception): - Raised if all external ip attempts fail. - """ - # --- Try curl. - - - - ip = None - try: - ip = cls.cmd('curl -s ifconfig.me') - assert isinstance(cls.ip_to_int(ip), int) - except Exception as e: - print(e) - - if cls.is_valid_ip(ip): - return ip - try: - ip = requests.get('https://api.ipify.org').text - assert isinstance(cls.ip_to_int(ip), int) - except Exception as e: - print(e) - - if cls.is_valid_ip(ip): - return ip - # --- Try AWS - try: - ip = requests.get('https://checkip.amazonaws.com').text.strip() - assert isinstance(cls.ip_to_int(ip), int) - except Exception as e: - print(e) - - if cls.is_valid_ip(ip): - return ip - # --- Try myip.dnsomatic - try: - process = os.popen('curl -s myip.dnsomatic.com') - ip = process.readline() - assert isinstance(cls.ip_to_int(ip), int) - process.close() - except Exception as e: - print(e) - - if cls.is_valid_ip(ip): - return ip - # --- Try urllib ipv6 - try: - ip = urllib.request.urlopen('https://ident.me').read().decode('utf8') - assert isinstance(cls.ip_to_int(ip), int) - except Exception as e: - print(e) - - if cls.is_valid_ip(ip): - return ip - # --- Try Wikipedia - try: - ip = requests.get('https://www.wikipedia.org').headers['X-Client-IP'] - assert isinstance(cls.ip_to_int(ip), int) - except Exception as e: - print(e) - - if cls.is_valid_ip(ip): - return ip - - return default_ip - - @classmethod - def unreserve_port(cls,port:int, - var_path='reserved_ports'): - reserved_ports = cls.get(var_path, {}, root=True) - - port_info = reserved_ports.pop(port,None) - if port_info == None: - port_info = reserved_ports.pop(str(port),None) - - output = {} - if port_info != None: - cls.put(var_path, reserved_ports, root=True) - output['msg'] = 'port removed' - else: - output['msg'] = f'port {port} doesnt exist, so your good' - - output['reserved'] = cls.reserved_ports() - return output - - - - - @classmethod - def unreserve_ports(cls,*ports, - var_path='reserved_ports' ): - reserved_ports = cls.get(var_path, {}) - if len(ports) == 0: - # if zero then do all fam, tehe - ports = list(reserved_ports.keys()) - elif len(ports) == 1 and isinstance(ports[0],list): - ports = ports[0] - ports = list(map(str, ports)) - reserved_ports = {rp:v for rp,v in reserved_ports.items() if not any([p in ports for p in [str(rp), int(rp)]] )} - cls.put(var_path, reserved_ports) - return cls.reserved_ports() - - - @classmethod - def check_used_ports(cls, start_port = 8501, end_port = 8600, timeout=5): - port_range = 
[start_port, end_port] - used_ports = {} - for port in range(*port_range): - used_ports[port] = cls.port_used(port) - return used_ports - - - @classmethod - def kill_port(cls, port:int): - r""" Kills a process running on the passed port. - Args: - port (:obj:`int` `required`): - The port to kill the process on. - """ - try: - os.system(f'kill -9 $(lsof -t -i:{port})') - except Exception as e: - print(e) - return False - return True - - def kill_ports(self, ports = None, *more_ports): - ports = ports or self.used_ports() - if isinstance(ports, int): - ports = [ports] - if '-' in ports: - ports = list(range([int(p) for p in ports.split('-')])) - ports = list(ports) + list(more_ports) - for port in ports: - self.kill_port(port) - return self.check_used_ports() - - def public_ports(self, timeout=1.0): - import commune as c - futures = [] - for port in self.free_ports(): - c.print(f'Checking port {port}') - futures += [c.submit(self.is_port_open, {'port':port}, timeout=timeout)] - results = c.wait(futures, timeout=timeout) - results = list(map(bool, results)) - return results - - - - def is_port_open(self, port:int, ip:str=None, timeout=0.5): - import commune as c - ip = ip or self.ip() - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: - return s.connect_ex((ip, port)) == 0 - return False - - - - @classmethod - def free_ports(cls, n=10, random_selection:bool = False, **kwargs ) -> List[int]: - free_ports = [] - avoid_ports = kwargs.pop('avoid_ports', []) - for i in range(n): - try: - free_ports += [cls.free_port( random_selection=random_selection, - avoid_ports=avoid_ports, **kwargs)] - except Exception as e: - cls.print(f'Error: {e}', color='red') - break - avoid_ports += [free_ports[-1]] - - - return free_ports - - @classmethod - def random_port(cls, *args, **kwargs): - return cls.choice(cls.free_ports(*args, **kwargs)) - - - - - @classmethod - def free_port(cls, - ports = None, - port_range: List[int] = None , - ip:str =None, - avoid_ports = None, - random_selection:bool = True) -> int: - - ''' - - Get an availabldefe port within the {port_range} [start_port, end_poort] and {ip} - ''' - avoid_ports = avoid_ports if avoid_ports else [] - - if ports == None: - port_range = cls.get_port_range(port_range) - ports = list(range(*port_range)) - - ip = ip if ip else cls.default_ip - - if random_selection: - ports = cls.shuffle(ports) - port = None - for port in ports: - if port in avoid_ports: - continue - - if cls.port_available(port=port, ip=ip): - return port - - raise Exception(f'ports {port_range[0]} to {port_range[1]} are occupied, change the port_range to encompase more ports') - - get_available_port = free_port - - - - def check_used_ports(self, start_port = 8501, end_port = 8600, timeout=5): - port_range = [start_port, end_port] - used_ports = {} - for port in range(*port_range): - used_ports[port] = self.port_used(port) - return used_ports - - - - @classmethod - def resolve_port(cls, port:int=None, **kwargs): - - ''' - - Resolves the port and finds one that is available - ''' - if port == None or port == 0: - port = cls.free_port(port, **kwargs) - - if cls.port_used(port): - port = cls.free_port(port, **kwargs) - - return int(port) - - - - @classmethod - def port_available(cls, port:int, ip:str ='0.0.0.0'): - return not cls.port_used(port=port, ip=ip) - - - @classmethod - def port_used(cls, port: int, ip: str = '0.0.0.0', timeout: int = 1): - import socket - if not isinstance(port, int): - return False - - with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: - # Set 
the socket timeout - sock.settimeout(timeout) - - # Try to connect to the specified IP and port - try: - port=int(port) - sock.connect((ip, port)) - return True - except socket.error: - return False - - @classmethod - def port_free(cls, *args, **kwargs) -> bool: - return not cls.port_used(*args, **kwargs) - - @classmethod - def port_available(cls, port:int, ip:str ='0.0.0.0'): - return not cls.port_used(port=port, ip=ip) - - - - @classmethod - def used_ports(cls, ports:List[int] = None, ip:str = '0.0.0.0', port_range:Tuple[int, int] = None): - ''' - Get availabel ports out of port range - - Args: - ports: list of ports - ip: ip address - - ''' - port_range = cls.resolve_port_range(port_range=port_range) - if ports == None: - ports = list(range(*port_range)) - - async def check_port(port, ip): - return cls.port_used(port=port, ip=ip) - - used_ports = [] - jobs = [] - for port in ports: - jobs += [check_port(port=port, ip=ip)] - - results = cls.wait(jobs) - for port, result in zip(ports, results): - if isinstance(result, bool) and result: - used_ports += [port] - - return used_ports - - - - - @classmethod - def scan_ports(cls,host=None, start_port=None, end_port=None, timeout=24): - if start_port == None and end_port == None: - start_port, end_port = cls.port_range() - if host == None: - host = cls.external_ip() - import socket - open_ports = [] - future2port = {} - for port in range(start_port, end_port + 1): # ports from start_port to end_port - future2port[cls.submit(cls.port_used, kwargs=dict(port=port, ip=host), timeout=timeout)] = port - port2open = {} - for future in cls.as_completed(future2port, timeout=timeout): - port = future2port[future] - port2open[port] = future.result() - # sort the ports - port2open = {k: v for k, v in sorted(port2open.items(), key=lambda item: item[1])} - - return port2open - - @classmethod - def resolve_port(cls, port:int=None, **kwargs): - ''' - Resolves the port and finds one that is available - ''' - if port == None or port == 0: - port = cls.free_port(port, **kwargs) - if cls.port_used(port): - port = cls.free_port(port, **kwargs) - return int(port) - - @classmethod - def has_free_ports(self, n:int = 1, **kwargs): - return len(self.free_ports(n=n, **kwargs)) > 0 - - - @classmethod - def get_port_range(cls, port_range: list = None) -> list: - base_config = cls.base_config() - if 'port_range' in base_config: - port_range = base_config['port_range'] - if port_range == None: - port_range = cls.get('port_range', default=cls.default_port_range) - if isinstance(port_range, str): - port_range = list(map(int, port_range.split('-'))) - if len(port_range) == 0: - port_range = cls.default_port_range - port_range = list(port_range) - assert isinstance(port_range, list), 'Port range must be a list' - assert isinstance(port_range[0], int), 'Port range must be a list of integers' - assert isinstance(port_range[1], int), 'Port range must be a list of integers' - return port_range - - @classmethod - def port_range(cls): - return cls.get_port_range() - - @classmethod - def resolve_port_range(cls, port_range: list = None) -> list: - return cls.get_port_range(port_range) - - @classmethod - def set_port_range(cls, *port_range: list): - if '-' in port_range[0]: - port_range = list(map(int, port_range[0].split('-'))) - if len(port_range) ==0 : - port_range = cls.default_port_range - elif len(port_range) == 1: - if port_range[0] == None: - port_range = cls.default_port_range - assert len(port_range) == 2, 'Port range must be a list of two integers' - for port in port_range: - 
assert isinstance(port, int), f'Port {port} range must be a list of integers' - assert port_range[0] < port_range[1], 'Port range must be a list of integers' - cls.put('port_range', port_range) - return port_range - - @classmethod - def get_port(cls, port:int = None)->int: - port = port if port is not None and port != 0 else cls.free_port() - while cls.port_used(port): - port += 1 - return port - - @classmethod - def port_free(cls, *args, **kwargs) -> bool: - return not cls.port_used(*args, **kwargs) - - @classmethod - def port_available(cls, port:int, ip:str ='0.0.0.0'): - return not cls.port_used(port=port, ip=ip) - - @classmethod - def used_ports(cls, ports:List[int] = None, ip:str = '0.0.0.0', port_range:Tuple[int, int] = None): - ''' - Get availabel ports out of port range - - Args: - ports: list of ports - ip: ip address - - ''' - port_range = cls.resolve_port_range(port_range=port_range) - if ports == None: - ports = list(range(*port_range)) - - async def check_port(port, ip): - return cls.port_used(port=port, ip=ip) - - used_ports = [] - jobs = [] - for port in ports: - jobs += [check_port(port=port, ip=ip)] - - results = cls.gather(jobs) - for port, result in zip(ports, results): - if isinstance(result, bool) and result: - used_ports += [port] - - return used_ports - - - get_used_ports = used_ports - - @classmethod - def get_available_ports(cls, port_range: List[int] = None , ip:str =None) -> int: - port_range = cls.resolve_port_range(port_range) - ip = ip if ip else cls.default_ip - - available_ports = [] - # return only when the port is available - for port in range(*port_range): - if not cls.port_used(port=port, ip=ip): - available_ports.append(port) - - return available_ports - available_ports = get_available_ports - - @classmethod - def set_ip(cls, ip): - - cls.put('ip', ip) - return ip - - @classmethod - def ip(cls, max_age=None, update:bool = False, **kwargs) -> str: - ip = cls.get('ip', None, max_age=max_age, update=update) - if ip == None: - ip = cls.external_ip(**kwargs) - cls.put('ip', ip) - return ip - - @classmethod - def resolve_address(cls, address:str = None): - if address == None: - address = c.free_address() - assert isinstance(address, str), 'address must be a string' - return address - - @classmethod - def free_address(cls, **kwargs): - return f'{cls.ip()}:{cls.free_port(**kwargs)}' - - @classmethod - def check_used_ports(cls, start_port = 8501, end_port = 8600, timeout=5): - port_range = [start_port, end_port] - used_ports = {} - for port in range(*port_range): - used_ports[port] = cls.port_used(port) - return used_ports - - @classmethod - def resolve_ip(cls, ip=None, external:bool=True) -> str: - if ip == None: - if external: - ip = cls.external_ip() - else: - ip = '0.0.0.0' - assert isinstance(ip, str) - return ip \ No newline at end of file diff --git a/commune/module/_os.py b/commune/module/_os.py deleted file mode 100644 index 4d0d1c82..00000000 --- a/commune/module/_os.py +++ /dev/null @@ -1,505 +0,0 @@ - -import os -import shutil -from typing import * -import gc -import subprocess -import shlex -import sys - -class OS: - @classmethod - def check_pid(cls, pid): - """ Check For the existence of a unix pid. 
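# A standard-library sketch of the probe `check_pid` performs below: sending
# signal 0 delivers nothing and only verifies that the unix pid exists.
import os

def pid_exists(pid: int) -> bool:
    try:
        os.kill(pid, 0)  # no-op signal; raises OSError if the pid is gone
    except OSError:
        return False
    return True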
""" - try: - os.kill(pid, 0) - except OSError: - return False - else: - return True - @staticmethod - def kill_process(pid): - import signal - if isinstance(pid, str): - pid = int(pid) - - os.kill(pid, signal.SIGKILL) - - @classmethod - def path_exists(cls, path:str): - return os.path.exists(path) - - @classmethod - def ensure_path(cls, path): - """ - ensures a dir_path exists, otherwise, it will create it - """ - - dir_path = os.path.dirname(path) - if not os.path.isdir(dir_path): - os.makedirs(dir_path, exist_ok=True) - - return path - - - @staticmethod - def seed_everything(seed: int) -> None: - import torch, random - import numpy as np - "seeding function for reproducibility" - random.seed(seed) - os.environ["PYTHONHASHSEED"] = str(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.backends.cudnn.deterministic = True - - @staticmethod - def cpu_count(): - return os.cpu_count() - - num_cpus = cpu_count - - @staticmethod - def get_env(key:str): - return os.environ.get(key) - - @staticmethod - def set_env(key:str, value:str): - os.environ[key] = value - return {'success': True, 'key': key, 'value': value} - - @staticmethod - def get_cwd(): - return os.getcwd() - - @staticmethod - def set_cwd(path:str): - return os.chdir(path) - - - @staticmethod - def get_pid(): - return os.getpid() - - @classmethod - def memory_usage_info(cls, fmt='gb'): - import psutil - process = psutil.Process(os.getpid()) - memory_info = process.memory_info() - response = { - 'rss': memory_info.rss, - 'vms': memory_info.vms, - 'pageins' : memory_info.pageins, - 'pfaults': memory_info.pfaults, - } - - - for key, value in response.items(): - response[key] = cls.format_data_size(value, fmt=fmt) - - return response - - - - @classmethod - def memory_info(cls, fmt='gb'): - import psutil - - """ - Returns the current memory usage and total memory of the system. 
- """ - # Get memory statistics - memory_stats = psutil.virtual_memory() - - # Total memory in the system - response = { - 'total': memory_stats.total, - 'available': memory_stats.available, - 'used': memory_stats.total - memory_stats.available, - 'free': memory_stats.available, - 'active': memory_stats.active, - 'inactive': memory_stats.inactive, - 'percent': memory_stats.percent, - 'ratio': memory_stats.percent/100, - } - - for key, value in response.items(): - if key in ['percent', 'ratio']: - continue - response[key] = cls.format_data_size(value, fmt=fmt) - - return response - - @classmethod - def virtual_memory_available(cls): - import psutil - return psutil.virtual_memory().available - - @classmethod - def virtual_memory_total(cls): - import psutil - return psutil.virtual_memory().total - - @classmethod - def virtual_memory_percent(cls): - import psutil - return psutil.virtual_memory().percent - - @classmethod - def cpu_type(cls): - import platform - return platform.processor() - - @classmethod - def cpu_info(cls): - - return { - 'cpu_count': cls.cpu_count(), - 'cpu_type': cls.cpu_type(), - } - - - def cpu_usage(self): - import psutil - # get the system performance data for the cpu - cpu_usage = psutil.cpu_percent() - return cpu_usage - - - - @classmethod - def gpu_memory(cls): - import torch - return torch.cuda.memory_allocated() - - @classmethod - def num_gpus(cls): - import torch - return torch.cuda.device_count() - - - @classmethod - def gpus(cls): - return list(range(cls.num_gpus())) - - def add_rsa_key(cls, b=2048, t='rsa'): - return cls.cmd(f"ssh-keygen -b {b} -t {t}") - - - @classmethod - def stream_output(cls, process, verbose=False): - try: - modes = ['stdout', 'stderr'] - for mode in modes: - pipe = getattr(process, mode) - if pipe == None: - continue - for line in iter(pipe.readline, b''): - line = line.decode('utf-8') - if verbose: - cls.print(line[:-1]) - yield line - except Exception as e: - print(e) - pass - - cls.kill_process(process) - - @classmethod - def cmd(cls, - command:Union[str, list], - *args, - verbose:bool = False , - env:Dict[str, str] = {}, - sudo:bool = False, - password: bool = None, - bash : bool = False, - return_process: bool = False, - generator: bool = False, - color : str = 'white', - cwd : str = None, - **kwargs) -> 'subprocess.Popen': - - ''' - Runs a command in the shell. 
- - ''' - - if len(args) > 0: - command = ' '.join([command] + list(args)) - - - if password != None: - sudo = True - - if sudo: - command = f'sudo {command}' - - - if bash: - command = f'bash -c "{command}"' - - cwd = cls.resolve_path(cwd) - - env = {**os.environ, **env} - - process = subprocess.Popen(shlex.split(command), - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - cwd = cwd, - env=env, **kwargs) - if return_process: - return process - streamer = cls.stream_output(process, verbose=verbose) - if generator: - return streamer - else: - text = '' - for ch in streamer: - text += ch - return text - - @staticmethod - def kill_process(process): - import signal - process_id = process.pid - process.stdout.close() - process.send_signal(signal.SIGINT) - process.wait() - return {'success': True, 'msg': 'process killed', 'pid': process_id} - # sys.exit(0) - - @staticmethod - def format_data_size(x: Union[int, float], fmt:str='b', prettify:bool=False): - assert type(x) in [int, float, str], f'x must be int or float, not {type(x)}' - x = float(x) - fmt2scale = { - 'b': 1, - 'kb': 1000, - 'mb': 1000**2, - 'gb': 1000**3, - 'GiB': 1024**3, - 'tb': 1000**4, - } - - assert fmt in fmt2scale.keys(), f'fmt must be one of {fmt2scale.keys()}' - scale = fmt2scale[fmt] - x = x/scale - - return x - - - @classmethod - def disk_info(cls, path:str = '/', fmt:str='gb'): - path = cls.resolve_path(path) - import shutil - response = shutil.disk_usage(path) - response = { - 'total': response.total, - 'used': response.used, - 'free': response.free, - } - for key, value in response.items(): - response[key] = cls.format_data_size(value, fmt=fmt) - return response - - - @classmethod - def mv(cls, path1, path2): - - assert os.path.exists(path1), path1 - if not os.path.isdir(path2): - path2_dirpath = os.path.dirname(path2) - if not os.path.isdir(path2_dirpath): - os.makedirs(path2_dirpath, exist_ok=True) - shutil.move(path1, path2) - assert os.path.exists(path2), path2 - assert not os.path.exists(path1), path1 - return path2 - - - @classmethod - def cp(cls, path1:str, path2:str, refresh:bool = False): - import shutil - # what if its a folder? 
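# The unit table `format_data_size` scales by, as defined above; note that it
# mixes decimal units (kb = 1000) with one binary unit (GiB = 1024**3).
FMT2SCALE = {'b': 1, 'kb': 1e3, 'mb': 1e6, 'gb': 1e9, 'GiB': 1024**3, 'tb': 1e12}

def to_unit(n_bytes: float, fmt: str = 'gb') -> float:
    return n_bytes / FMT2SCALE[fmt]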
- assert os.path.exists(path1), path1 - if refresh == False: - assert not os.path.exists(path2), path2 - - path2_dirpath = os.path.dirname(path2) - if not os.path.isdir(path2_dirpath): - os.makedirs(path2_dirpath, exist_ok=True) - assert os.path.isdir(path2_dirpath), f'Failed to create directory {path2_dirpath}' - - if os.path.isdir(path1): - shutil.copytree(path1, path2) - - - elif os.path.isfile(path1): - - shutil.copy(path1, path2) - else: - raise ValueError(f'path1 is not a file or a folder: {path1}') - return path2 - - - @classmethod - def cuda_available(cls) -> bool: - import torch - return torch.cuda.is_available() - - @classmethod - def free_gpu_memory(cls): - gpu_info = cls.gpu_info() - return {gpu_id: gpu_info['free'] for gpu_id, gpu_info in gpu_info.items()} - - def most_used_gpu(self): - most_used_gpu = max(self.free_gpu_memory().items(), key=lambda x: x[1])[0] - return most_used_gpu - - def most_used_gpu_memory(self): - most_used_gpu = max(self.free_gpu_memory().items(), key=lambda x: x[1])[1] - return most_used_gpu - - - def least_used_gpu(self): - least_used_gpu = min(self.free_gpu_memory().items(), key=lambda x: x[1])[0] - return least_used_gpu - - def least_used_gpu_memory(self): - least_used_gpu = min(self.free_gpu_memory().items(), key=lambda x: x[1])[1] - return least_used_gpu - - - - - @classmethod - def gpu_info(cls, fmt='gb') -> Dict[int, Dict[str, float]]: - import torch - gpu_info = {} - for gpu_id in cls.gpus(): - mem_info = torch.cuda.mem_get_info(gpu_id) - gpu_info[int(gpu_id)] = { - 'name': torch.cuda.get_device_name(gpu_id), - 'free': mem_info[0], - 'used': (mem_info[1]- mem_info[0]), - 'total': mem_info[1], - 'ratio': mem_info[0]/mem_info[1], - } - - gpu_info_map = {} - - skip_keys = ['ratio', 'total', 'name'] - - for gpu_id, gpu_info in gpu_info.items(): - for key, value in gpu_info.items(): - if key in skip_keys: - continue - gpu_info[key] = cls.format_data_size(value, fmt=fmt) - gpu_info_map[gpu_id] = gpu_info - return gpu_info_map - - - gpu_map =gpu_info - - @classmethod - def hardware(cls, fmt:str='gb'): - return { - 'cpu': cls.cpu_info(), - 'memory': cls.memory_info(fmt=fmt), - 'disk': cls.disk_info(fmt=fmt), - 'gpu': cls.gpu_info(fmt=fmt), - } - - - @classmethod - def get_folder_size(cls, folder_path:str='/'): - folder_path = cls.resolve_path(folder_path) - """Calculate the total size of all files in the folder.""" - total_size = 0 - for root, dirs, files in os.walk(folder_path): - for file in files: - file_path = os.path.join(root, file) - if not os.path.islink(file_path): - total_size += os.path.getsize(file_path) - return total_size - - @classmethod - def find_largest_folder(cls, directory: str = '~/'): - directory = cls.resolve_path(directory) - """Find the largest folder in the given directory.""" - largest_size = 0 - largest_folder = "" - - for folder_name in os.listdir(directory): - folder_path = os.path.join(directory, folder_name) - if os.path.isdir(folder_path): - folder_size = cls.get_folder_size(folder_path) - if folder_size > largest_size: - largest_size = folder_size - largest_folder = folder_path - - return largest_folder, largest_size - - - @classmethod - def getcwd(*args, **kwargs): - return os.getcwd(*args, **kwargs) - - - @classmethod - def argv(cls, include_script:bool = False): - args = sys.argv - if include_script: - return args - else: - return args[1:] - - @classmethod - def mv(cls, path1, path2): - assert os.path.exists(path1), path1 - if not os.path.isdir(path2): - path2_dirpath = os.path.dirname(path2) - if not 
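# A standard-library sketch of the walk `get_folder_size` performs: sum the
# sizes of regular files and skip symlinks so linked trees are not counted.
import os

def folder_size(path: str) -> int:
    total = 0
    for root, _dirs, files in os.walk(path):
        for name in files:
            fp = os.path.join(root, name)
            if not os.path.islink(fp):
                total += os.path.getsize(fp)
    return total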
os.path.isdir(path2_dirpath): - os.makedirs(path2_dirpath, exist_ok=True) - shutil.move(path1, path2) - assert os.path.exists(path2), path2 - assert not os.path.exists(path1), path1 - return {'success': True, 'msg': f'Moved {path1} to {path2}'} - - @classmethod - def sys_path(cls): - return sys.path - - @classmethod - def gc(cls): - gc.collect() - return {'success': True, 'msg': 'garbage collected'} - - @staticmethod - def get_pid(): - return os.getpid() - - @classmethod - def nest_asyncio(cls): - import nest_asyncio - nest_asyncio.apply() - - @staticmethod - def memory_usage(fmt='gb'): - fmt2scale = {'b': 1e0, 'kb': 1e1, 'mb': 1e3, 'gb': 1e6} - import psutil - process = psutil.Process() - scale = fmt2scale.get(fmt) - return (process.memory_info().rss // 1024) / scale - - @classmethod - def get_env(cls, key:str)-> None: - ''' - Pay attention to this function. It sets the environment variable - ''' - return os.environ[key] - - env = get_env - \ No newline at end of file diff --git a/commune/module/_schema.py b/commune/module/_schema.py deleted file mode 100644 index 509be48c..00000000 --- a/commune/module/_schema.py +++ /dev/null @@ -1,1020 +0,0 @@ - -from typing import * -import inspect - -class Schema: - whitelist = [] - _schema = None - @classmethod - def get_schema(cls, - module = None, - search = None, - whitelist = None, - fn = None, - docs: bool = True, - include_parents:bool = False, - defaults:bool = True, cache=False) -> 'Schema': - - if '/' in str(search): - module, fn = search.split('/') - cls = cls.module(module) - if isinstance(module, str): - if '/' in module: - module , fn = module.split('/') - module = cls.module(module) - module = module or cls - schema = {} - fns = module.get_functions() - for fn in fns: - if search != None and search not in fn: - continue - if callable(getattr(module, fn )): - schema[fn] = cls.fn_schema(fn, defaults=defaults,docs=docs) - # sort by keys - schema = dict(sorted(schema.items())) - if whitelist != None : - schema = {k:v for k,v in schema.items() if k in whitelist} - return schema - - - @classmethod - def determine_type(cls, x): - if x.lower() == 'null' or x == 'None': - return None - elif x.lower() in ['true', 'false']: - return bool(x.lower() == 'true') - elif x.startswith('[') and x.endswith(']'): - # this is a list - try: - - list_items = x[1:-1].split(',') - # try to convert each item to its actual type - x = [cls.determine_type(item.strip()) for item in list_items] - if len(x) == 1 and x[0] == '': - x = [] - return x - - except: - # if conversion fails, return as string - return x - elif x.startswith('{') and x.endswith('}'): - # this is a dictionary - if len(x) == 2: - return {} - try: - dict_items = x[1:-1].split(',') - # try to convert each item to a key-value pair - return {key.strip(): cls.determine_type(value.strip()) for key, value in [item.split(':', 1) for item in dict_items]} - except: - # if conversion fails, return as string - return x - else: - # try to convert to int or float, otherwise return as string - try: - return int(x) - except ValueError: - try: - return float(x) - except ValueError: - return x - - - @classmethod - def fn2code(cls, search=None, module=None)-> Dict[str, str]: - module = module if module else cls - functions = module.fns(search) - fn_code_map = {} - for fn in functions: - try: - fn_code_map[fn] = module.fn_code(fn) - except Exception as e: - print(f'Error: {e}') - return fn_code_map - - - - @classmethod - def fn_code(cls,fn:str, - detail:bool=False, - seperator: str = '/' - ) -> str: - ''' - Returns 
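# `determine_type` above hand-rolls string-to-value coercion for CLI args. A
# compact alternative (not what the original does) is ast.literal_eval, though
# it expects Python literals ('True', 'None') rather than 'true'/'null'.
import ast

def coerce(x: str):
    try:
        return ast.literal_eval(x)  # ints, floats, bools, lists, dicts, None
    except (ValueError, SyntaxError):
        return x                    # anything unparseable stays a string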
the code of a function - ''' - try: - fn = cls.get_fn(fn) - code_text = inspect.getsource(fn) - text_lines = code_text.split('\n') - if 'classmethod' in text_lines[0] or 'staticmethod' in text_lines[0] or '@' in text_lines[0]: - text_lines.pop(0) - fn_code = '\n'.join([l[len(' '):] for l in code_text.split('\n')]) - assert 'def' in text_lines[0], 'Function not found in code' - - if detail: - start_line = cls.find_code_line(search=text_lines[0]) - fn_code = { - 'text': fn_code, - 'start_line': start_line , - 'end_line': start_line + len(text_lines) - } - except Exception as e: - print(f'Error: {e}') - fn_code = None - - return fn_code - - - @classmethod - def fn_hash(cls,fn:str = 'subspace/ls', detail:bool=False, seperator: str = '/') -> str: - - fn_code = cls.fn_code(fn, detail=detail, seperator=seperator) - return cls.hash(fn_code) - - @classmethod - def is_generator(cls, obj): - """ - Is this shiz a generator dawg? - """ - if isinstance(obj, str): - if not hasattr(cls, obj): - return False - obj = getattr(cls, obj) - if not callable(obj): - result = inspect.isgenerator(obj) - else: - result = inspect.isgeneratorfunction(obj) - return result - @classmethod - def get_parents(cls, obj = None,recursive=True, avoid_classes=['object']) -> List[str]: - obj = cls.resolve_object(obj) - parents = list(obj.__bases__) - if recursive: - for parent in parents: - parent_parents = cls.get_parents(parent, recursive=recursive) - if len(parent_parents) > 0: - for pp in parent_parents: - if pp.__name__ not in avoid_classes: - - parents += [pp] - return parents - - - @classmethod - def get_class_name(cls, obj = None) -> str: - obj = cls or obj - if not cls.is_class(obj): - obj = type(obj) - return obj.__name__ - - - @classmethod - def fn_signature_map(cls, obj=None, include_parents:bool = False): - obj = cls.resolve_object(obj) - function_signature_map = {} - for f in cls.get_functions(obj = obj, include_parents=include_parents): - if f.startswith('__') and f.endswith('__'): - if f in ['__init__']: - pass - else: - continue - if not hasattr(cls, f): - continue - if callable(getattr(cls, f )): - function_signature_map[f] = {k:str(v) for k,v in cls.get_function_signature(getattr(cls, f )).items()} - return function_signature_map - - - @classmethod - def fn_schema(cls, fn:str, - defaults:bool=True, - code:bool = False, - docs:bool = True, **kwargs)->dict: - ''' - Get function schema of function in cls - ''' - fn_schema = {} - fn = cls.get_fn(fn) - input_schema = cls.fn_signature(fn) - for k,v in input_schema.items(): - v = str(v) - if v.startswith(' dict: - r = {} - code = cls.fn_code(fn) - lines = code.split('\n') - mode = 'self' - if '@classmethod' in lines[0]: - mode = 'class' - elif '@staticmethod' in lines[0]: - mode = 'static' - - start_line_text = 0 - lines_before_fn_def = 0 - for l in lines: - - if f'def {fn}('.replace(' ', '') in l.replace(' ', ''): - start_line_text = l - break - else: - lines_before_fn_def += 1 - - assert start_line_text != None, f'Could not find function {fn} in {cls.pypath()}' - module_code = cls.code() - start_line = cls.find_code_line(start_line_text, code=module_code) - 1 - - end_line = start_line + len(lines) # find the endline - has_docs = bool('"""' in code or "'''" in code) - filepath = cls.filepath() - - # start code line - for i, line in enumerate(lines): - - is_end = bool(')' in line and ':' in line) - if is_end: - start_code_line = i - break - - - return { - 'start_line': start_line, - 'end_line': end_line, - 'has_docs': has_docs, - 'code': code, - 'n_lines': len(lines), 
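# The standard-library primitive that `fn_schema` / `fn_signature` build on is
# inspect.signature; a minimal sketch of extracting defaults the way the
# deleted `fn_defaults` does (missing defaults map to None).
import inspect

def fn_defaults(fn) -> dict:
    sig = inspect.signature(fn)
    return {name: (None if p.default is p.empty else p.default)
            for name, p in sig.parameters.items()}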
- 'hash': cls.hash(code), - 'path': filepath, - 'start_code_line': start_code_line + start_line , - 'mode': mode - - } - - - - @classmethod - def find_code_line(cls, search:str=None, code:str = None): - if code == None: - code = cls.code() # get the code - found_lines = [] # list of found lines - for i, line in enumerate(code.split('\n')): - if str(search) in line: - found_lines.append({'idx': i+1, 'text': line}) - if len(found_lines) == 0: - return None - elif len(found_lines) == 1: - return found_lines[0]['idx'] - return found_lines - - - - @classmethod - def attributes(cls): - return list(cls.__dict__.keys()) - - - @classmethod - def get_attributes(cls, search = None, obj=None): - if obj is None: - obj = cls - if isinstance(obj, str): - obj = c.module(obj) - # assert hasattr(obj, '__dict__'), f'{obj} has no __dict__' - attrs = dir(obj) - if search is not None: - attrs = [a for a in attrs if search in a and callable(a)] - return attrs - - - - def add_fn(self, fn, name=None): - if name == None: - name = fn.__name__ - assert not hasattr(self, name), f'{name} already exists' - - setattr(self, name, fn) - - return { - 'success':True , - 'message':f'Added {name} to {self.__class__.__name__}' - } - - - add_attribute = add_attr = add_function = add_fn - - @classmethod - def init_schema(cls): - return cls.fn_schema('__init__') - - - - @classmethod - def init_kwargs(cls): - kwargs = cls.fn_defaults('__init__') - kwargs.pop('self', None) - if 'config' in kwargs: - if kwargs['config'] != None: - kwargs.update(kwargs.pop('config')) - del kwargs['config'] - if 'kwargs' in kwargs: - if kwargs['kwargs'] != None: - kwargs = kwargs.pop('kwargs') - del kwargs['kwargs'] - - return kwargs - init_params = init_kwargs - - @classmethod - def lines_of_code(cls, code:str=None): - if code == None: - code = cls.code() - return len(code.split('\n')) - - @classmethod - def code(cls, module = None, search=None, *args, **kwargs): - if '/' in str(module) or module in cls.fns(): - return cls.fn_code(module) - module = cls.resolve_object(module) - print(module) - text = cls.get_text( module.filepath(), *args, **kwargs) - if search != None: - find_lines = cls.find_lines(text=text, search=search) - return find_lines - return text - pycode = code - @classmethod - def chash(cls, *args, **kwargs): - import commune as c - """ - The hash of the code, where the code is the code of the class (cls) - """ - code = cls.code(*args, **kwargs) - return c.hash(code) - - @classmethod - def find_code_line(cls, search:str, code:str = None): - if code == None: - code = cls.code() # get the code - found_lines = [] # list of found lines - for i, line in enumerate(code.split('\n')): - if search in line: - found_lines.append({'idx': i+1, 'text': line}) - if len(found_lines) == 0: - return None - elif len(found_lines) == 1: - return found_lines[0]['idx'] - return found_lines - - - def fn_code_first_line(self, fn): - code = self.fn_code(fn) - return code.split('):')[0] + '):' - - def fn_code_first_line_idx(self, fn): - code = self.fn_code(fn) - return self.find_code_line(self.fn_code_first_line(fn), code=code) - - - @classmethod - def fn_info(cls, fn:str='test_fn') -> dict: - r = {} - code = cls.fn_code(fn) - lines = code.split('\n') - mode = 'self' - if '@classmethod' in lines[0]: - mode = 'class' - elif '@staticmethod' in lines[0]: - mode = 'static' - module_code = cls.code() - in_fn = False - start_line = 0 - end_line = 0 - fn_code_lines = [] - for i, line in enumerate(module_code.split('\n')): - if f'def {fn}('.replace(' ', '') in 
line.replace(' ', ''): - in_fn = True - start_line = i + 1 - if in_fn: - fn_code_lines.append(line) - if ('def ' in line or '' == line) and len(fn_code_lines) > 1: - end_line = i - 1 - break - - if not in_fn: - end_line = start_line + len(fn_code_lines) # find the endline - # start code line - for i, line in enumerate(lines): - - is_end = bool(')' in line and ':' in line) - if is_end: - start_code_line = i - break - - return { - 'start_line': start_line, - 'end_line': end_line, - 'code': code, - 'n_lines': len(lines), - 'hash': cls.hash(code), - 'start_code_line': start_code_line + start_line , - 'mode': mode - - } - - - @classmethod - def set_line(cls, idx:int, text:str): - code = cls.code() - lines = code.split('\n') - if '\n' in text: - front_lines = lines[:idx] - back_lines = lines[idx:] - new_lines = text.split('\n') - lines = front_lines + new_lines + back_lines - else: - lines[idx-1] = text - new_code = '\n'.join(lines) - cls.put_text(cls.filepath(), new_code) - return {'success': True, 'msg': f'Set line {idx} to {text}'} - - @classmethod - def add_line(cls, idx=0, text:str = '', module=None ): - """ - add line to an index of the module code - """ - - code = cls.code() if module == None else c.module(module).code() - lines = code.split('\n') - new_lines = text.split('\n') if '\n' in text else [text] - lines = lines[:idx] + new_lines + lines[idx:] - new_code = '\n'.join(lines) - cls.put_text(cls.filepath(), new_code) - return {'success': True, 'msg': f'Added line {idx} to {text}'} - - @classmethod - def get_line(cls, idx): - code = cls.code() - lines = code.split('\n') - assert idx < len(lines), f'idx {idx} is out of range for {len(lines)}' - line = lines[max(idx, 0)] - print(len(line)) - return line - - @classmethod - def fn_defaults(cls, fn): - """ - Gets the function defaults - """ - fn = cls.get_fn(fn) - function_defaults = dict(inspect.signature(fn)._parameters) - for k,v in function_defaults.items(): - if v._default != inspect._empty and v._default != None: - function_defaults[k] = v._default - else: - function_defaults[k] = None - - return function_defaults - - @staticmethod - def is_class(obj): - ''' - is the object a class - ''' - return type(obj).__name__ == 'type' - - - @classmethod - def resolve_class(cls, obj): - ''' - resolve class of object or return class if it is a class - ''' - if cls.is_class(obj): - return obj - else: - return obj.__class__ - - - - @classmethod - def has_var_keyword(cls, fn='__init__', fn_signature=None): - if fn_signature == None: - fn_signature = cls.resolve_fn(fn) - for param_info in fn_signature.values(): - if param_info.kind._name_ == 'VAR_KEYWORD': - return True - return False - - - - @classmethod - def fn_signature(cls, fn) -> dict: - ''' - get the signature of a function - ''' - if isinstance(fn, str): - fn = getattr(cls, fn) - return dict(inspect.signature(fn)._parameters) - - get_function_signature = fn_signature - @classmethod - def is_arg_key_valid(cls, key='config', fn='__init__'): - fn_signature = cls.fn_signature(fn) - if key in fn_signature: - return True - else: - for param_info in fn_signature.values(): - if param_info.kind._name_ == 'VAR_KEYWORD': - return True - - return False - - - - @classmethod - def self_functions(cls: Union[str, type], obj=None, search=None): - ''' - Gets the self methods in a class - ''' - obj = cls.resolve_object(obj) - functions = cls.get_functions(obj) - signature_map = {f:cls.get_function_args(getattr(obj, f)) for f in functions} - if search != None: - functions = [f for f in functions if search in f] 
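# A simplified sketch of the first-argument heuristic behind `self_functions`,
# `class_functions` and `static_functions` below; it assumes `fn` is the plain
# function object as written in the class body (before any decorator binds it).
import inspect

def classify(fn) -> str:
    args = inspect.getfullargspec(fn).args
    if args[:1] == ['self']:
        return 'self'
    if args[:1] == ['cls']:
        return 'class'
    return 'static'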
- return [k for k, v in signature_map.items() if 'self' in v] - - @classmethod - def class_functions(cls: Union[str, type], obj=None): - ''' - Gets the self methods in a class - ''' - obj = cls.resolve_object(obj) - functions = cls.get_functions(obj) - signature_map = {f:cls.get_function_args(getattr(obj, f)) for f in functions} - return [k for k, v in signature_map.items() if 'cls' in v] - - class_methods = get_class_methods = class_fns = class_functions - - @classmethod - def static_functions(cls: Union[str, type], obj=None): - ''' - Gets the self methods in a class - ''' - obj = obj or cls - functions = cls.get_functions(obj) - signature_map = {f:cls.get_function_args(getattr(obj, f)) for f in functions} - return [k for k, v in signature_map.items() if not ('self' in v or 'cls' in v)] - - static_methods = static_fns = static_functions - - @classmethod - def property_fns(cls) -> bool: - ''' - Get a list of property functions in a class - ''' - return [fn for fn in dir(cls) if cls.is_property(fn)] - - parents = get_parents - - @classmethod - def parent2functions(cls, obj=None): - ''' - Get the parent classes of a class - ''' - obj = cls.resolve_object(obj) - parent_functions = {} - for parent in cls.parents(obj): - parent_functions[parent.__name__] = cls.get_functions(parent) - return parent_functions - - parent2fns = parent2functions - - @classmethod - def get_functions(cls, obj: Any = None, - search = None, - include_parents:bool=True, - include_hidden:bool = False) -> List[str]: - ''' - Get a list of functions in a class - - Args; - obj: the class to get the functions from - include_parents: whether to include the parent functions - include_hidden: whether to include hidden functions (starts and begins with "__") - ''' - is_root_module = cls.is_root_module() - obj = cls.resolve_object(obj) - if include_parents: - parent_functions = cls.parent_functions(obj) - else: - parent_functions = [] - avoid_functions = [] - if not is_root_module: - import commune as c - avoid_functions = c.functions() - else: - avoid_functions = [] - - functions = [] - child_functions = dir(obj) - function_names = [fn_name for fn_name in child_functions + parent_functions] - - for fn_name in function_names: - if fn_name in avoid_functions: - continue - if not include_hidden: - if ((fn_name.startswith('__') or fn_name.endswith('_'))): - if fn_name != '__init__': - continue - fn_obj = getattr(obj, fn_name) - # if the function is callable, include it - if callable(fn_obj): - functions.append(fn_name) - - text_derived_fns = cls.parse_functions_from_module_text() - - functions = sorted(list(set(functions + text_derived_fns))) - - if search != None: - functions = [f for f in functions if search in f] - return functions - - @classmethod - def functions(cls, search = None, include_parents = True): - return cls.get_functions(search=search, include_parents=include_parents) - - - @classmethod - def get_conflict_functions(cls, obj = None): - ''' - Does the object conflict with the current object - ''' - if isinstance(obj, str): - obj = cls.get_module(obj) - root_fns = cls.root_functions() - conflict_functions = [] - for fn in obj.functions(): - if fn in root_fns: - print(f'Conflict: {fn}') - conflict_functions.append(fn) - return conflict_functions - - @classmethod - def does_module_conflict(cls, obj): - return len(cls.get_conflict_functions(obj)) > 0 - - - - @classmethod - def parse_functions_from_module_text(cls, obj=None, splitter_options = [" def " , " def "]): - # reutrn only functions in this class - import inspect - 
obj = obj or cls - text = inspect.getsource(obj) - functions = [] - for splitter in splitter_options: - for line in text.split('\n'): - if f'"{splitter}"' in line: - continue - if line.startswith(splitter): - functions += [line.split(splitter)[1].split('(')[0]] - - return functions - - - def n_fns(self, search = None): - return len(self.fns(search=search)) - - fn_n = n_fns - @classmethod - def fns(self, search = None, include_parents = True): - return self.get_functions(search=search, include_parents=include_parents) - @classmethod - def is_property(cls, fn: 'Callable') -> bool: - ''' - is the function a property - ''' - try: - fn = cls.get_fn(fn, ignore_module_pattern=True) - except : - return False - - return isinstance(fn, property) - - def is_fn_self(self, fn): - fn = self.resolve_fn(fn) - return hasattr(fn, '__self__') and fn.__self__ == self - - - - @classmethod - def get_fn(cls, fn:str, init_kwargs = None): - """ - Gets the function from a string or if its an attribute - """ - if isinstance(fn, str): - if '/' in fn: - module, fn = fn.split('/') - cls = cls.get_module(module) - try: - fn = getattr(cls, fn) - except: - init_kwargs = init_kwargs or {} - fn = getattr(cls(**init_kwargs), fn) - - if callable(fn) or isinstance(fn, property): - pass - - return fn - - - - @classmethod - def self_functions(cls, search = None): - fns = cls.classify_fns(cls)['self'] - if search != None: - fns = [f for f in fns if search in f] - return fns - - - - @classmethod - def classify_fns(cls, obj= None, mode=None): - method_type_map = {} - obj = obj or c.module(obj) - if isinstance(obj, str): - obj = c.module(obj) - for attr_name in dir(obj): - method_type = None - try: - method_type = cls.classify_fn(getattr(obj, attr_name)) - except Exception as e: - continue - - if method_type not in method_type_map: - method_type_map[method_type] = [] - method_type_map[method_type].append(attr_name) - if mode != None: - method_type_map = method_type_map[mode] - return method_type_map - - - @classmethod - def get_args(cls, fn) -> List[str]: - """ - get the arguments of a function - params: - fn: the function - - """ - # if fn is an object get the __ - - if not callable(fn): - fn = cls.get_fn(fn) - try: - args = inspect.getfullargspec(fn).args - except Exception as e: - args = [] - return args - - get_function_args = get_args - - - @classmethod - def has_function_arg(cls, fn, arg:str): - args = cls.get_function_args(fn) - return arg in args - - - fn_args = get_fn_args = get_function_args - - @classmethod - def classify_fn(cls, fn): - try: - if not callable(fn): - fn = cls.get_fn(fn) - if not callable(fn): - return None - args = cls.get_function_args(fn) - if len(args) == 0: - return 'static' - elif args[0] == 'self': - return 'self' - else: - return 'class' - except Exception as e: - return 'property' - - - - @classmethod - def python2types(cls, d:dict)-> dict: - return {k:str(type(v)).split("'")[1] for k,v in d.items()} - - - - - @classmethod - def fn2str(cls,search = None, code = True, defaults = True, **kwargs): - fns = cls.fns(search=search) - fn2str = {} - for fn in fns: - fn2str[fn] = cls.fn_code(fn) - - return fn2str - @classmethod - def fn2hash(cls, fn=None , mode='sha256', **kwargs): - fn2hash = {} - for k,v in cls.fn2str(**kwargs).items(): - fn2hash[k] = c.hash(v,mode=mode) - if fn: - return fn2hash[fn] - return fn2hash - - # TAG CITY - @classmethod - def parent_functions(cls, obj = None, include_root = True): - functions = [] - obj = obj or cls - parents = cls.get_parents(obj) - for parent in parents: - 
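# What `fn2hash` above computes, reduced to the standard library: a sha256
# digest of each function's source text (hashlib here stands in for c.hash).
import hashlib, inspect

def fn_hash(fn) -> str:
    return hashlib.sha256(inspect.getsource(fn).encode()).hexdigest()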
is_parent_root = cls.is_root_module(parent) - if is_parent_root: - continue - - for name, member in parent.__dict__.items(): - if not name.startswith('__'): - functions.append(name) - return functions - - @classmethod - def child_functions(cls, obj=None): - obj = cls.resolve_object(obj) - - methods = [] - for name, member in obj.__dict__.items(): - if inspect.isfunction(member) and not name.startswith('__'): - methods.append(name) - - return methods - - @classmethod - def locals2kwargs(cls,locals_dict:dict, kwargs_keys=['kwargs']) -> dict: - locals_dict = locals_dict or {} - kwargs = locals_dict or {} - kwargs.pop('cls', None) - kwargs.pop('self', None) - - assert isinstance(kwargs, dict), f'kwargs must be a dict, got {type(kwargs)}' - - # These lines are needed to remove the self and cls from the locals_dict - for k in kwargs_keys: - kwargs.update( locals_dict.pop(k, {}) or {}) - - return kwargs - - - - - - def kwargs2attributes(self, kwargs:dict, ignore_error:bool = False): - for k,v in kwargs.items(): - if k != 'self': # skip the self - # we dont want to overwrite existing variables from - if not ignore_error: - assert not hasattr(self, k) - setattr(self, k) - - def num_fns(self): - return len(self.fns()) - - - def fn2type(self): - fn2type = {} - fns = self.fns() - for f in fns: - if callable(getattr(self, f)): - fn2type[f] = self.classify_fn(getattr(self, f)) - return fn2type - - - @classmethod - def is_dir_module(cls, path:str) -> bool: - """ - determine if the path is a module - """ - filepath = cls.simple2path(path) - if path.replace('.', '/') + '/' in filepath: - return True - if ('modules/' + path.replace('.', '/')) in filepath: - return True - return False - - @classmethod - def add_line(cls, path:str, text:str, line=None) -> None: - # Get the absolute path of the file - path = cls.resolve_path(path) - text = str(text) - # Write the text to the file - if line != None: - line=int(line) - lines = cls.get_text(path).split('\n') - lines = lines[:line] + [text] + lines[line:] - - text = '\n'.join(lines) - with open(path, 'w') as file: - file.write(text) - - - return {'success': True, 'msg': f'Added line to {path}'} - - - @classmethod - def readme(cls): - # Markdown input - markdown_text = "## Hello, *Markdown*!" 
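# A sketch of the `locals2kwargs` idiom above: turn a locals() snapshot into
# clean kwargs by dropping self/cls and folding any nested 'kwargs' dict back in.
def locals_to_kwargs(locals_dict: dict) -> dict:
    kwargs = dict(locals_dict or {})
    kwargs.pop('cls', None)
    kwargs.pop('self', None)
    kwargs.update(kwargs.pop('kwargs', {}) or {})
    return kwargs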
- path = cls.filepath().replace('.py', '_docs.md') - markdown_text = cls.get_text(path=path) - return markdown_text - - docs = readme - - - @staticmethod - def is_imported(package:str) : - return bool(package in sys.modules) - - @classmethod - def is_parent(cls, obj=None): - obj = obj or cls - return bool(obj in cls.get_parents()) - - @classmethod - def find_code_lines(cls, search:str = None , module=None) -> List[str]: - module_code = cls.get_module(module).code() - return cls.find_lines(search=search, text=module_code) - - @classmethod - def find_lines(self, text:str, search:str) -> List[str]: - """ - Finds the lines in text with search - """ - found_lines = [] - lines = text.split('\n') - for line in lines: - if search in line: - found_lines += [line] - - return found_lines - - - @classmethod - def params(cls, fn='__init__'): - params = cls.fn_defaults(fn) - params.pop('self', None) - return params - - - @classmethod - def is_str_fn(cls, fn): - if fn == None: - return False - if '/' in fn: - module, fn = fn.split('/') - module = cls.module(module) - else: - module = cls - - return hasattr(module, fn) - diff --git a/commune/module/_storage.py b/commune/module/_storage.py deleted file mode 100644 index f1a15a99..00000000 --- a/commune/module/_storage.py +++ /dev/null @@ -1,680 +0,0 @@ - -from typing import * -import os -import glob -import inspect -import yaml -import json -import time -import shutil -import pandas as pd - -class Storage: - - @classmethod - def put_json(cls, - path:str, - data:Dict, - meta = None, - verbose: bool = False, - **kwargs) -> str: - if meta != None: - data = {'data':data, 'meta':meta} - path = cls.resolve_path(path=path, extension='json') - # cls.lock_file(path) - if isinstance(data, dict): - data = json.dumps(data) - cls.put_text(path, data) - return path - - save_json = put_json - - - - @classmethod - def rm_json(cls, path=None): - from commune.utils.dict import rm_json - if path in ['all', '**']: - return [cls.rm_json(f) for f in cls.glob(files_only=False)] - path = cls.resolve_path(path=path, extension='json') - return rm_json(path ) - - @classmethod - def rmdir(cls, path): - return shutil.rmtree(path) - - @classmethod - def isdir(cls, path): - path = cls.resolve_path(path=path) - return os.path.isdir(path) - - @classmethod - def isfile(cls, path): - path = cls.resolve_path(path=path) - return os.path.isfile(path) - - @classmethod - def rm_all(cls): - for path in cls.ls(): - cls.rm(path) - return {'success':True, 'message':f'{cls.storage_dir()} removed'} - - @classmethod - def rm(cls, path, extension=None, mode = 'json'): - - assert isinstance(path, str), f'path must be a string, got {type(path)}' - path = cls.resolve_path(path=path, extension=extension) - - # incase we want to remove the json file - mode_suffix = f'.{mode}' - if not os.path.exists(path) and os.path.exists(path+mode_suffix): - path += mode_suffix - - if not os.path.exists(path): - return {'success':False, 'message':f'{path} does not exist'} - if os.path.isdir(path): - cls.rmdir(path) - if os.path.isfile(path): - os.remove(path) - assert not os.path.exists(path), f'{path} was not removed' - - return {'success':True, 'message':f'{path} removed'} - - @classmethod - def rm_all(cls): - storage_dir = cls.storage_dir() - if cls.exists(storage_dir): - cls.rm(storage_dir) - assert not cls.exists(storage_dir), f'{storage_dir} was not removed' - cls.makedirs(storage_dir) - assert cls.is_dir_empty(storage_dir), f'{storage_dir} was not removed' - return {'success':True, 'message':f'{storage_dir} 
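# A standard-library sketch of the `put_json` behaviour above, including the
# {'data': ..., 'meta': ...} envelope the deleted code uses when meta is given.
import json, os

def put_json(path: str, data, meta=None) -> str:
    if meta is not None:
        data = {'data': data, 'meta': meta}
    os.makedirs(os.path.dirname(path) or '.', exist_ok=True)
    with open(path, 'w') as f:
        json.dump(data, f)
    return path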
removed'} - - - - @classmethod - def rm_all(cls): - storage_dir = cls.storage_dir() - if cls.exists(storage_dir): - cls.rm(storage_dir) - assert not cls.exists(storage_dir), f'{storage_dir} was not removed' - cls.makedirs(storage_dir) - assert cls.is_dir_empty(storage_dir), f'{storage_dir} was not removed' - return {'success':True, 'message':f'{storage_dir} removed'} - - - @classmethod - def glob(cls, path =None, files_only:bool = True, recursive:bool=True): - path = cls.resolve_path(path, extension=None) - if os.path.isdir(path): - path = os.path.join(path, '**') - paths = glob.glob(path, recursive=recursive) - if files_only: - paths = list(filter(lambda f:os.path.isfile(f), paths)) - return paths - - - @classmethod - def put_cache(cls,k,v ): - cls.cache[k] = v - - @classmethod - def get_cache(cls,k, default=None, **kwargs): - v = cls.cache.get(k, default) - return v - - - @classmethod - def get_json(cls, - path:str, - default:Any=None, - verbose: bool = False,**kwargs): - path = cls.resolve_path(path=path, extension='json') - - cls.print(f'Loading json from {path}', verbose=verbose) - - try: - data = cls.get_text(path, **kwargs) - except Exception as e: - return default - if isinstance(data, str): - try: - data = json.loads(data) - except Exception as e: - return default - if isinstance(data, dict): - if 'data' in data and 'meta' in data: - data = data['data'] - return data - @classmethod - async def async_get_json(cls,*args, **kwargs): - return cls.get_json(*args, **kwargs) - - load_json = get_json - - - @classmethod - def file_exists(cls, path:str)-> bool: - path = cls.resolve_path(path) - exists = os.path.exists(path) - return exists - - - exists = exists_json = file_exists - - - @classmethod - def makedirs(cls, *args, **kwargs): - return os.makedirs(*args, **kwargs) - - - @classmethod - def mv(cls, path1, path2): - path1 = cls.resolve_path(path1) - path2 = cls.resolve_path(path2) - assert os.path.exists(path1), path1 - if not os.path.isdir(path2): - path2_dirpath = os.path.dirname(path2) - if not os.path.isdir(path2_dirpath): - os.makedirs(path2_dirpath, exist_ok=True) - shutil.move(path1, path2) - assert os.path.exists(path2), path2 - assert not os.path.exists(path1), path1 - return path2 - - @classmethod - def resolve_path(cls, path:str = None, extension=None): - ''' - ### Documentation for `resolve_path` class method - - #### Purpose: - The `resolve_path` method is a class method designed to process and resolve file and directory paths based on various inputs and conditions. This method is useful for preparing file paths for operations such as reading, writing, and manipulation. - - #### Parameters: - - `path` (str, optional): The initial path to be resolved. If not provided, a temporary directory path will be returned. - - `extension` (Optional[str], optional): The file extension to append to the path if necessary. Defaults to None. - - `root` (bool, optional): A flag to determine whether the path should be resolved in relation to the root directory. Defaults to False. - - `file_type` (str, optional): The default file type/extension to append if the `path` does not exist but appending the file type results in a valid path. Defaults to 'json'. - - #### Behavior: - - If `path` is not provided, the method returns a path to a temporary directory. - - If `path` starts with '/', it is returned as is. - - If `path` starts with '~/', it is expanded to the user’s home directory. - - If `path` starts with './', it is resolved to an absolute path. 
-        - If `path` does not fall under the above conditions, it is treated as a relative path. If `root` is True, it is resolved relative to the root temp directory; otherwise, relative to the class's temp directory.
-        - If `path` is a relative path and does not contain the temp directory, the method joins `path` with the appropriate temp directory.
-        - If `path` does not exist as a directory and an `extension` is provided, the extension is appended to `path`.
-        - If `path` does not exist but appending the `file_type` results in an existing path, the `file_type` is appended.
-        - The parent directory of `path` is created if it does not exist, avoiding any errors when the path is accessed later.
-
-        #### Returns:
-        - `str`: The resolved and potentially created path, ensuring it is ready for further file operations.
-
-        #### Example Usage:
-        ```python
-        # Resolve a path in relation to the class's temporary directory
-        file_path = MyClassName.resolve_path('data/subfolder/file', extension='txt')
-
-        # Resolve a path in relation to the root temporary directory
-        root_file_path = MyClassName.resolve_path('configs/settings')
-        ```
-
-        #### Notes:
-        - This method relies on the `os` module to perform path manipulations and checks.
-        - This method is versatile and can handle various input path formats, simplifying file path resolution in the class's context.
-        '''
-
-        if path == None:
-            return cls.storage_dir()
-
-        if path.startswith('/'):
-            path = path
-        elif path.startswith('~'):
-            path = os.path.expanduser(path)
-        elif path.startswith('.'):
-            path = os.path.abspath(path)
-        else:
-            # if it is a relative path, then it is relative to the module path
-            # ex: 'data' -> '.commune/path_module/data'
-            storage_dir = cls.storage_dir()
-            if storage_dir not in path:
-                path = os.path.join(storage_dir, path)
-
-        if extension != None and not path.endswith(extension):
-            path = path + '.'
+ extension - - return path - - - - @staticmethod - def ensure_path( path): - """ - ensures a dir_path exists, otherwise, it will create it - """ - - dir_path = os.path.dirname(path) - if not os.path.isdir(dir_path): - os.makedirs(dir_path, exist_ok=True) - - return path - - @staticmethod - async def async_write(path, data, mode ='w'): - import aiofiles - async with aiofiles.open(path, mode=mode) as f: - await f.write(data) - - @classmethod - def put_yaml(cls, path:str, data: dict) -> Dict: - from munch import Munch - from copy import deepcopy - ''' - Loads a yaml file - ''' - # Directly from dictionary - data_type = type(data) - if data_type in [pd.DataFrame]: - data = data.to_dict() - if data_type in [Munch]: - data = cls.munch2dict(deepcopy(data)) - if data_type in [dict, list, tuple, set, float, str, int]: - yaml_str = yaml.dump(data) - else: - raise NotImplementedError(f"{data_type}, is not supported") - with open(path, 'w') as file: - file.write(yaml_str) - return {'success': True, 'msg': f'Wrote yaml to {path}'} - - - - - - @classmethod - def get_yaml(cls, path:str=None, default={}, **kwargs) -> Dict: - '''f - Loads a yaml file - ''' - path = cls.resolve_path(path) - with open(path, 'r') as file: - data = yaml.load(file, Loader=yaml.FullLoader) - - return data - - - load_yaml = get_yaml - - save_yaml = put_yaml - - @classmethod - def filesize(cls, filepath:str): - filepath = cls.resolve_path(filepath) - return os.path.getsize(filepath) - - - def search_files(self, path:str='./', search:str='__pycache__') -> List[str]: - path = self.resolve_path(path) - files = self.glob(path) - return list(filter(lambda x: search in x, files)) - - def rm_pycache(self, path:str='./') -> List[str]: - files = self.search_files(path, search='__pycache__') - for file in files: - self.print(self.rm(file)) - return files - - def file2size(self, path='./', fmt='mb') -> int: - files = self.glob(path) - file2size = {} - pwd = self.pwd() - for file in files: - file2size[file.replace(pwd+'/','')] = self.format_data_size(self.filesize(file), fmt) - - # sort by size - file2size = dict(sorted(file2size.items(), key=lambda item: item[1])) - return file2size - - - @classmethod - def cp(cls, path1:str, path2:str, refresh:bool = False): - # what if its a folder? 
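# The pyyaml round trip behind `put_yaml` / `get_yaml` above, reduced to the
# essentials (note the original `put_yaml` docstring says "Loads" although the
# method writes). pyyaml is the dependency the deleted code imports as `yaml`.
import yaml

def put_yaml(path: str, data: dict) -> None:
    with open(path, 'w') as f:
        f.write(yaml.dump(data))

def get_yaml(path: str) -> dict:
    with open(path) as f:
        return yaml.load(f, Loader=yaml.FullLoader)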
- assert os.path.exists(path1), path1 - if refresh == False: - assert not os.path.exists(path2), path2 - - path2_dirpath = os.path.dirname(path2) - if not os.path.isdir(path2_dirpath): - os.makedirs(path2_dirpath, exist_ok=True) - assert os.path.isdir(path2_dirpath), f'Failed to create directory {path2_dirpath}' - - if os.path.isdir(path1): - shutil.copytree(path1, path2) - - - elif os.path.isfile(path1): - - shutil.copy(path1, path2) - else: - raise ValueError(f'path1 is not a file or a folder: {path1}') - return {'success': True, 'msg': f'Copied {path1} to {path2}'} - - @classmethod - def put_text(cls, path:str, text:str, key=None, bits_per_character=8) -> None: - # Get the absolute path of the file - path = cls.resolve_path(path) - dirpath = os.path.dirname(path) - if not os.path.exists(dirpath): - os.makedirs(dirpath, exist_ok=True) - if not isinstance(text, str): - text = cls.python2str(text) - if key != None: - text = cls.get_key(key).encrypt(text) - # Write the text to the file - with open(path, 'w') as file: - file.write(text) - # get size - text_size = len(text)*bits_per_character - - return {'success': True, 'msg': f'Wrote text to {path}', 'size': text_size} - - @classmethod - def lsdir(cls, path:str) -> List[str]: - path = os.path.abspath(path) - return os.listdir(path) - - @classmethod - def abspath(cls, path:str) -> str: - return os.path.abspath(path) - - - @classmethod - def ls(cls, path:str = '', - recursive:bool = False, - search = None, - return_full_path:bool = True): - """ - provides a list of files in the path - - this path is relative to the module path if you dont specifcy ./ or ~/ or / - which means its based on the module path - """ - path = cls.resolve_path(path) - try: - ls_files = cls.lsdir(path) if not recursive else cls.walk(path) - except FileNotFoundError: - return [] - if return_full_path: - ls_files = [os.path.abspath(os.path.join(path,f)) for f in ls_files] - - ls_files = sorted(ls_files) - if search != None: - ls_files = list(filter(lambda x: search in x, ls_files)) - return ls_files - - - - @classmethod - def put(cls, - k: str, - v: Any, - mode: bool = 'json', - encrypt: bool = False, - verbose: bool = False, - password: str = None, **kwargs) -> Any: - ''' - Puts a value in the config - ''' - encrypt = encrypt or password != None - - if encrypt or password != None: - v = cls.encrypt(v, password=password) - - if not cls.jsonable(v): - v = cls.serialize(v) - - data = {'data': v, 'encrypted': encrypt, 'timestamp': cls.timestamp()} - - # default json - getattr(cls,f'put_{mode}')(k, data) - - data_size = cls.sizeof(v) - - return {'k': k, 'data_size': data_size, 'encrypted': encrypt, 'timestamp': cls.timestamp()} - - @classmethod - def get(cls, - k:str, - default: Any=None, - mode:str = 'json', - max_age:str = None, - cache :bool = False, - full :bool = False, - key: 'Key' = None, - update :bool = False, - password : str = None, - verbose = True, - **kwargs) -> Any: - - ''' - Puts a value in sthe config, with the option to encrypt it - - Return the value - ''' - if cache: - if k in cls.cache: - return cls.cache[k] - data = getattr(cls, f'get_{mode}')(k,default=default, **kwargs) - - - if password != None: - assert data['encrypted'] , f'{k} is not encrypted' - data['data'] = cls.decrypt(data['data'], password=password, key=key) - - data = data or default - - if isinstance(data, dict): - if update: - max_age = 0 - if max_age != None: - timestamp = data.get('timestamp', None) - if timestamp != None: - age = int(time.time() - timestamp) - if age > max_age: # if 
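# A sketch of the max_age staleness rule `get` applies below: stored entries
# carry a write timestamp, and anything older than max_age seconds is
# discarded in favour of the default. A simplified stand-in, not the original.
import time

def is_stale(entry: dict, max_age: float) -> bool:
    ts = entry.get('timestamp')
    return ts is None or (time.time() - ts) > max_age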
the age is greater than the max age - cls.print(f'{k} is too old ({age} > {max_age})', verbose=verbose) - return default - else: - data = default - - if not full: - if isinstance(data, dict): - if 'data' in data: - data = data['data'] - - # local cache - if cache: - cls.cache[k] = data - return data - - def get_age(self, k:str) -> int: - data = self.get_json(k) - timestamp = data.get('timestamp', None) - if timestamp != None: - age = int(time.time() - timestamp) - return age - return -1 - - @classmethod - def get_text(cls, - path: str, - tail = None, - start_byte:int = 0, - end_byte:int = 0, - start_line :int= None, - end_line:int = None ) -> str: - # Get the absolute path of the file - path = cls.resolve_path(path) - - if not os.path.exists(path): - if os.path.exists(path + '.json'): - path = path + '.json' - - # Read the contents of the file - with open(path, 'rb') as file: - - file.seek(0, 2) # this is done to get the fiel size - file_size = file.tell() # Get the file size - if start_byte < 0: - start_byte = file_size - start_byte - if end_byte <= 0: - end_byte = file_size - end_byte - if end_byte < start_byte: - end_byte = start_byte + 100 - chunk_size = end_byte - start_byte + 1 - - file.seek(start_byte) - - content_bytes = file.read(chunk_size) - - # Convert the bytes to a string - try: - content = content_bytes.decode() - except UnicodeDecodeError as e: - if hasattr(content_bytes, 'hex'): - content = content_bytes.hex() - else: - raise e - - if tail != None: - content = content.split('\n') - content = '\n'.join(content[-tail:]) - - elif start_line != None or end_line != None: - - content = content.split('\n') - if end_line == None or end_line == 0 : - end_line = len(content) - if start_line == None: - start_line = 0 - if start_line < 0: - start_line = start_line + len(content) - if end_line < 0 : - end_line = end_line + len(content) - content = '\n'.join(content[start_line:end_line]) - else: - content = content_bytes.decode() - return content - - - def is_encrypted(self, path:str) -> bool: - try: - return self.get_json(path).get('encrypted', False) - except: - return False - - @classmethod - def storage_dir(cls): - return f'{cls.cache_path}/{cls.module_name()}' - - tmp_dir = cache_dir = storage_dir - - @classmethod - def refresh_storage(cls): - cls.rm(cls.storage_dir()) - - @classmethod - def refresh_storage_dir(cls): - cls.rm(cls.storage_dir()) - cls.makedirs(cls.storage_dir()) - - - @classmethod - def rm_lines(cls, path:str, start_line:int, end_line:int) -> None: - # Get the absolute path of the file - text = cls.get_text(path) - text = text.split('\n') - text = text[:start_line-1] + text[end_line:] - text = '\n'.join(text) - cls.put_text(path, text) - return {'success': True, 'msg': f'Removed lines {start_line} to {end_line} from {path}'} - @classmethod - def rm_line(cls, path:str, line:int, text=None) -> None: - # Get the absolute path of the file - text = cls.get_text(path) - text = text.split('\n') - text = text[:line-1] + text[line:] - text = '\n'.join(text) - cls.put_text(path, text) - return {'success': True, 'msg': f'Removed line {line} from {path}'} - # Write the text to the file - - @classmethod - def tilde_path(cls): - return os.path.expanduser('~') - - def is_dir_empty(self, path:str): - return len(self.ls(path)) == 0 - - @classmethod - def get_file_size(cls, path:str): - path = cls.resolve_path(path) - return os.path.getsize(path) - - @staticmethod - def jsonable( value): - import json - try: - json.dumps(value) - return True - except: - return False - - def 
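# The `jsonable` probe above with the exception types named: a value is
# json-serialisable exactly when json.dumps accepts it.
import json

def jsonable(value) -> bool:
    try:
        json.dumps(value)
        return True
    except (TypeError, ValueError):
        return False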
file2text(self, path = './', relative=True, **kwargs): - path = os.path.abspath(path) - file2text = {} - for file in c.glob(path, recursive=True): - with open(file, 'r') as f: - content = f.read() - file2text[file] = content - if relative: - print(path) - return {k[len(path)+1:]:v for k,v in file2text.items()} - - return file2text - - def file2lines(self, path:str='./')-> List[str]: - file2text = self.file2text(path) - file2lines = {f: text.split('\n') for f, text in file2text.items()} - return file2lines - - def num_files(self, path:str='./')-> int: - import commune as c - return len(c.glob(path)) - - def hidden_files(self, path:str='./')-> List[str]: - import commune as c - path = self.resolve_path(path) - files = [f[len(path)+1:] for f in c.glob(path)] - print(files) - hidden_files = [f for f in files if f.startswith('.')] - return hidden_files - - @staticmethod - def format_data_size(x: Union[int, float], fmt:str='b', prettify:bool=False): - assert type(x) in [int, float], f'x must be int or float, not {type(x)}' - fmt2scale = { - 'b': 1, - 'kb': 1000, - 'mb': 1000**2, - 'gb': 1000**3, - 'GiB': 1024**3, - 'tb': 1000**4, - } - - assert fmt in fmt2scale.keys(), f'fmt must be one of {fmt2scale.keys()}' - scale = fmt2scale[fmt] - x = x/scale - - if prettify: - return f'{x:.2f} {f}' - else: - return x diff --git a/commune/module/module.py b/commune/module/module.py deleted file mode 100755 index 9b5964bc..00000000 --- a/commune/module/module.py +++ /dev/null @@ -1,1043 +0,0 @@ -import os -import inspect -from typing import * -import json - -import argparse -import nest_asyncio -nest_asyncio.apply() - -# for instance if you have a class called 'os_fam' the file would be ./commune/module/_os_fam.py -def get_core_modules(prefix = 'commune.module', core_prefix = '_'): - """ - find the core modules that construct the commune block module - """ - core_dirpath = os.path.dirname(__file__) - core_modules = [] - for f in os.listdir(core_dirpath): - f = f.split('/')[-1].split('.')[0] - if f.startswith(core_prefix) and not f.startswith('__') : - core_modules.append(f[1:]) - results = [] - for cm in core_modules: - obj_name = cm.upper() if cm.lower() == 'os' else cm.capitalize() - exec(f'from {prefix}.{core_prefix}{cm} import {obj_name}') - results.append(eval(obj_name)) - return results - -# AGI BEGINS -CORE_MODULES = get_core_modules() - -class c(*CORE_MODULES): - core_modules = ['module', 'key', 'subspace', 'web3', 'serializer', 'pm2', - 'executor', 'client', 'server', - 'namespace' ] - libname = lib_name = lib = 'commune' # the name of the library - cost = 1 - description = """This is a module""" - base_module = 'module' # the base module - giturl = 'https://github.com/commune-ai/commune.git' # tge gutg - root_module_class = 'c' # WE REPLACE THIS THIS Module at the end, kindof odd, i know, ill fix it fam, chill out dawg, i didnt sleep with your girl - default_port_range = [50050, 50150] # the port range between 50050 and 50150 - default_ip = local_ip = loopback = '0.0.0.0' - address = '0.0.0.0:8888' # the address of the server (default) - rootpath = root_path = root = '/'.join(__file__.split('/')[:-2]) # the path to the root of the library - homepath = home_path = os.path.expanduser('~') # the home path - libpath = lib_path = os.path.dirname(root_path) # the path to the library - repopath = repo_path = os.path.dirname(root_path) # the path to the repo - cache = {} # cache for module objects - home = os.path.expanduser('~') # the home directory - __ss58_format__ = 42 # the ss58 format for the 
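# The composition pattern module.py relies on, in miniature: `class
# c(*CORE_MODULES)` builds one class from a list of mixin bases. The A/B
# mixins below are illustrative stand-ins for the core modules.
class A:
    def a(self): return 'a'

class B:
    def b(self): return 'b'

CORE = [A, B]
C = type('C', tuple(CORE), {})  # equivalent to: class C(A, B): ...
assert C().a() == 'a' and C().b() == 'b'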
substrate address - cache_path = os.path.expanduser(f'~/.{libname}') - default_tag = 'base' - - def __init__(self, *args, **kwargs): - pass - - @property - def key(self): - if not hasattr(self, '_key'): - if not hasattr(self, 'server_name') or self.server_name == None: - self.server_name = self.module_name() - self._key = c.get_key(self.server_name, create_if_not_exists=True) - return self._key - - @key.setter - def key(self, key: 'Key'): - if key == None: - key = self.server_name - self._key = key if hasattr(key, 'ss58_address') else c.get_key(key, create_if_not_exists=True) - return self._key - - @classmethod - async def async_call(cls, *args,**kwargs): - return c.call(*args, **kwargs) - - def getattr(self, k:str)-> Any: - return getattr(self, k) - - @classmethod - def getclassattr(cls, k:str)-> Any: - return getattr(cls, k) - - @classmethod - def module_file(cls) -> str: - # get the file of the module - return inspect.getfile(cls) - @classmethod - def filepath(cls, obj=None) -> str: - ''' - removes the PWD with respect to where module.py is located - ''' - obj = cls.resolve_object(obj) - try: - module_path = inspect.getfile(obj) - except Exception as e: - c.print(f'Error: {e} {cls}', color='red') - module_path = inspect.getfile(cls) - return module_path - - pythonpath = pypath = file_path = filepath - - @classmethod - def dirpath(cls) -> str: - ''' - removes the PWD with respect to where module.py is located - ''' - return os.path.dirname(cls.filepath()) - folderpath = dirname = dir_path = dirpath - - @classmethod - def module_name(cls, obj=None): - if hasattr(cls, 'name') and isinstance(cls.name, str): - return cls.name - obj = cls.resolve_object(obj) - module_file = inspect.getfile(obj) - return c.path2simple(module_file) - - path = name = module_name - - @classmethod - def module_class(cls) -> str: - return cls.__name__ - @classmethod - def class_name(cls, obj= None) -> str: - obj = obj if obj != None else cls - return obj.__name__ - - classname = class_name - - @classmethod - def config_path(cls) -> str: - return cls.filepath().replace('.py', '.yaml') - - @classmethod - def sandbox(cls): - c.cmd(f'python3 {c.root_path}/sandbox.py', verbose=True) - return - - sand = sandbox - - module_cache = {} - _obj = None - - @classmethod - def obj2module(cls,obj): - import commune as c - class WrapperModule(c.Module): - _obj = obj - def __name__(self): - return obj.__name__ - def __class__(self): - return obj.__class__ - @classmethod - def filepath(cls) -> str: - return super().filepath(cls._obj) - - for fn in dir(WrapperModule): - try: - setattr(obj, fn, getattr(WrapperModule, fn)) - except: - pass - - return obj - - @classmethod - def storage_dir(cls): - return f'{c.cache_path}/{cls.module_name()}' - - @classmethod - def refresh_storage(cls): - cls.rm(cls.storage_dir()) - - @classmethod - def refresh_storage_dir(cls): - c.rm(cls.storage_dir()) - c.makedirs(cls.storage_dir()) - - ############ JSON LAND ############### - - @classmethod - def __str__(cls): - return cls.__name__ - - @classmethod - def root_address(cls, name:str='module', - network : str = 'local', - timeout:int = 100, - sleep_interval:int = 1, - **kwargs): - """ - Root module - """ - try: - if not c.server_exists(name, network=network): - c.serve(name, network=network, wait_for_server=True, **kwargs) - address = c.call('module/address', network=network, timeout=timeout) - ip = c.ip() - address = ip+':'+address.split(':')[-1] - except Exception as e: - c.print(f'Error: {e}', color='red') - address = None - return address - - addy = 
root_address - - @property - def key_address(self): - return self.key.ss58_address - - @classmethod - def is_module(cls, obj=None) -> bool: - - if obj is None: - obj = cls - if all([hasattr(obj, k) for k in ['info', 'schema', 'set_config', 'config']]): - return True - return False - - @classmethod - def root_functions(cls): - return c.fns() - - @classmethod - def is_root(cls, obj=None) -> bool: - required_features = ['module_class','root_module_class', 'module_name'] - if obj is None: - obj = cls - if all([hasattr(obj, k) for k in required_features]): - module_class = obj.module_class() - if module_class == cls.root_module_class: - return True - return False - is_module_root = is_root_module = is_root - - @classmethod - def serialize(cls, *args, **kwargs): - return c.module('serializer')().serialize(*args, **kwargs) - @classmethod - def deserialize(cls, *args, **kwargs): - return c.module('serializer')().deserialize(*args, **kwargs) - - @property - def server_name(self): - if not hasattr(self, '_server_name'): - self._server_name = self.module_name() - return self._server_name - - @server_name.setter - def server_name(self, name): - self._server_name = name - - @classmethod - def resolve_object(cls, obj:str = None, **kwargs): - if isinstance(obj, str): - obj = c.module(obj, **kwargs) - if cls._obj != None: - return cls._obj - else: - return obj or cls - - def self_destruct(self): - c.kill(self.server_name) - - def self_restart(self): - c.restart(self.server_name) - - @classmethod - def pm2_start(cls, *args, **kwargs): - return c.module('pm2').start(*args, **kwargs) - - @classmethod - def pm2_launch(cls, *args, **kwargs): - return c.module('pm2').launch(*args, **kwargs) - - @classmethod - def restart(cls, name:str, mode:str='pm2', verbose:bool = False, prefix_match:bool = True): - refreshed_modules = getattr(cls, f'{mode}_restart')(name, verbose=verbose, prefix_match=prefix_match) - return refreshed_modules - - def restart_self(self): - """ - Helper function to restart the server - """ - return c.restart(self.server_name) - - update_self = restart_self - - def kill_self(self): - """ - Helper function to kill the server - """ - return c.kill(self.server_name) - - refresh = reset = restart - - @classmethod - def argparse(cls): - parser = argparse.ArgumentParser(description='Argparse for the module') - parser.add_argument('-m', '--m', '--module', '-module', dest='function', help='The function', type=str, default=cls.module_name()) - parser.add_argument('-fn', '--fn', dest='function', help='The function', type=str, default="__init__") - parser.add_argument('-kw', '-kwargs', '--kwargs', dest='kwargs', help='key word arguments to the function', type=str, default="{}") - parser.add_argument('-p', '-params', '--params', dest='params', help='key word arguments to the function', type=str, default="{}") - parser.add_argument('-i','-input', '--input', dest='input', help='key word arguments to the function', type=str, default="{}") - parser.add_argument('-args', '--args', dest='args', help='arguments to the function', type=str, default="[]") - args = parser.parse_args() - args.kwargs = json.loads(args.kwargs.replace("'",'"')) - args.params = json.loads(args.params.replace("'",'"')) - args.inputs = json.loads(args.input.replace("'",'"')) - args.args = json.loads(args.args.replace("'",'"')) - args.fn = args.function - # if you pass in the params, it will override the kwargs - if len(args.params) > 0: - if isinstance(args.params, dict): - args.kwargs = args.params - elif isinstance(args.params, list): - 
args.args = args.params - else: - raise Exception('Invalid params', args.params) - return args - - @classmethod - def run(cls, name:str = None) -> Any: - is_main = name == '__main__' or name == None or name == cls.__name__ - if not is_main: - return {'success':False, 'message':f'Not main module {name}'} - args = cls.argparse() - if args.function == '__init__': - return cls(*args.args, **args.kwargs) - else: - fn = getattr(cls, args.function) - fn_type = cls.classify_fn(fn) - if fn_type == 'self': - module = cls(*args.args, **args.kwargs) - else: - module = cls - return getattr(module, args.function)(*args.args, **args.kwargs) - - @classmethod - def commit_hash(cls, libpath:str = None): - if libpath == None: - libpath = c.libpath - return c.cmd('git rev-parse HEAD', cwd=libpath, verbose=False).split('\n')[0].strip() - - @classmethod - def commit_ticket(cls, **kwargs): - commit_hash = cls.commit_hash() - ticket = c.ticket(commit_hash, **kwargs) - assert c.verify(ticket) - return ticket - - @classmethod - def module_fn(cls, module:str, fn:str , args:list = None, kwargs:dict= None): - module = c.module(module) - is_self_method = bool(fn in module.self_functions()) - if is_self_method: - module = module() - fn = getattr(module, fn) - else: - fn = getattr(module, fn) - args = args or [] - kwargs = kwargs or {} - return fn(*args, **kwargs) - - fn = module_fn - - @classmethod - def info_hash(self): - return c.commit_hash() - - @classmethod - def module(cls,module: Any = 'module' , verbose=False, **kwargs): - ''' - Wraps a python class as a module - ''' - t0 = c.time() - module_class = c.get_module(module,**kwargs) - latency = c.time() - t0 - c.print(f'Loaded {module} in {latency} seconds', color='green', verbose=verbose) - return module_class - - - _module = m = mod = module - - # UNDER CONSTRUCTION (USE WITH CAUTION) - - def setattr(self, k, v): - setattr(self, k, v) - - @classmethod - def pip_exists(cls, lib:str, verbose:str=True): - return bool(lib in cls.pip_libs()) - - @classmethod - def version(cls, lib:str=libname): - lines = [l for l in cls.cmd(f'pip3 list', verbose=False).split('\n') if l.startswith(lib)] - if len(lines)>0: - return lines[0].split(' ')[-1].strip() - else: - return f'No Library Found {lib}' - - def forward(self, a=1, b=2): - return a+b - - ### DICT LAND ### - - def to_dict(self)-> Dict: - return self.__dict__ - - @classmethod - def from_dict(cls, input_dict:Dict[str, Any]) -> 'Module': - return cls(**input_dict) - - def to_json(self) -> str: - state_dict = self.to_dict() - assert isinstance(state_dict, dict), 'State dict must be a dictionary' - assert self.jsonable(state_dict), 'State dict must be jsonable' - return json.dumps(state_dict) - - @classmethod - def from_json(cls, json_str:str) -> 'Module': - import json - return cls.from_dict(json.loads(json_str)) - - @classmethod - def test_fns(cls, *args, **kwargs): - return [f for f in cls.functions(*args, **kwargs) if f.startswith('test_')] - - @classmethod - def argv(cls, include_script:bool = False): - import sys - args = sys.argv - if include_script: - return args - else: - return args[1:] - - @classmethod - def is_file_module(cls, module = None) -> bool: - if module != None: - cls = c.module(module) - dirpath = cls.dirpath() - filepath = cls.filepath() - return bool(dirpath.split('/')[-1] != filepath.split('/')[-1].split('.')[0]) - - @classmethod - def is_folder_module(cls, module = None) -> bool: - if module != None: - cls = c.module(module) - return not cls.is_file_module() - - is_module_folder = is_folder_module - - 
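# Aside: a minimal, runnable sketch of the dispatch pattern that module_fn and
# run() (above) rely on -- instantiate the module only when the target is a
# plain `self` method, otherwise call it straight off the class. The `Demo`
# class and `dispatch` helper below are illustrative stand-ins, not part of
# this patch.
import inspect
import types

class Demo:
    def forward(self, a=1, b=2):   # instance method: needs an instance
        return a + b

    @classmethod
    def name(cls):                 # classmethod: callable on the class itself
        return cls.__name__

def dispatch(module, fn, args=None, kwargs=None):
    args, kwargs = args or [], kwargs or {}
    attr = inspect.getattr_static(module, fn)
    # a plain function only becomes a bound method on an instance
    target = module() if isinstance(attr, types.FunctionType) else module
    return getattr(target, fn)(*args, **kwargs)

assert dispatch(Demo, 'forward', [1], {'b': 2}) == 3
assert dispatch(Demo, 'name') == 'Demo'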
@classmethod - def get_key(cls,key:str = None ,mode='commune', **kwargs) -> None: - mode2module = { - 'commune': 'key', - 'subspace': 'subspace.key', - 'substrate': 'web3.account.substrate', - 'evm': 'web3.account.evm', - 'aes': 'key.aes', - } - - key = cls.resolve_keypath(key) - if 'Keypair' in c.type_str(key): - return key - module = c.module(mode2module[mode]) - if hasattr(module, 'get_key'): - key = module.get_key(key, **kwargs) - else: - key = module(key, **kwargs) - - return key - - @classmethod - def id(self): - return self.key.ss58_address - - @property - def ss58_address(self): - if not hasattr(self, '_ss58_address'): - self._ss58_address = self.key.ss58_address - return self._ss58_address - - @ss58_address.setter - def ss58_address(self, value): - self._ss58_address = value - return self._ss58_address - - @classmethod - def readme_paths(cls): - readme_paths = [f for f in c.ls(cls.dirpath()) if f.endswith('md')] - return readme_paths - - @classmethod - def has_readme(cls): - return len(cls.readme_paths()) > 0 - - @classmethod - def readme(cls) -> str: - readme_paths = cls.readme_paths() - if len(readme_paths) == 0: - return '' - return c.get_text(readme_paths[0]) - - @classmethod - def encrypt(cls, - data: Union[str, bytes], - key: str = None, - password: str = None, - **kwargs - ) -> bytes: - """ - encrypt data with key - """ - key = c.get_key(key) - return key.encrypt(data, password=password,**kwargs) - - @classmethod - def decrypt(cls, - data: Union[str, bytes], - key: str = None, - password : str = None, - **kwargs) -> bytes: - key = c.get_key(key) - return key.decrypt(data, password=password, **kwargs) - - @classmethod - def type_str(cls, x): - return type(x).__name__ - - @classmethod - def keys(cls, search = None, ss58=False,*args, **kwargs): - if search == None: - search = cls.module_name() - if search == 'module': - search = None - keys = c.module('key').keys(search, *args, **kwargs) - if ss58: - keys = [c.get_key_address(k) for k in keys] - return keys - - @classmethod - def get_mem(cls, *args, **kwargs): - return c.module('key').get_mem(*args, **kwargs) - - mem = get_mem - - @classmethod - def set_key(self, key:str, **kwargs) -> None: - key = self.get_key(key) - self.key = key - return key - - @classmethod - def resolve_keypath(cls, key = None): - if key == None: - key = cls.module_name() - return key - - def resolve_key(self, key: str = None) -> str: - if key == None: - if hasattr(self, 'key'): - key = self.key - key = self.resolve_keypath(key) - key = self.get_key(key) - return key - - def sign(self, data:dict = None, key: str = None, **kwargs) -> bool: - return self.resolve_key(key).sign(data, **kwargs) - - @classmethod - def verify(cls, auth, key=None, **kwargs ) -> bool: - return c.get_key(key).verify(auth, **kwargs) - - @classmethod - def verify_ticket(cls, auth, key=None, **kwargs ) -> bool: - return c.get_key(key).verify_ticket(auth, **kwargs) - - @classmethod - def start(cls, *args, **kwargs): - return cls(*args, **kwargs) - - def remove_user(self, key: str) -> None: - if not hasattr(self, 'users'): - self.users = [] - self.users.pop(key, None) - - @classmethod - def is_pwd(cls, module:str = None): - if module != None: - module = c.module(module) - else: - module = cls - return module.dirpath() == c.pwd() - - - @classmethod - def shortcuts(cls, cache=True) -> Dict[str, str]: - return cls.get_yaml(os.path.dirname(__file__)+ '/module.yaml' ).get('shortcuts') - - def __repr__(self) -> str: - return f'<{self.class_name()}' - def __str__(self) -> str: - return 
f'<{self.class_name()}' - - - @classmethod - def get_commune(cls): - from commune import c - return c - - def pull(self): - return c.cmd('git pull', verbose=True, cwd=c.libpath) - - def push(self, msg:str = 'update'): - c.cmd('git add .', verbose=True, cwd=c.libpath) - c.cmd(f'git commit -m "{msg}"', verbose=True, cwd=c.libpath) - return c.cmd('git push', verbose=True, cwd=c.libpath) - @classmethod - def base_config(cls, cache=True): - if cache and hasattr(cls, '_base_config'): - return cls._base_config - cls._base_config = cls.get_yaml(cls.config_path()) - return cls._base_config - - @classmethod - def local_config(cls, filename_options = ['module', 'commune', 'config', 'cfg'], cache=True): - if cache and hasattr(cls, '_local_config'): - return cls._local_config - local_config = {} - for filename in filename_options: - if os.path.exists(f'./{filename}.yaml'): - local_config = cls.get_yaml(f'./{filename}.yaml') - if local_config != None: - break - cls._local_config = local_config - return cls._local_config - - @classmethod - def local_module(cls, filename_options = ['module', 'agent', 'block'], cache=True): - for filename in filename_options: - path = os.path.dirname(f'./{filename}.py') - for filename in filename_options: - if os.path.exists(path): - classes = cls.find_classes(path) - if len(classes) > 0: - return classes[-1] - return None - - # local update - @classmethod - def update(cls, - module = None, - namespace: bool = False, - subspace: bool = False, - network: str = 'local', - **kwargs - ): - responses = [] - if module != None: - return c.module(module).update() - # update local namespace - if namespace: - responses.append(c.namespace(network=network, update=True)) - return {'success': True, 'responses': responses} - - @classmethod - def set_key(self, key:str, **kwargs) -> None: - key = self.get_key(key) - self.key = key - return key - - @classmethod - def resolve_keypath(cls, key = None): - if key == None: - key = cls.module_name() - return key - - def sign(self, data:dict = None, key: str = None, **kwargs) -> bool: - key = self.resolve_key(key) - signature = key.sign(data, **kwargs) - return signature - - def logs(self, name:str = None, verbose: bool = False): - return c.pm2_logs(name, verbose=verbose) - - def hardware(self, *args, **kwargs): - return c.obj('commune.utils.os.hardware')(*args, **kwargs) - - def set_params(self,*args, **kwargs): - return self.set_config(*args, **kwargs) - - def init_module(self,*args, **kwargs): - return self.set_config(*args, **kwargs) - - - - - helper_functions = ['info', - 'metadata', - 'schema', - 'server_name', - 'is_admin', - 'namespace', - 'whitelist', - 'endpoints', - 'forward', - 'module_name', - 'class_name', - 'name', - 'address', - 'fns'] # whitelist of helper functions to load - - def add_endpoint(self, name, fn): - setattr(self, name, fn) - self.endpoints.append(name) - assert hasattr(self, name), f'{name} not added to {self.__class__.__name__}' - return {'success':True, 'message':f'Added {fn} to {self.__class__.__name__}'} - - def is_endpoint(self, fn) -> bool: - if isinstance(fn, str): - fn = getattr(self, fn) - return hasattr(fn, '__metadata__') - - def get_endpoints(self, search: str =None , helper_fn_attributes = ['helper_functions', - 'whitelist', - '_endpoints', - '__endpoints___']): - endpoints = [] - for k in helper_fn_attributes: - if hasattr(self, k): - fn_obj = getattr(self, k) - if callable(fn_obj): - endpoints += fn_obj() - else: - endpoints += fn_obj - for f in dir(self): - try: - if not callable(getattr(self, f)) or 
(search != None and search not in f): - continue - fn_obj = getattr(self, f) # you need to watchout for properties - is_endpoint = hasattr(fn_obj, '__metadata__') - if is_endpoint: - endpoints.append(f) - except Exception as e: - print(f'Error in get_endpoints: {e} for {f}') - return sorted(list(set(endpoints))) - - endpoints = get_endpoints - - - def cost_fn(self, fn:str, args:list, kwargs:dict): - return 1 - - @classmethod - def endpoint(cls, - cost=1, # cost per call - user2rate : dict = None, - rate_limit : int = 100, # calls per minute - timestale : int = 60, - public:bool = False, - cost_keys = ['cost', 'w', 'weight'], - **kwargs): - - for k in cost_keys: - if k in kwargs: - cost = kwargs[k] - break - - def decorator_fn(fn): - metadata = { - **cls.fn_schema(fn), - 'cost': cost, - 'rate_limit': rate_limit, - 'user2rate': user2rate, - 'timestale': timestale, - 'public': public, - } - import commune as c - fn.__dict__['__metadata__'] = metadata - - return fn - - return decorator_fn - - - - def metadata(self, to_string=False): - if hasattr(self, '_metadata'): - return self._metadata - metadata = {} - metadata['schema'] = self.schema() - metadata['description'] = self.description - metadata['urls'] = {k: v for k,v in self.urls.items() if v != None} - if to_string: - return self.python2str(metadata) - self._metadata = metadata - return metadata - - def info(self , - module = None, - lite_features = ['name', 'address', 'schema', 'key', 'description'], - lite = True, - cost = False, - **kwargs - ) -> Dict[str, Any]: - ''' - hey, whadup hey how is it going - ''' - info = self.metadata() - info['name'] = self.server_name or self.module_name() - info['address'] = self.address - info['key'] = self.key.ss58_address - return info - - @classmethod - def is_public(cls, fn): - if not cls.is_endpoint(fn): - return False - return getattr(fn, '__metadata__')['public'] - - - urls = {'github': None, - 'website': None, - 'docs': None, - 'twitter': None, - 'discord': None, - 'telegram': None, - 'linkedin': None, - 'email': None} - - - - def schema(self, - search = None, - docs: bool = True, - defaults:bool = True, - cache=True) -> 'Schema': - if self.is_str_fn(search): - return self.fn_schema(search, docs=docs, defaults=defaults) - schema = {} - if cache and self._schema != None: - return self._schema - fns = self.get_endpoints() - for fn in fns: - if search != None and search not in fn: - continue - if callable(getattr(self, fn )): - schema[fn] = self.fn_schema(fn, defaults=defaults,docs=docs) - # sort by keys - schema = dict(sorted(schema.items())) - if cache: - self._schema = schema - - return schema - - - @classmethod - def has_routes(cls): - return cls.config().get('routes') is not None - - route_cache = None - @classmethod - def routes(cls, cache=True): - if cls.route_cache is not None and cache: - return cls.route_cache - routes = cls.get_yaml(os.path.dirname(__file__)+ '/module.yaml').get('routes') - cls.route_cache = routes - return routes - - #### THE FINAL TOUCH , ROUTE ALL OF THE MODULES TO THE CURRENT MODULE BASED ON THE routes CONFIG - - - @classmethod - def route_fns(cls): - routes = cls.routes() - route_fns = [] - for module, fns in routes.items(): - for fn in fns: - if isinstance(fn, dict): - fn = fn['to'] - elif isinstance(fn, list): - fn = fn[1] - elif isinstance(fn, str): - fn - else: - raise ValueError(f'Invalid route {fn}') - route_fns.append(fn) - return route_fns - - - @staticmethod - def resolve_to_from_fn_routes(fn): - ''' - resolve the from and to function names from the routes - 
option 1: - {fn: 'fn_name', name: 'name_in_current_module'} - option 2: - {from: 'fn_name', to: 'name_in_current_module'} - ''' - - if type(fn) in [list, set, tuple] and len(fn) == 2: - # option 1: ['fn_name', 'name_in_current_module'] - from_fn = fn[0] - to_fn = fn[1] - elif isinstance(fn, dict) and all([k in fn for k in ['fn', 'name']]): - if 'fn' in fn and 'name' in fn: - to_fn = fn['name'] - from_fn = fn['fn'] - elif 'from' in fn and 'to' in fn: - from_fn = fn['from'] - to_fn = fn['to'] - else: - from_fn = fn - to_fn = fn - - return from_fn, to_fn - - - @classmethod - def enable_routes(cls, routes:dict=None, verbose=False): - from functools import partial - """ - This ties other modules into the current module. - The way it works is that it takes the module name and the function name and creates a partial function that is bound to the module. - This allows you to call the function as if it were a method of the current module. - for example - """ - my_path = cls.class_name() - if not hasattr(cls, 'routes_enabled'): - cls.routes_enabled = False - - t0 = cls.time() - - # WARNING : THE PLACE HOLDERS MUST NOT INTERFERE WITH THE KWARGS OTHERWISE IT WILL CAUSE A BUG IF THE KWARGS ARE THE SAME AS THE PLACEHOLDERS - # THE PLACEHOLDERS ARE NAMED AS module_ph and fn_ph AND WILL UNLIKELY INTERFERE WITH THE KWARGS - def fn_generator( *args, module_ph, fn_ph, **kwargs): - module_ph = cls.module(module_ph) - fn_type = module_ph.classify_fn(fn_ph) - module_ph = module_ph() if fn_type == 'self' else module_ph - return getattr(module_ph, fn_ph)(*args, **kwargs) - - if routes == None: - if not hasattr(cls, 'routes'): - return {'success': False, 'msg': 'routes not found'} - routes = cls.routes() if callable(cls.routes) else cls.routes - for m, fns in routes.items(): - if fns in ['all', '*']: - fns = c.functions(m) - - for fn in fns: - # resolve the from and to function names - from_fn, to_fn = cls.resolve_to_from_fn_routes(fn) - # create a partial function that is bound to the module - fn_obj = partial(fn_generator, fn_ph=from_fn, module_ph=m ) - # make sure the funciton is as close to the original function as possible - fn_obj.__name__ = to_fn - # set the function to the current module - setattr(cls, to_fn, fn_obj) - cls.print(f'ROUTE({m}.{fn} -> {my_path}:{fn})', verbose=verbose) - - t1 = cls.time() - cls.print(f'enabled routes in {t1-t0} seconds', verbose=verbose) - cls.routes_enabled = True - return {'success': True, 'msg': 'enabled routes'} - - @classmethod - def fn2module(cls): - ''' - get the module of a function - ''' - routes = cls.routes() - fn2module = {} - for module, fn_routes in routes.items(): - for fn_route in fn_routes: - if isinstance(fn_route, dict): - fn_route = fn_route['to'] - elif isinstance(fn_route, list): - fn_route = fn_route[1] - fn2module[fn_route] = module - return fn2module - - def is_route(cls, fn): - ''' - check if a function is a route - ''' - return fn in cls.fn2module() - - - - @classmethod - def has_test_module(cls, module=None): - module = module or cls.module_name() - return cls.module_exists(cls.module_name() + '.test') - - @classmethod - def test(cls, - module=None, - timeout=42, - trials=3, - parallel=False, - ): - module = module or cls.module_name() - - if cls.has_test_module(module): - cls.print('FOUND TEST MODULE', color='yellow') - module = module + '.test' - self = cls.module(module)() - test_fns = self.test_fns() - print(f'testing {module} {test_fns}') - - def trial_wrapper(fn, trials=trials): - def trial_fn(trials=trials): - - for i in range(trials): - 
try: - return fn() - except Exception as e: - print(f'Error: {e}, Retrying {i}/{trials}') - cls.sleep(1) - return False - return trial_fn - fn2result = {} - if parallel: - future2fn = {} - for fn in self.test_fns(): - cls.print(f'testing {fn}') - f = cls.submit(trial_wrapper(getattr(self, fn)), timeout=timeout) - future2fn[f] = fn - for f in cls.as_completed(future2fn, timeout=timeout): - fn = future2fn.pop(f) - fn2result[fn] = f.result() - else: - for fn in self.test_fns(): - fn2result[fn] = trial_wrapper(getattr(self, fn))() - return fn2result - - -c.enable_routes() -Module = c # Module is alias of c -Module.run(__name__) - - diff --git a/commune/module/module.yaml b/commune/module/module.yaml deleted file mode 100644 index 667408af..00000000 --- a/commune/module/module.yaml +++ /dev/null @@ -1,218 +0,0 @@ - -port_range: 2500-5001 -shortcuts: - chain: subspace.chain - d: docker - f: frontend - freegpt: model.freegpt - lit: lit_gpt - openai: model.openai - openrouter: model.openrouter - or: model.openrouter - r: remote - s: subspace - sel: selenium - store: storage - tg: textgen - w: wombo - router: model.openrouter - ticket: key.ticket - namespace: server.namespace - -routes: - vali: - - run_epoch - cli: - - parse_args - streamlit: - - set_page_config - docker: - - containers - client: - - call - - call_search - - connect - repo: - - is_repo - key: - - rename_key - - ss58_encode - - ss58_decode - - key2mem - - key_info_map - - key_info - - valid_ss58_address - - add_key - - from_password - - str2key - - pwd2key - - getmem - - mem - - mems - - switch_key - - module_info - - rename_kefy - - mv_key - - add_key - - add_keys - - key_exists - - ls_keys - - rm_key - - key_encrypted - - encrypt_key - - staked - - encrypt_key - - get_keys - - rm_keys - - key2address - - key_addresses - - address2key - - is_key - - new_key - - save_keys - - load_key - - load_keys - - get_signer - - encrypt_file - - decrypt_file - - get_key_for_address - - resolve_key_address - - verify_ticket - - ticket - remote: - - host2ssh - namespace: [ - add_remote, - network2namespace, - register_server, - deregister_server, - server_exists, - add_server, - has_server, - add_servers, - rm_servers, - rm_server, - remote_servers, - namespace, - rm_namespace, - empty_namespace, - add_namespace, - update_namespace, - build_namespace, - put_namespace, - get_namespace, - server2info, - infos, - get_address, - servers, - name2address, - namespace, - get_address] - app : - - start_app - - app - - apps - - app2info - - [kill, kill_app] - user: - - role2users - - is_user - - get_user - - update_user - - get_role - - refresh_users - - user_exists - - is_admin - - admins - - add_admin - - rm_admin - - num_roles - - rm_user - streamlit: - - load_style - - st_load_css - docker: - - dlogs - - images - code: - - determine_type - - process_kwargs - - python2str - - str2python - - bytes2str - - str2bytes - - str2python - - python2str - - is_generator - - dict2munch - - munch2dict - - dict2json - - json2dict - - server: [serve, serve_many, serve_all, kill, kill_many, kill_all, wait_for_server] - pm2: - - [kill_many, pm2_kill_many] # we can change the name of the function - - [kill, pm2_kill] - - [pm2ls, pm2ls] - - [kill_all, pm2_kill_all] - - [start, pm2_start] - - [stop, pm2_stop] - - [restart, pm2_restart] - - [restart_prefix, pm2_restart_prefix] - - [exists, pm2_exists] - - [servers, pm2_servers] - - [logs, pm2_logs] - - launch - - remote_fn - vali: - - run_epoch - - setup_vali - - from_module - - subspace: - - key2balance - - my_keys - - 
key2value - - send - - transfer - - stake - - unstake - - register - - subnet_params - - global_params - - balance - - get_balance - - get_stak - - get_stake_to - - get_stake_from - - my_stake_to - - register - - unstake - - stake - - transfer - - netuid2subnet - - subnet2netuid - - stake_transfer - - is_registered - - registered_subnets - - registered_netuids - - model.openrouter: - - generate - - ask - - models - executor: - - wait - - gather - - submit - - submit_batch - - thread - - threads - - as_completed - - is_coroutine - - obj2typestr - - tasks - - thread_map - - detailed_error - - diff --git a/commune/modules/agent/data/agent_data.py b/commune/modules/agent/data/agent_data.py index 83205e65..8fac248f 100644 --- a/commune/modules/agent/data/agent_data.py +++ b/commune/modules/agent/data/agent_data.py @@ -7,7 +7,7 @@ class Demo(c.Module): """ def __init__(self, a=1, b=2): - self.set_config(kwargs=locals()) + self.set_config(locals()) def call(self, timeout=30) -> int: model = c.connect('model.openai') # connect to the model diff --git a/commune/modules/agent/factory/agent_factory.py b/commune/modules/agent/factory/agent_factory.py index 5ad8e026..038d5223 100644 --- a/commune/modules/agent/factory/agent_factory.py +++ b/commune/modules/agent/factory/agent_factory.py @@ -2,7 +2,7 @@ class AgentFactory(c.Module): def __init__(self, a=1, b=2): - self.set_config(kwargs=locals()) + self.set_config(locals()) def call(self, x:int = 1, y:int = 2) -> int: c.print(self.config) diff --git a/commune/modules/agent/maker/agent_maker.py b/commune/modules/agent/maker/agent_maker.py index a34a7ed9..4fc9786c 100644 --- a/commune/modules/agent/maker/agent_maker.py +++ b/commune/modules/agent/maker/agent_maker.py @@ -12,7 +12,7 @@ class Demo(c.Module): } def __init__(self, a=1, b=2): - self.set_config(kwargs=locals()) + self.set_config(locals()) def call(self, description) -> int: x = json.dumps({ diff --git a/commune/modules/base/base.py b/commune/modules/base/base.py index 1b0418ee..082150c9 100644 --- a/commune/modules/base/base.py +++ b/commune/modules/base/base.py @@ -2,7 +2,7 @@ class Demo(c.Module): def __init__(self, a=1, b=2): - self.set_config(kwargs=locals()) + self.set_config(locals()) def call(self, x:int = 1, y:int = 2) -> int: c.print(self.config) diff --git a/commune/modules/data/text/code/data_text_code.py b/commune/modules/data/text/code/data_text_code.py index 113b9fb0..ce598ff3 100644 --- a/commune/modules/data/text/code/data_text_code.py +++ b/commune/modules/data/text/code/data_text_code.py @@ -2,7 +2,7 @@ class DataTextCode(c.Module): def __init__(self, **kwargs): - config = self.set_config(kwargs=kwargs) + config = self.set_config(kwargs) self.folder_path = self.resolve_path(config.folder_path) self.filepaths = sorted([f for f in self.walk(self.folder_path) if f.endswith('.py')]) diff --git a/commune/modules/data/text/folder/data_text_folder.py b/commune/modules/data/text/folder/data_text_folder.py index 2d38b10c..53f5415b 100644 --- a/commune/modules/data/text/folder/data_text_folder.py +++ b/commune/modules/data/text/folder/data_text_folder.py @@ -2,7 +2,7 @@ class DataFolder(c.Module): def __init__(self, folder_path: str = './', suffix: str = '.py'): - config = self.set_config(kwargs=locals()) + config = self.set_config(locals()) self.folder_path = self.resolve_path(config.folder_path) self.filepaths = sorted([f for f in self.walk(self.folder_path) if f.endswith('.py')]) diff --git a/commune/modules/data/text/folder/docs/data_text_realfake_docs.md 
b/commune/modules/data/text/folder/docs/data_text_realfake_docs.md index 6e6dc748..287003b3 100644 --- a/commune/modules/data/text/folder/docs/data_text_realfake_docs.md +++ b/commune/modules/data/text/folder/docs/data_text_realfake_docs.md @@ -52,7 +52,7 @@ The `DataTextRealfake` class is defined, inheriting from the `c.Module` class (p ```python def __init__(self, **kwargs): - config = self.set_config(kwargs=kwargs) + config = self.set_config(kwargs) self.folder_path = self.resolve_path(config.folder_path) self.filepaths = sorted([f for f in self.walk(self.folder_path) if f.endswith('.py')]) ``` diff --git a/commune/modules/data/text/math/data_text_math.py b/commune/modules/data/text/math/data_text_math.py index 4ddac9c9..2d03c4cc 100644 --- a/commune/modules/data/text/math/data_text_math.py +++ b/commune/modules/data/text/math/data_text_math.py @@ -2,7 +2,7 @@ import random class DataTextMath(c.Module): def __init__(self, **kwargs): - config = self.set_config(kwargs=kwargs) + config = self.set_config(kwargs) self.operations = { 'add': '+', 'subtract': '-', diff --git a/commune/modules/data/text/realfake/data_text_realfake.py b/commune/modules/data/text/realfake/data_text_realfake.py index 93dac4eb..7e8ede5f 100644 --- a/commune/modules/data/text/realfake/data_text_realfake.py +++ b/commune/modules/data/text/realfake/data_text_realfake.py @@ -14,7 +14,7 @@ class DataTextRealfake(c.Module): ''' def __init__(self, **kwargs): - config = self.set_config(kwargs=kwargs) + config = self.set_config(kwargs) self.folder_path = self.resolve_path(config.folder_path) self.filepaths = sorted([f for f in self.walk(self.folder_path) if f.endswith('.py')]) diff --git a/commune/modules/data/text/realfake/docs/data_text_realfake_docs.md b/commune/modules/data/text/realfake/docs/data_text_realfake_docs.md index 6e6dc748..287003b3 100644 --- a/commune/modules/data/text/realfake/docs/data_text_realfake_docs.md +++ b/commune/modules/data/text/realfake/docs/data_text_realfake_docs.md @@ -52,7 +52,7 @@ The `DataTextRealfake` class is defined, inheriting from the `c.Module` class (p ```python def __init__(self, **kwargs): - config = self.set_config(kwargs=kwargs) + config = self.set_config(kwargs) self.folder_path = self.resolve_path(config.folder_path) self.filepaths = sorted([f for f in self.walk(self.folder_path) if f.endswith('.py')]) ``` diff --git a/commune/modules/evm/network.py b/commune/modules/evm/network.py index bff7d6f7..9815137c 100644 --- a/commune/modules/evm/network.py +++ b/commune/modules/evm/network.py @@ -65,7 +65,7 @@ class EVMNetwork(c.Module): def __init__(self, network:str = 'local.main'): - self.set_config(kwargs=locals()) + self.set_config(locals()) self.set_network(network) @property diff --git a/commune/modules/model/hf/model_hf.py b/commune/modules/model/hf/model_hf.py index e900c131..155ee60d 100644 --- a/commune/modules/model/hf/model_hf.py +++ b/commune/modules/model/hf/model_hf.py @@ -28,7 +28,7 @@ def __init__(self, test:bool = True): # OPTIONS = ['int4', 'int8', None] # Here you would initial - config = self.set_config(kwargs=locals()) + config = self.set_config(locals()) self.init_model() self.set_model(config) diff --git a/commune/modules/model/openai/openai.py b/commune/modules/model/openai/openai.py index c98c71bb..663bb6b8 100644 --- a/commune/modules/model/openai/openai.py +++ b/commune/modules/model/openai/openai.py @@ -22,7 +22,7 @@ def __init__(self, api_key = None, ): - self.set_config(kwargs=locals()) + self.set_config(locals()) self.usage = 
c.module('model.openai.usage_tracker')(tokenizer=tokenizer, max_output_tokens=max_output_tokens, max_input_tokens=max_input_tokens) self.birth_time = c.time() self.set_api_key(api_key) diff --git a/commune/modules/repo/repo.py b/commune/modules/repo/repo.py index 04370384..1feb2248 100644 --- a/commune/modules/repo/repo.py +++ b/commune/modules/repo/repo.py @@ -4,7 +4,7 @@ class Repo(c.Module): def __init__(self, a=1, b=2): - self.set_config(kwargs=locals()) + self.set_config(locals()) def is_repo(self, path): # is a directory diff --git a/commune/modules/sandbox.py b/commune/modules/sandbox.py index a2c8a42a..c74db22d 100644 --- a/commune/modules/sandbox.py +++ b/commune/modules/sandbox.py @@ -1,3 +1,4 @@ import commune as c -print(c.call('chat/module_name')) \ No newline at end of file +print(dir(c.import_module('commune.module._storage'))) +print('FAM') \ No newline at end of file diff --git a/commune/modules/tool/search/tool_search.py b/commune/modules/tool/search/tool_search.py index 75754b2e..42f1c336 100644 --- a/commune/modules/tool/search/tool_search.py +++ b/commune/modules/tool/search/tool_search.py @@ -2,7 +2,7 @@ class Demo(c.Module): def __init__(self, a=1, b=2): - self.set_config(kwargs=locals()) + self.set_config(locals()) def create_dope_stuff(self, x:int = 1, y:int = 2) -> int: c.print(self.config) diff --git a/commune/routes.yaml b/commune/routes.yaml new file mode 100644 index 00000000..2b23f81d --- /dev/null +++ b/commune/routes.yaml @@ -0,0 +1,182 @@ +vali: + - run_epoch +cli: + - parse_args +streamlit: + - set_page_config +docker: + - containers +client: + - call + - call_search + - connect +repo: + - is_repo +key: + - rename_key + - ss58_encode + - ss58_decode + - key2mem + - key_info_map + - key_info + - valid_ss58_address + - add_key + - from_password + - str2key + - pwd2key + - getmem + - mem + - mems + - switch_key + - module_info + - rename_key + - mv_key + - add_key + - add_keys + - key_exists + - ls_keys + - rm_key + - key_encrypted + - encrypt_key + - staked + - encrypt_key + - get_keys + - rm_keys + - key2address + - key_addresses + - address2key + - is_key + - new_key + - save_keys + - load_key + - load_keys + - get_signer + - encrypt_file + - decrypt_file + - get_key_for_address + - resolve_key_address + - verify_ticket + - ticket +remote: + - host2ssh +namespace: [ + add_remote, + network2namespace, + register_server, + deregister_server, + server_exists, + add_server, + has_server, + add_servers, + rm_servers, + rm_server, + remote_servers, + namespace, + rm_namespace, + empty_namespace, + add_namespace, + update_namespace, + build_namespace, + put_namespace, + get_namespace, + server2info, + infos, + get_address, + servers, + name2address, + namespace, + get_address] +app : + - start_app + - app + - apps + - app2info + - [kill, kill_app] +user: + - role2users + - is_user + - get_user + - update_user + - get_role + - refresh_users + - user_exists + - is_admin + - admins + - add_admin + - rm_admin + - num_roles + - rm_user +streamlit: + - load_style + - st_load_css +docker: + - dlogs + - images +code: + - determine_type + - process_kwargs + - python2str + - str2python + - bytes2str + - str2bytes + - str2python + - python2str + - is_generator + - dict2munch + - munch2dict + - dict2json + - json2dict + +server: [serve, serve_many, serve_all, kill, kill_many, kill_all, wait_for_server] +pm2: + - [kill_many, pm2_kill_many] # we can change the name of the function + - [kill, pm2_kill] + - [pm2ls, pm2ls] + - [kill_all, pm2_kill_all] + - [start, pm2_start] + - 
[stop, pm2_stop] + - [restart, pm2_restart] + - [restart_prefix, pm2_restart_prefix] + - [exists, pm2_exists] + - [servers, pm2_servers] + - [logs, pm2_logs] + - launch + - remote_fn +vali: + - run_epoch + - setup_vali + - from_module + +subspace: + - key2balance + - my_keys + - key2value + - send + - transfer + - stake + - unstake + - register + - subnet_params + - global_params + - balance + - get_balance + - get_stake + - get_stake_to + - get_stake_from + - my_stake_to + - register + - unstake + - stake + - transfer + - netuid2subnet + - subnet2netuid + - stake_transfer + - is_registered + - registered_subnets + - registered_netuids +model.openrouter: + - generate + - ask + - models +chat: + - ask \ No newline at end of file diff --git a/commune/server/manager.py b/commune/server/manager.py deleted file mode 100644 index c06e57eb..00000000 --- a/commune/server/manager.py +++ /dev/null @@ -1,329 +0,0 @@ -import commune as c -from typing import * -from fastapi import FastAPI, Request -from fastapi.middleware.cors import CORSMiddleware -import uvicorn -import json -import asyncio -from sse_starlette.sse import EventSourceResponse - -class ServerManager(c.Module): - - @classmethod - def serve(cls, - module: Any = None, - kwargs:Optional[dict] = None, # kwargs for the module - params: Optional[dict] = None, # kwargs for the module - tag:Optional[str]=None, - network: Optional[str] = 'subspace', # network to run the server - port :Optional[int] = None, # name of the server if None, it will be the module name - server_name:str=None, # name of the server if None, it will be the module name - name = None, # name of the server if None, it will be the module name - remote:bool = True, # runs the server remotely (pm2, ray) - tag_seperator:str='::', - max_workers:int = None, - public: bool = False, - mnemonic = None, # mnemonic for the server - key = None, - **extra_kwargs - ): - kwargs = {**(params or kwargs or {}), **extra_kwargs} - name = (name or server_name or module) or c.module_name() - if tag_seperator in name: - module, tag = name.split(tag_seperator) - if tag != None: - name = f'{module}{tag_seperator}{tag}' - if port == None: - namespace = c.namespace() - if name in namespace: - port = int(namespace.get(name).split(':')[-1]) - else: - port = c.free_port() - if c.port_used(port): - c.kill_port(port) - address = f'0.0.0.0:{port}' - # RESOLVE THE PORT FROM THE ADDRESS IF IT ALREADY EXISTS - # # NOTE REMOVE is FROM THE KWARGS REMOTE - response = { 'module':module, 'name': name, 'address':address, 'kwargs':kwargs} - if remote: - remote = False - remote_kwargs = c.locals2kwargs(locals()) # GET THE LOCAL KWARGS FOR SENDING TO THE REMOTE - for _ in ['extra_kwargs', 'address', 'response']: - remote_kwargs.pop(_, None) # WE INTRODUCED THE ADDRESS - cls.remote_fn('serve', name=name, kwargs=remote_kwargs) - return response - cls(module=c.module(module)(**kwargs), - name=name, - port=port, - network=network, - max_workers=max_workers, - mnemonic = mnemonic, - public=public, - key=key) - return response - - sync_time = 0 - timescale_map = {'sec': 1, 'min': 60, 'hour': 3600, 'day': 86400, 'minute': 60, 'second': 1} - - def get_rate_limit(self, fn, address): - # stake rate limit - stake = self.state['stake'].get(address, 0) - stake_from = self.state['stake_from'].get(address, 0) - stake = (stake_from * self.stake_from_multipler) + stake - fn_info = self.state.get('fn_info', {}).get(fn, {'stake2rate': self.stake2rate, 'max_rate': self.max_rate}) - rate_limit = (stake / fn_info['stake2rate']) # convert the
stake to a rate - return rate_limit - - def process_request(self, fn:str, request: Request) -> dict: - """ - input: - { - args: list = [] # the arguments to pass to the function - kwargs: dict = {} # the keyword arguments to pass to the function - timestamp: int = 0 # the timestamp to use - address: str = '' # the address to use - } - - Rules: - 1. Admins have unlimited access to all functions, do not share your admin keys with anyone - - Admins can add and remove other admins - - to check admins use the is_admin function (c.is_admin(address) or c.admins() for all admins) - - to add an admin use the add_admin function (c.add_admin(address)) - 2. Local keys have unlimited access but only to the functions in the whitelist - returns : dict - """ - - headers = dict(request.headers.items()) - address = headers.get('key', headers.get('address', None)) - assert address, 'No key or address in headers' - request_staleness = c.timestamp() - int(headers['timestamp']) - assert request_staleness < self.max_request_staleness, f"Request is too old ({request_staleness}s > {self.max_request_staleness}s (MAX))" - data = self.loop.run_until_complete(request.json()) - data = self.serializer.deserialize(data) - auth={'data': headers['hash'], 'timestamp': headers['timestamp']} - request = {'data': data, 'headers': headers} - signature = headers.get('signature', None) - assert c.verify(auth=auth,signature=signature, address=address), 'Invalid signature' - kwargs = dict(data.get('kwargs', {})) - args = list(data.get('args', [])) - if 'params' in data: - if isinstance(data['params'], dict): - kwargs = {**kwargs, **data['params']} - elif isinstance(data['params'], list): - args = [*args, *data['params']] - else: - raise ValueError('params must be a list or a dictionary') - data = {'args': args, 'kwargs': kwargs} - - if c.is_admin(address): - return request - assert fn in self.module.endpoints , f"Function {fn} not in whitelist={self.module.endpoints}" - assert not bool(fn.startswith('__') or fn.startswith('_')), f'Function {fn} is private' - is_local_key = address in self.address2key - is_user = c.is_user(address) - if is_local_key or is_user: - return request - # check if the user has exceeded the rate limit - user_info_path = self.resolve_path(f'user_info/{address}.json') # get the user info path - user_info = self.get(user_info_path, {}, max_age=self.period) # get the user info, refresh if it is too old (> period) - user_fn_info = user_info.get(fn, {"timestamp": c.time(), 'count': 0}) # get the user info for the function - reset_count = bool((c.timestamp() - user_fn_info['timestamp']) > self.period) # reset the count if the period has passed - user_fn_info['count'] = (0 if reset_count else user_fn_info.get('count', 0)) + 1 # reset on a new period, then increment - rate_limit = self.get_rate_limit(fn=fn, address=address) # get the rate limit for the user - assert user_fn_info['count'] <= rate_limit, f'rate limit exceeded for {fn}' - user_info[fn] = user_fn_info - return request - - def sync_loop(self): - while True: - try: - r = self.sync() - except Exception as e: - r = c.detailed_error(e) - c.print(r) - c.sleep(self.network_staleness) - - def sync(self, update=False): - path = self.resolve_path(self.path + '/network_state.json') - state = self.get(path, {}, max_age=self.network_staleness) - network = self.network - staleness = c.time() - state.get('sync_time', 0) - self.address2key = c.address2key() - response = { 'path': path, - 'network_staleness': self.network_staleness, - 'network': network, - 'staleness': int(staleness), - }
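# Aside: the limit computed by get_rate_limit (above) is purely stake-derived:
# direct stake plus multiplier-weighted delegated stake, divided by the
# function's stake2rate. A tiny worked sketch -- the helper name and all
# numbers are illustrative, not part of this patch:
def stake_rate_limit(stake, stake_from, stake_from_multiplier=1.0, stake2rate=1000.0):
    effective_stake = stake + stake_from * stake_from_multiplier
    return effective_stake / stake2rate   # allowed calls per period

# 5_000 direct + 10_000 delegated at 0.5x, with 1 call per 1_000 staked:
assert stake_rate_limit(5_000, 10_000, 0.5, 1_000) == 10.0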
- - if staleness < self.network_staleness: - response['msg'] = f'synced too early, waiting {self.network_staleness - staleness} seconds' - return response - else: - response['msg'] = 'Synced with the network' - response['staleness'] = 0 - c.namespace(max_age=self.network_staleness) - self.subspace = c.module('subspace')(network=network) - state['stake_from'] = self.subspace.stake_from(fmt='j', update=update, max_age=self.network_staleness) - state['stake'] = {k: sum(v.values()) for k,v in state['stake_from'].items()} - self.state = state - self.put(path, self.state) - return response - - - - @classmethod - def kill(cls, - module, - mode:str = 'pm2', - verbose:bool = False, - update : bool = True, - prefix_match = False, - network = 'local', # local, dev, test, main - **kwargs): - - kill_fn = getattr(cls, f'{mode}_kill') - delete_modules = [] - - try: - killed_module =kill_fn(module, verbose=verbose,prefix_match=prefix_match, **kwargs) - except Exception as e: - return {'error':str(e)} - if isinstance(killed_module, list): - delete_modules.extend(killed_module) - elif isinstance(killed_module, str): - delete_modules.append(killed_module) - else: - delete_modules.append(killed_module) - # update modules - c.deregister_server(module, network=network) - - assert c.server_exists(module, network=network) == False, f'module {module} still exists' - - servers = c.servers() - for m in delete_modules: - if m in servers: - c.deregister_server(m, network=network) - - return {'server_killed': delete_modules, 'update': update} - - - - @classmethod - def kill_prefix(cls, prefix:str, **kwargs): - servers = c.servers(network='local') - killed_servers = [] - for s in servers: - if s.startswith(prefix): - c.kill(s, **kwargs) - killed_servers.append(s) - return {'success':True, 'message':f'Killed servers with prefix {prefix}'} - - - - @classmethod - def kill_many(cls, servers, search:str = None, network='local', timeout=10, **kwargs): - servers = c.servers(network=network) - servers = [s for s in servers if search == None or search in s] - futures = [] - for s in servers: - c.print(f'Killing {s}', color='red') - future = c.submit(c.kill, kwargs={'module':s, **kwargs}, timeout=timeout) - futures.append(future) - results = [] - for r in c.as_completed(futures, timeout=timeout): - results += [r.result()] - c.print(f'Killed {len(results)} servers', color='red') - return results - - - @classmethod - def fleet(cls, module, n=5, timeout=10): - futures = [] - if '::' not in module: - module = f'{module}::' - - - for i in range(n): - module_name = f'{module}{i}' - future = c.submit(cls.serve, kwargs=dict(module=module_name), timeout=timeout) - futures.append(future) - results = [] - for future in c.as_completed(futures, timeout=timeout): - result = future.result() - results.append(result) - - return results - - - @classmethod - def serve_many(cls, modules:list, **kwargs): - - if isinstance(modules[0], list): - modules = modules[0] - - futures = [] - for module in modules: - future = c.submit(c.serve, kwargs={'module': module, **kwargs}) - futures.append(future) - - results = [] - for future in c.as_completed(futures): - result = future.result() - results.append(result) - return results - serve_batch = serve_many - - - @classmethod - def wait_for_server(cls, - name: str , - network: str = 'local', - timeout:int = 600, - sleep_interval: int = 1, - verbose:bool = False) -> bool : - - time_waiting = 0 - while time_waiting < timeout: - namespace = c.namespace(network=network) - if name in namespace: - c.print(f'{name} is ready',
color='green') - return True - time_waiting += sleep_interval - c.print(f'Waiting for {name} for {time_waiting} seconds', color='red') - c.sleep(sleep_interval) - raise TimeoutError(f'Waited for {timeout} seconds for {name} to start') - - - @staticmethod - def kill_all_servers( *args, **kwargs): - ''' - Kill all of the servers - ''' - for module in c.servers(*args, **kwargs): - c.kill(module) - - # c.update(network='local') - - @classmethod - def kill_all(cls, network='local', timeout=20, verbose=True): - futures = [] - servers = c.servers(network=network) - n = len(servers) - progress = c.tqdm(n) - for s in servers: - c.print(f'Killing {s}', color='red') - futures += [c.submit(c.kill, kwargs={'module':s, 'update': False}, return_future=True)] - results_list = [] - for f in c.as_completed(futures, timeout=timeout): - result = f.result() - print(result) - progress.update(1) - results_list += [result] - namespace = c.namespace(network=network, update=True) - new_n = len(servers) - c.print(f'Killed {n - new_n} servers, with {n} remaining {servers}', color='red') - return {'success':True, 'old_n':n, 'new_n':new_n, 'servers':servers, 'namespace':namespace} - - - -Server.run(__name__) \ No newline at end of file diff --git a/commune/server/namespace.py b/commune/server/namespace.py index 61e973e4..eda17adb 100644 --- a/commune/server/namespace.py +++ b/commune/server/namespace.py @@ -28,6 +28,7 @@ def namespace(cls, search=None, path = cls.resolve_network_path(network) namespace = cls.get(path, None, max_age=max_age) if namespace == None: + namespace = cls.update_namespace(network=network, netuid=netuid, timeout=timeout, @@ -51,7 +52,6 @@ def clean_namespace(cls, namespace): @classmethod def update_namespace(cls, network, netuid=None, timeout=5, search=None, verbose=False): c.print(f'UPDATING --> NETWORK(network={network} netuid={netuid})', color='blue') - if 'subspace' in network: if '.' 
in network: network, netuid = network.split('.') @@ -80,8 +80,7 @@ def update_namespace(cls, network, netuid=None, timeout=5, search=None, verbose= except Exception as e: c.print(f'Error: {e}', color='red', verbose=True) namespace = {k:v for k,v in namespace.items() if 'Error' not in k} - ip = c.ip(update=1) - namespace = {k: v.replace(ip, '0.0.0.0') for k,v in namespace.items() } + namespace = {k: '0.0.0.0:' + str(v.split(':')[-1]) for k,v in namespace.items() } else: return {} return namespace diff --git a/commune/server/server.py b/commune/server/server.py index e30e0ebe..155e306e 100644 --- a/commune/server/server.py +++ b/commune/server/server.py @@ -24,7 +24,6 @@ def __init__( nest_asyncio:bool = True, # whether to use nest asyncio process_request:Optional[Union[callable, str]] = None, network_staleness = 60, - netuid: int = 'all', # subnet id path:str = 'state', **kwargs, ) -> 'Server': diff --git a/commune/shortcuts.yaml b/commune/shortcuts.yaml new file mode 100644 index 00000000..43a4eeec --- /dev/null +++ b/commune/shortcuts.yaml @@ -0,0 +1,17 @@ +chain: subspace.chain +d: docker +f: frontend +freegpt: model.freegpt +lit: lit_gpt +openai: model.openai +openrouter: model.openrouter +or: model.openrouter +r: remote +s: subspace +sel: selenium +store: storage +tg: textgen +w: wombo +router: model.openrouter +ticket: key.ticket +namespace: server.namespace \ No newline at end of file diff --git a/commune/subspace/client.py b/commune/subspace/client.py deleted file mode 100644 index cef14fc6..00000000 --- a/commune/subspace/client.py +++ /dev/null @@ -1,3025 +0,0 @@ -import json -import queue -from concurrent.futures import Future, ThreadPoolExecutor -from contextlib import contextmanager -from copy import deepcopy -from dataclasses import dataclass -from typing import Any, Mapping, TypeVar, cast -import commune as c -from substrateinterface import ExtrinsicReceipt # type: ignore -from substrateinterface import Keypair # type: ignore -from substrateinterface import SubstrateInterface # type: ignore -from substrateinterface.storage import StorageKey # type: ignore - -from commune.subspace.utils import transform_stake_dmap -from commune.utils.errors import ChainTransactionError, NetworkQueryError -from commune.utils.types import NetworkParams, Ss58Address, SubnetParams - -# TODO: InsufficientBalanceError, MismatchedLengthError etc - -MAX_REQUEST_SIZE = 9_000_000 - - -@dataclass -class Chunk: - batch_requests: list[tuple[Any, Any]] - prefix_list: list[list[str]] - fun_params: list[tuple[Any, Any, Any, Any, str]] - - -T1 = TypeVar("T1") -T2 = TypeVar("T2") - - -class SubspaceClient(c.Module): - """ - A client for interacting with Commune network nodes, querying storage, - submitting transactions, etc. - - Attributes: - wait_for_finalization: Whether to wait for transaction finalization. - - Example: - ```py - client = CommuneClient() - client.query(name='function_name', params=['param1', 'param2']) - ``` - - Raises: - AssertionError: If the maximum connections value is less than or equal - to zero. - """ - - wait_for_finalization: bool - _num_connections: int - _connection_queue: queue.Queue[SubstrateInterface] - url: str - - def __init__( - self, - url: str, - num_connections: int = 1, - wait_for_finalization: bool = False, - timeout: int | None = None, - ): - """ - Args: - url: The URL of the network node to connect to. - num_connections: The number of websocket connections to be opened. 
- """ - assert num_connections > 0 - self._num_connections = num_connections - self.wait_for_finalization = wait_for_finalization - self._connection_queue = queue.Queue(num_connections) - self.url = url - ws_options = {} - if timeout is not None: - ws_options["timeout"] = timeout - - for _ in range(num_connections): - self._connection_queue.put( - SubstrateInterface(url, ws_options=ws_options) - ) - - @property - def connections(self) -> int: - """ - Gets the maximum allowed number of simultaneous connections to the - network node. - """ - return self._num_connections - - @contextmanager - def get_conn(self, timeout: float | None = None, init: bool = False): - """ - Context manager to get a connection from the pool. - - Tries to get a connection from the pool queue. If the queue is empty, - it blocks for `timeout` seconds until a connection is available. If - `timeout` is None, it blocks indefinitely. - - Args: - timeout: The maximum time in seconds to wait for a connection. - - Yields: - The connection object from the pool. - - Raises: - QueueEmptyError: If no connection is available within the timeout - period. - """ - conn = self._connection_queue.get(timeout=timeout) - if init: - conn.init_runtime() # type: ignore - try: - yield conn - finally: - self._connection_queue.put(conn) - - def _get_storage_keys( - self, - storage: str, - queries: list[tuple[str, list[Any]]], - block_hash: str | None, - ): - - send: list[tuple[str, list[Any]]] = [] - prefix_list: list[Any] = [] - - key_idx = 0 - with self.get_conn(init=True) as substrate: - for function, params in queries: - storage_key = StorageKey.create_from_storage_function( # type: ignore - storage, function, params, runtime_config=substrate.runtime_config, metadata=substrate.metadata # type: ignore - ) - - prefix = storage_key.to_hex() - prefix_list.append(prefix) - send.append(("state_getKeys", [prefix, block_hash])) - key_idx += 1 - return send, prefix_list - - def _get_lists( - self, - storage_module: str, - queries: list[tuple[str, list[Any]]], - substrate: SubstrateInterface, - ) -> list[tuple[Any, Any, Any, Any, str]]: - """ - Generates a list of tuples containing parameters for each storage function based on the given functions and substrate interface. - - Args: - functions (dict[str, list[query_call]]): A dictionary where keys are storage module names and values are lists of tuples. - Each tuple consists of a storage function name and its parameters. - substrate: An instance of the SubstrateInterface class used to interact with the substrate. - - Returns: - A list of tuples in the format `(value_type, param_types, key_hashers, params, storage_function)` for each storage function in the given functions. - - Example: - >>> _get_lists( - functions={'storage_module': [('storage_function', ['param1', 'param2'])]}, - substrate=substrate_instance - ) - [('value_type', 'param_types', 'key_hashers', ['param1', 'param2'], 'storage_function'), ...] 
- """ - - function_parameters: list[tuple[Any, Any, Any, Any, str]] = [] - - metadata_pallet = substrate.metadata.get_metadata_pallet( # type: ignore - storage_module - ) - for storage_function, params in queries: - storage_item = metadata_pallet.get_storage_function( # type: ignore - storage_function - ) - - value_type = storage_item.get_value_type_string() # type: ignore - param_types = storage_item.get_params_type_string() # type: ignore - key_hashers = storage_item.get_param_hashers() # type: ignore - function_parameters.append( - ( - value_type, - param_types, - key_hashers, - params, - storage_function, - ) # type: ignore - ) - return function_parameters - - def _send_batch( - self, - batch_payload: list[Any], - request_ids: list[int], - extract_result: bool = True, - ): - """ - Sends a batch of requests to the substrate and collects the results. - - Args: - substrate: An instance of the substrate interface. - batch_payload: The payload of the batch request. - request_ids: A list of request IDs for tracking responses. - results: A list to store the results of the requests. - extract_result: Whether to extract the result from the response. - - Raises: - NetworkQueryError: If there is an `error` in the response message. - - Note: - No explicit return value as results are appended to the provided 'results' list. - """ - results: list[str | dict[Any, Any]] = [] - with self.get_conn(init=True) as substrate: - try: - - substrate.websocket.send( #  type: ignore - json.dumps(batch_payload) - ) # type: ignore - except NetworkQueryError: - pass - while len(results) < len(request_ids): - received_messages = json.loads( - substrate.websocket.recv() # type: ignore - ) # type: ignore - if isinstance(received_messages, dict): - received_messages: list[dict[Any, Any]] = [received_messages] - - for message in received_messages: - if message.get("id") in request_ids: - if extract_result: - try: - results.append(message["result"]) - except Exception: - raise ( - RuntimeError( - f"Error extracting result from message: {message}" - ) - ) - else: - results.append(message) - if "error" in message: - raise NetworkQueryError(message["error"]) - - return results - - def _make_request_smaller( - self, - batch_request: list[tuple[T1, T2]], - prefix_list: list[list[str]], - fun_params: list[tuple[Any, Any, Any, Any, str]], - ) -> tuple[list[list[tuple[T1, T2]]], list[Chunk]]: - """ - Splits a batch of requests into smaller batches, each not exceeding the specified maximum size. - - Args: - batch_request: A list of requests to be sent in a batch. - max_size: Maximum size of each batch in bytes. - - Returns: - A list of smaller request batches. 
- - Example: - >>> _make_request_smaller(batch_request=[('method1', 'params1'), ('method2', 'params2')], max_size=1000) - [[('method1', 'params1')], [('method2', 'params2')]] - """ - assert len(prefix_list) == len(fun_params) == len(batch_request) - - def estimate_size(request: tuple[T1, T2]): - """Convert the batch request to a string and measure its length""" - return len(json.dumps(request)) - - # Initialize variables - result: list[list[tuple[T1, T2]]] = [] - current_batch = [] - current_prefix_batch = [] - current_params_batch = [] - current_size = 0 - - chunk_list: list[Chunk] = [] - - # Iterate through each request in the batch - for request, prefix, params in zip(batch_request, prefix_list, fun_params): - request_size = estimate_size(request) - - # Check if adding this request exceeds the max size - if current_size + request_size > MAX_REQUEST_SIZE: - # If so, start a new batch - - # Essentiatly checks that it's not the first iteration - if current_batch: - chunk = Chunk( - current_batch, current_prefix_batch, current_params_batch - ) - chunk_list.append(chunk) - result.append(current_batch) - - current_batch = [request] - current_prefix_batch = [prefix] - current_params_batch = [params] - current_size = request_size - else: - # Otherwise, add to the current batch - current_batch.append(request) - current_size += request_size - current_prefix_batch.append(prefix) - current_params_batch.append(params) - - # Add the last batch if it's not empty - if current_batch: - result.append(current_batch) - chunk = Chunk(current_batch, current_prefix_batch, current_params_batch) - chunk_list.append(chunk) - - return result, chunk_list - - def _are_changes_equal(self, change_a: Any, change_b: Any): - for (a, b), (c, d) in zip(change_a, change_b): - if a != c or b != d: - return False - - def _rpc_request_batch( - self, batch_requests: list[tuple[str, list[Any]]], extract_result: bool = True - ) -> list[str]: - """ - Sends batch requests to the substrate node using multiple threads and collects the results. - - Args: - substrate: An instance of the substrate interface. - batch_requests : A list of requests to be sent in batches. - max_size: Maximum size of each batch in bytes. - extract_result: Whether to extract the result from the response message. - - Returns: - A list of results from the batch requests. - - Example: - >>> _rpc_request_batch(substrate_instance, [('method1', ['param1']), ('method2', ['param2'])]) - ['result1', 'result2', ...] - """ - - chunk_results: list[Any] = [] - # smaller_requests = self._make_request_smaller(batch_requests) - request_id = 0 - with ThreadPoolExecutor() as executor: - futures: list[Future[list[str | dict[Any, Any]]]] = [] - for chunk in [batch_requests]: - request_ids: list[int] = [] - batch_payload: list[Any] = [] - for method, params in chunk: - request_id += 1 - request_ids.append(request_id) - batch_payload.append( - { - "jsonrpc": "2.0", - "method": method, - "params": params, - "id": request_id, - } - ) - - futures.append( - executor.submit( - self._send_batch, - batch_payload=batch_payload, - request_ids=request_ids, - extract_result=extract_result, - ) - ) - for future in futures: - resul = future.result() - chunk_results.append(resul) - return chunk_results - - def _rpc_request_batch_chunked( - self, chunk_requests: list[Chunk], extract_result: bool = True - ): - """ - Sends batch requests to the substrate node using multiple threads and collects the results. - - Args: - substrate: An instance of the substrate interface. 
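`_make_request_smaller` is a greedy bin-pack: each request is measured by its JSON-encoded length, and a new batch starts whenever the running total would exceed `MAX_REQUEST_SIZE` (9 000 000 bytes). A reduced sketch of the same loop, with a deliberately tiny cap so the split is visible:

```py
import json

MAX_SIZE = 50  # artificially small cap to force a split
requests = [("m1", ["p" * 10]), ("m2", ["q" * 10]), ("m3", ["r" * 10])]

batches, current, current_size = [], [], 0
for req in requests:
    size = len(json.dumps(req))
    if current and current_size + size > MAX_SIZE:
        batches.append(current)        # close the full batch
        current, current_size = [], 0
    current.append(req)
    current_size += size
if current:
    batches.append(current)            # flush the tail

print([len(b) for b in batches])       # [2, 1]
```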
- batch_requests : A list of requests to be sent in batches. - max_size: Maximum size of each batch in bytes. - extract_result: Whether to extract the result from the response message. - - Returns: - A list of results from the batch requests. - - Example: - >>> _rpc_request_batch(substrate_instance, [('method1', ['param1']), ('method2', ['param2'])]) - ['result1', 'result2', ...] - """ - - def split_chunks(chunk: Chunk, chunk_info: list[Chunk], chunk_info_idx: int): - manhattam_chunks: list[tuple[Any, Any]] = [] - mutaded_chunk_info = deepcopy(chunk_info) - max_n_keys = 35000 - for query in chunk.batch_requests: - result_keys = query[1][0] - keys_amount = len(result_keys) - if keys_amount > max_n_keys: - mutaded_chunk_info.pop(chunk_info_idx) - for i in range(0, keys_amount, max_n_keys): - new_chunk = deepcopy(chunk) - splitted_keys = result_keys[i: i + max_n_keys] - splitted_query = deepcopy(query) - splitted_query[1][0] = splitted_keys - new_chunk.batch_requests = [splitted_query] - manhattam_chunks.append(splitted_query) - mutaded_chunk_info.insert(chunk_info_idx, new_chunk) - else: - manhattam_chunks.append(query) - return manhattam_chunks, mutaded_chunk_info - - assert len(chunk_requests) > 0 - mutated_chunk_info: list[Chunk] = [] - chunk_results: list[Any] = [] - # smaller_requests = self._make_request_smaller(batch_requests) - request_id = 0 - - with ThreadPoolExecutor() as executor: - futures: list[Future[list[str | dict[Any, Any]]]] = [] - for idx, macro_chunk in enumerate(chunk_requests): - _, mutated_chunk_info = split_chunks(macro_chunk, chunk_requests, idx) - for chunk in mutated_chunk_info: - request_ids: list[int] = [] - batch_payload: list[Any] = [] - for method, params in chunk.batch_requests: - # for method, params in micro_chunk: - request_id += 1 - request_ids.append(request_id) - batch_payload.append( - { - "jsonrpc": "2.0", - "method": method, - "params": params, - "id": request_id, - } - ) - futures.append( - executor.submit( - self._send_batch, - batch_payload=batch_payload, - request_ids=request_ids, - extract_result=extract_result, - ) - ) - for future in futures: - resul = future.result() - chunk_results.append(resul) - return chunk_results, mutated_chunk_info - - def _decode_response( - self, - response: list[str], - function_parameters: list[tuple[Any, Any, Any, Any, str]], - prefix_list: list[Any], - block_hash: str, - ) -> dict[str, dict[Any, Any]]: - """ - Decodes a response from the substrate interface and organizes the data into a dictionary. - - Args: - response: A list of encoded responses from a substrate query. - function_parameters: A list of tuples containing the parameters for each storage function. - last_keys: A list of the last keys used in the substrate query. - prefix_list: A list of prefixes used in the substrate query. - substrate: An instance of the SubstrateInterface class. - block_hash: The hash of the block to be queried. - - Returns: - A dictionary where each key is a storage function name and the value is another dictionary. - This inner dictionary's key is the decoded key from the response and the value is the corresponding decoded value. - - Raises: - ValueError: If an unsupported hash type is encountered in the `concat_hash_len` function. - - Example: - >>> _decode_response( - response=[...], - function_parameters=[...], - last_keys=[...], - prefix_list=[...], - substrate=substrate_instance, - block_hash="0x123..." 
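Inside `_rpc_request_batch_chunked`, `split_chunks` protects a single `state_queryStorageAt` call from carrying too many storage keys: any key list longer than `max_n_keys` (35 000) is sliced into consecutive sub-queries. The slicing itself is plain list striding; a reduced sketch with a cap of 3 and made-up keys:

```py
max_n_keys = 3  # the real code uses 35000
result_keys = [f"0xkey{i}" for i in range(8)]

# Slice an oversized key list into consecutive sub-lists of at most max_n_keys.
slices = [result_keys[i:i + max_n_keys] for i in range(0, len(result_keys), max_n_keys)]
print([len(s) for s in slices])  # [3, 3, 2]
```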
- ) - {'storage_function_name': {decoded_key: decoded_value, ...}, ...} - """ - - def get_item_key_value(item_key: tuple[Any, ...] | Any) -> tuple[Any, ...] | Any: - if isinstance(item_key, tuple): - return tuple(k.value for k in item_key) - return item_key.value - - def concat_hash_len(key_hasher: str) -> int: - """ - Determines the length of the hash based on the given key hasher type. - - Args: - key_hasher: The type of key hasher. - - Returns: - The length of the hash corresponding to the given key hasher type. - - Raises: - ValueError: If the key hasher type is not supported. - - Example: - >>> concat_hash_len("Blake2_128Concat") - 16 - """ - - if key_hasher == "Blake2_128Concat": - return 16 - elif key_hasher == "Twox64Concat": - return 8 - elif key_hasher == "Identity": - return 0 - else: - raise ValueError("Unsupported hash type") - - assert len(response) == len(function_parameters) == len(prefix_list) - result_dict: dict[str, dict[Any, Any]] = {} - for res, fun_params_tuple, prefix in zip( - response, function_parameters, prefix_list - ): - if not res: - continue - res = res[0] - changes = res["changes"] # type: ignore - value_type, param_types, key_hashers, params, storage_function = ( - fun_params_tuple - ) - with self.get_conn(init=True) as substrate: - for item in changes: - # Determine type string - key_type_string: list[Any] = [] - for n in range(len(params), len(param_types)): - key_type_string.append( - f"[u8; {concat_hash_len(key_hashers[n])}]" - ) - key_type_string.append(param_types[n]) - - item_key_obj = substrate.decode_scale( # type: ignore - type_string=f"({', '.join(key_type_string)})", - scale_bytes="0x" + item[0][len(prefix):], - return_scale_obj=True, - block_hash=block_hash, - ) - # strip key_hashers to use as item key - if len(param_types) - len(params) == 1: - item_key = item_key_obj.value_object[1] # type: ignore - else: - item_key = tuple( # type: ignore - item_key_obj.value_object[key + 1] # type: ignore - for key in range( # type: ignore - len(params), len(param_types) + 1, 2 - ) - ) - - item_value = substrate.decode_scale( # type: ignore - type_string=value_type, - scale_bytes=item[1], - return_scale_obj=True, - block_hash=block_hash, - ) - result_dict.setdefault(storage_function, {}) - key = get_item_key_value(item_key) # type: ignore - result_dict[storage_function][key] = item_value.value # type: ignore - - return result_dict - - def query_batch( - self, functions: dict[str, list[tuple[str, list[Any]]]] - ) -> dict[str, str]: - """ - Executes batch queries on a substrate and returns results in a dictionary format. - - Args: - substrate: An instance of SubstrateInterface to interact with the substrate. - functions (dict[str, list[query_call]]): A dictionary mapping module names to lists of query calls (function name and parameters). - - Returns: - A dictionary where keys are storage function names and values are the query results. - - Raises: - Exception: If no result is found from the batch queries. 
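`concat_hash_len` encodes a Substrate storage-key detail: "concat" hashers prepend a fixed-width digest to the raw key bytes, so decoding must skip 16 bytes for `Blake2_128Concat`, 8 for `Twox64Concat`, and none for `Identity`. The same lookup as a standalone function:

```py
def concat_hash_len(key_hasher: str) -> int:
    """Bytes to skip before the raw key for a 'concat'-style hasher."""
    lengths = {"Blake2_128Concat": 16, "Twox64Concat": 8, "Identity": 0}
    try:
        return lengths[key_hasher]
    except KeyError:
        raise ValueError("Unsupported hash type") from None

assert concat_hash_len("Twox64Concat") == 8
```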
- - Example: - >>> query_batch(substrate_instance, {'module_name': [('function_name', ['param1', 'param2'])]}) - {'function_name': 'query_result', ...} - """ - - result: dict[str, str] = {} - if not functions: - raise Exception("No result") - with self.get_conn(init=True) as substrate: - for module, queries in functions.items(): - storage_keys: list[Any] = [] - for fn, params in queries: - storage_function = substrate.create_storage_key( # type: ignore - pallet=module, storage_function=fn, params=params - ) - storage_keys.append(storage_function) - - block_hash = substrate.get_block_hash() - responses: list[Any] = substrate.query_multi( # type: ignore - storage_keys=storage_keys, block_hash=block_hash - ) - - for item in responses: - fun = item[0] - query = item[1] - storage_fun = fun.storage_function - result[storage_fun] = query.value - - return result - - def query_batch_map( - self, - functions: dict[str, list[tuple[str, list[Any]]]], - block_hash: str | None = None, - ) -> dict[str, dict[Any, Any]]: - """ - Queries multiple storage functions using a map batch approach and returns the combined result. - - Args: - substrate: An instance of SubstrateInterface for substrate interaction. - functions (dict[str, list[query_call]]): A dictionary mapping module names to lists of query calls. - - Returns: - The combined result of the map batch query. - - Example: - >>> query_batch_map(substrate_instance, {'module_name': [('function_name', ['param1', 'param2'])]}) - # Returns the combined result of the map batch query - """ - multi_result: dict[str, dict[Any, Any]] = {} - - def recursive_update( - d: dict[str, dict[T1, T2] | dict[str, Any]], - u: Mapping[str, dict[Any, Any] | str], - ) -> dict[str, dict[T1, T2]]: - for k, v in u.items(): - if isinstance(v, dict): - d[k] = recursive_update(d.get(k, {}), v) # type: ignore - else: - d[k] = v # type: ignore - return d # type: ignore - - def get_page(): - send, prefix_list = self._get_storage_keys(storage, queries, block_hash) - with self.get_conn(init=True) as substrate: - function_parameters = self._get_lists(storage, queries, substrate) - responses = self._rpc_request_batch(send) - # assumption because send is just the storage_function keys - # so it should always be really small regardless of the amount of queries - assert len(responses) == 1 - res = responses[0] - built_payload: list[tuple[str, list[Any]]] = [] - for result_keys in res: - built_payload.append( - ("state_queryStorageAt", [result_keys, block_hash]) - ) - _, chunks_info = self._make_request_smaller( - built_payload, prefix_list, function_parameters - ) - chunks_response, chunks_info = self._rpc_request_batch_chunked(chunks_info) - return chunks_response, chunks_info - - if not block_hash: - with self.get_conn(init=True) as substrate: - block_hash = substrate.get_block_hash() - for storage, queries in functions.items(): - chunks, chunks_info = get_page() - # if this doesn't happen something is wrong on the code - # and we won't be able to decode the data properly - assert len(chunks) == len(chunks_info) - for chunk_info, response in zip(chunks_info, chunks): - storage_result = self._decode_response( - response, chunk_info.fun_params, chunk_info.prefix_list, block_hash - ) - multi_result = recursive_update(multi_result, storage_result) - - return multi_result - - def query( - self, - name: str, - params: list[Any] = [], - module: str = "SubspaceModule", - block_hash: str | None = None, - ) -> Any: - """ - Queries a storage function on the network. 
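`recursive_update` in `query_batch_map` deep-merges per-chunk results so that partial maps for the same storage function accumulate rather than overwrite one another. A standalone sketch with hypothetical data:

```py
def recursive_update(d: dict, u: dict) -> dict:
    """Merge u into d, descending into nested dicts instead of replacing them."""
    for k, v in u.items():
        if isinstance(v, dict):
            d[k] = recursive_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d

a = {"Keys": {0: "addr0"}, "Name": {0: "alpha"}}
b = {"Keys": {1: "addr1"}}
print(recursive_update(a, b))
# {'Keys': {0: 'addr0', 1: 'addr1'}, 'Name': {0: 'alpha'}}
```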
- - Sends a query to the network and retrieves data from a - specified storage function. - - Args: - name: The name of the storage function to query. - params: The parameters to pass to the storage function. - module: The module where the storage function is located. - - Returns: - The result of the query from the network. - - Raises: - NetworkQueryError: If the query fails or is invalid. - """ - - result = self.query_batch({module: [(name, params)]}) - - return result[name] - - def query_map( - self, - name: str, - params: list[Any] = [], - module: str = "SubspaceModule", - extract_value: bool = True, - block_hash: str | None = None, - ) -> dict[Any, Any]: - """ - Queries a storage map from a network node. - - Args: - name: The name of the storage map to query. - params: A list of parameters for the query. - module: The module in which the storage map is located. - - Returns: - A dictionary representing the key-value pairs - retrieved from the storage map. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - result = self.query_batch_map({module: [(name, params)]}, block_hash) - - if extract_value: - return {k.value: v.value for k, v in result} # type: ignore - - return result - - def compose_call( - self, - fn: str, - params: dict[str, Any], - key: Keypair | None, - module: str = "SubspaceModule", - wait_for_inclusion: bool = True, - wait_for_finalization: bool | None = None, - sudo: bool = False, - unsigned: bool = False, - ) -> ExtrinsicReceipt: - """ - Composes and submits a call to the network node. - - Composes and signs a call with the provided keypair, and submits it to - the network. The call can be a standard extrinsic or a sudo extrinsic if - elevated permissions are required. The method can optionally wait for - the call's inclusion in a block and/or its finalization. - - Args: - fn: The function name to call on the network. - params: A dictionary of parameters for the call. - key: The keypair for signing the extrinsic. - module: The module containing the function. - wait_for_inclusion: Wait for the call's inclusion in a block. - wait_for_finalization: Wait for the transaction's finalization. - sudo: Execute the call as a sudo (superuser) operation. - - Returns: - The receipt of the submitted extrinsic, if - `wait_for_inclusion` is True. Otherwise, returns a string - identifier of the extrinsic. - - Raises: - ChainTransactionError: If the transaction fails. 
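Both `query` and `query_map` reduce to single-entry batch calls, so typical reads are one line each. A hedged usage sketch of the removed API (the node URL is a placeholder, and the import path is the one this patch deletes, assuming a reachable chain endpoint):

```py
from commune.subspace.client import SubspaceClient  # path as it existed before this patch

client = SubspaceClient(url="wss://example-node:443")   # placeholder URL
n_modules = client.query("N", params=[0])               # single storage value
keys = client.query_map("Keys", params=[0])             # {uid: ss58_address, ...}
```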
- """ - - if key is None and not unsigned: - raise ValueError("Key must be provided for signed extrinsics.") - - with self.get_conn() as substrate: - if wait_for_finalization is None: - wait_for_finalization = self.wait_for_finalization - - call = substrate.compose_call( # type: ignore - call_module=module, call_function=fn, call_params=params - ) - if sudo: - call = substrate.compose_call( # type: ignore - call_module="Sudo", - call_function="sudo", - call_params={ - "call": call.value, # type: ignore - }, - ) - - if not unsigned: - extrinsic = substrate.create_signed_extrinsic( # type: ignore - call=call, keypair=key # type: ignore - ) # type: ignore - else: - extrinsic = substrate.create_unsigned_extrinsic(call=call) # type: ignore - - response = substrate.submit_extrinsic( - extrinsic=extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - if wait_for_inclusion: - if not response.is_success: - raise ChainTransactionError( - response.error_message, response # type: ignore - ) - - return response - - def compose_call_multisig( - self, - fn: str, - params: dict[str, Any], - key: Keypair, - signatories: list[Ss58Address], - threshold: int, - module: str = "SubspaceModule", - wait_for_inclusion: bool = True, - wait_for_finalization: bool | None = None, - sudo: bool = False, - era: dict[str, int] | None = None, - ) -> ExtrinsicReceipt: - """ - Composes and submits a multisignature call to the network node. - - This method allows the composition and submission of a call that - requires multiple signatures for execution, known as a multisignature - call. It supports specifying signatories, a threshold of signatures for - the call's execution, and an optional era for the call's mortality. The - call can be a standard extrinsic, a sudo extrinsic for elevated - permissions, or a multisig extrinsic if multiple signatures are - required. Optionally, the method can wait for the call's inclusion in a - block and/or its finalization. Make sure to pass all keys, - that are part of the multisignature. - - Args: - fn: The function name to call on the network. params: A dictionary - of parameters for the call. key: The keypair for signing the - extrinsic. signatories: List of SS58 addresses of the signatories. - Include ALL KEYS that are part of the multisig. threshold: The - minimum number of signatories required to execute the extrinsic. - module: The module containing the function to call. - wait_for_inclusion: Whether to wait for the call's inclusion in a - block. wait_for_finalization: Whether to wait for the transaction's - finalization. sudo: Execute the call as a sudo (superuser) - operation. era: Specifies the call's mortality in terms of blocks in - the format - {'period': amount_blocks}. If omitted, the extrinsic is - immortal. - - Returns: - The receipt of the submitted extrinsic if `wait_for_inclusion` is - True. Otherwise, returns a string identifier of the extrinsic. - - Raises: - ChainTransactionError: If the transaction fails. 
- """ - - # getting the call ready - with self.get_conn() as substrate: - if wait_for_finalization is None: - wait_for_finalization = self.wait_for_finalization - - # prepares the `GenericCall` object - call = substrate.compose_call( # type: ignore - call_module=module, call_function=fn, call_params=params - ) - if sudo: - call = substrate.compose_call( # type: ignore - call_module="Sudo", - call_function="sudo", - call_params={ - "call": call.value, # type: ignore - }, - ) - - # modify the rpc methods at runtime, to allow for correct payment - # fee calculation parity has a bug in this version, - # where the method has to be removed - rpc_methods = substrate.config.get("rpc_methods") # type: ignore - - if "state_call" in rpc_methods: # type: ignore - rpc_methods.remove("state_call") # type: ignore - - # create the multisig account - multisig_acc = substrate.generate_multisig_account( # type: ignore - signatories, threshold - ) - - # send the multisig extrinsic - extrinsic = substrate.create_multisig_extrinsic( # type: ignore - call=call, # type: ignore - keypair=key, - multisig_account=multisig_acc, # type: ignore - era=era, # type: ignore - ) # type: ignore - - response = substrate.submit_extrinsic( - extrinsic=extrinsic, - wait_for_inclusion=wait_for_inclusion, - wait_for_finalization=wait_for_finalization, - ) - - if wait_for_inclusion: - if not response.is_success: - raise ChainTransactionError( - response.error_message, response # type: ignore - ) - - return response - - def transfer( - self, - key: Keypair, - amount: int, - dest: Ss58Address, - ) -> ExtrinsicReceipt: - """ - Transfers a specified amount of tokens from the signer's account to the - specified account. - - Args: - key: The keypair associated with the sender's account. - amount: The amount to transfer, in nanotokens. - dest: The SS58 address of the recipient. - - Returns: - A receipt of the transaction. - - Raises: - InsufficientBalanceError: If the sender's account does not have - enough balance. - ChainTransactionError: If the transaction fails. - """ - - params = {"dest": dest, "value": amount} - - return self.compose_call( - module="Balances", fn="transfer_keep_alive", params=params, key=key - ) - - def transfer_multiple( - self, - key: Keypair, - destinations: list[Ss58Address], - amounts: list[int], - netuid: str | int = 0, - ) -> ExtrinsicReceipt: - """ - Transfers specified amounts of tokens from the signer's account to - multiple target accounts. - - The `destinations` and `amounts` lists must be of the same length. - - Args: - key: The keypair associated with the sender's account. - destinations: A list of SS58 addresses of the recipients. - amounts: Amount to transfer to each recipient, in nanotokens. - netuid: The network identifier. - - Returns: - A receipt of the transaction. - - Raises: - InsufficientBalanceError: If the sender's account does not have - enough balance for all transfers. - ChainTransactionError: If the transaction fails. - """ - - assert len(destinations) == len(amounts) - - # extract existential deposit from amounts - existential_deposit = self.get_existential_deposit() - amounts = [a - existential_deposit for a in amounts] - - params = { - "netuid": netuid, - "destinations": destinations, - "amounts": amounts, - } - - return self.compose_call( - module="SubspaceModule", fn="transfer_multiple", params=params, key=key - ) - - def stake( - self, - key: Keypair, - amount: int, - dest: Ss58Address, - ) -> ExtrinsicReceipt: - """ - Stakes the specified amount of tokens to a module key address. 
- - Args: - key: The keypair associated with the staker's account. - amount: The amount of tokens to stake, in nanotokens. - dest: The SS58 address of the module key to stake to. - netuid: The network identifier. - - Returns: - A receipt of the staking transaction. - - Raises: - InsufficientBalanceError: If the staker's account does not have - enough balance. - ChainTransactionError: If the transaction fails. - """ - - params = {"amount": amount, "module_key": dest} - - return self.compose_call(fn="add_stake", params=params, key=key) - - def unstake( - self, - key: Keypair, - amount: int, - dest: Ss58Address, - ) -> ExtrinsicReceipt: - """ - Unstakes the specified amount of tokens from a module key address. - - Args: - key: The keypair associated with the unstaker's account. - amount: The amount of tokens to unstake, in nanotokens. - dest: The SS58 address of the module key to unstake from. - netuid: The network identifier. - - Returns: - A receipt of the unstaking transaction. - - Raises: - InsufficientStakeError: If the staked key does not have enough - staked tokens by the signer key. - ChainTransactionError: If the transaction fails. - """ - - params = {"amount": amount, "module_key": dest} - return self.compose_call(fn="remove_stake", params=params, key=key) - - def update_module( - self, - key: Keypair, - name: str, - address: str, - metadata: str | None = None, - delegation_fee: int = 20, - netuid: int = 0, - ) -> ExtrinsicReceipt: - """ - Updates the parameters of a registered module. - - The delegation fee must be an integer between 0 and 100. - - Args: - key: The keypair associated with the module's account. - name: The new name for the module. If None, the name is not updated. - address: The new address for the module. - If None, the address is not updated. - delegation_fee: The new delegation fee for the module, - between 0 and 100. - netuid: The network identifier. - - Returns: - A receipt of the module update transaction. - - Raises: - InvalidParameterError: If the provided parameters are invalid. - ChainTransactionError: If the transaction fails. - """ - - assert isinstance(delegation_fee, int) - - params = { - "netuid": netuid, - "name": name, - "address": address, - "delegation_fee": delegation_fee, - "metadata": metadata, - } - - response = self.compose_call("update_module", params=params, key=key) - - return response - - def register_module( - self, - key: Keypair, - name: str, - address: str | None = None, - subnet: str = "Rootnet", - metadata: str | None = None, - ) -> ExtrinsicReceipt: - """ - Registers a new module in the network. - - Args: - key: The keypair used for registering the module. - name: The name of the module. If None, a default or previously - set name is used. # How does this work? - address: The address of the module. If None, a default or - previously set address is used. # How does this work? - subnet: The network subnet to register the module in. - min_stake: The minimum stake required for the module, in nanotokens. - If None, a default value is used. - - Returns: - A receipt of the registration transaction. - - Raises: - InvalidParameterError: If the provided parameters are invalid. - ChainTransactionError: If the transaction fails. 
- """ - - key_addr = key.ss58_address - - params = { - "network_name": subnet, - "address": address, - "name": name, - "module_key": key_addr, - "metadata": metadata, - } - - response = self.compose_call("register", params=params, key=key) - return response - - def deregister_module(self, key: Keypair, netuid: int) -> ExtrinsicReceipt: - """ - Deregisters a module from the network. - - Args: - key: The keypair associated with the module's account. - netuid: The network identifier. - - Returns: - A receipt of the module deregistration transaction. - - Raises: - ChainTransactionError: If the transaction fails. - """ - - params = {"netuid": netuid} - - response = self.compose_call("deregister", params=params, key=key) - - return response - - def register_subnet(self, key: Keypair, name: str, metadata: str | None = None) -> ExtrinsicReceipt: - """ - Registers a new subnet in the network. - - Args: - key (Keypair): The keypair used for registering the subnet. - name (str): The name of the subnet to be registered. - metadata (str | None, optional): Additional metadata for the subnet. Defaults to None. - - Returns: - ExtrinsicReceipt: A receipt of the subnet registration transaction. - - Raises: - ChainTransactionError: If the transaction fails. - """ - - params = { - "name": name, - "metadata": metadata, - } - - response = self.compose_call("register_subnet", params=params, key=key) - - return response - - def vote( - self, - key: Keypair, - uids: list[int], - weights: list[int], - netuid: int = 0, - ) -> ExtrinsicReceipt: - """ - Casts votes on a list of module UIDs with corresponding weights. - - The length of the UIDs list and the weights list should be the same. - Each weight corresponds to the UID at the same index. - - Args: - key: The keypair used for signing the vote transaction. - uids: A list of module UIDs to vote on. - weights: A list of weights corresponding to each UID. - netuid: The network identifier. - - Returns: - A receipt of the voting transaction. - - Raises: - InvalidParameterError: If the lengths of UIDs and weights lists - do not match. - ChainTransactionError: If the transaction fails. - """ - - assert len(uids) == len(weights) - - params = { - "uids": uids, - "weights": weights, - "netuid": netuid, - } - - response = self.compose_call("set_weights", params=params, key=key) - - return response - - def update_subnet( - self, - key: Keypair, - params: SubnetParams, - netuid: int = 0, - ) -> ExtrinsicReceipt: - """ - Update a subnet's configuration. - - It requires the founder key for authorization. - - Args: - key: The founder keypair of the subnet. - params: The new parameters for the subnet. - netuid: The network identifier. - - Returns: - A receipt of the subnet update transaction. - - Raises: - AuthorizationError: If the key is not authorized. - ChainTransactionError: If the transaction fails. - """ - - general_params = dict(params) - general_params["netuid"] = netuid - if "metadata" not in general_params: - general_params["metadata"] = None - - response = self.compose_call( - fn="update_subnet", - params=general_params, - key=key, - ) - - return response - - def transfer_stake( - self, - key: Keypair, - amount: int, - from_module_key: Ss58Address, - dest_module_address: Ss58Address, - ) -> ExtrinsicReceipt: - """ - Realocate staked tokens from one staked module to another module. - - Args: - key: The keypair associated with the account that is delegating the tokens. - amount: The amount of staked tokens to transfer, in nanotokens. 
- from_module_key: The SS58 address of the module you want to transfer from (currently delegated by the key). - dest_module_address: The SS58 address of the destination (newly delegated key). - netuid: The network identifier. - - Returns: - A receipt of the stake transfer transaction. - - Raises: - InsufficientStakeError: If the source module key does not have - enough staked tokens. ChainTransactionError: If the transaction - fails. - """ - - amount = amount - self.get_existential_deposit() - - params = { - "amount": amount, - "module_key": from_module_key, - "new_module_key": dest_module_address, - } - - response = self.compose_call("transfer_stake", key=key, params=params) - - return response - - def multiunstake( - self, - key: Keypair, - keys: list[Ss58Address], - amounts: list[int], - ) -> ExtrinsicReceipt: - """ - Unstakes tokens from multiple module keys. - - And the lists `keys` and `amounts` must be of the same length. Each - amount corresponds to the module key at the same index. - - Args: - key: The keypair associated with the unstaker's account. - keys: A list of SS58 addresses of the module keys to unstake from. - amounts: A list of amounts to unstake from each module key, - in nanotokens. - netuid: The network identifier. - - Returns: - A receipt of the multi-unstaking transaction. - - Raises: - MismatchedLengthError: If the lengths of keys and amounts lists do - not match. InsufficientStakeError: If any of the module keys do not - have enough staked tokens. ChainTransactionError: If the transaction - fails. - """ - - assert len(keys) == len(amounts) - - params = {"module_keys": keys, "amounts": amounts} - - response = self.compose_call("remove_stake_multiple", params=params, key=key) - - return response - - def multistake( - self, - key: Keypair, - keys: list[Ss58Address], - amounts: list[int], - ) -> ExtrinsicReceipt: - """ - Stakes tokens to multiple module keys. - - The lengths of the `keys` and `amounts` lists must be the same. Each - amount corresponds to the module key at the same index. - - Args: - key: The keypair associated with the staker's account. - keys: A list of SS58 addresses of the module keys to stake to. - amounts: A list of amounts to stake to each module key, - in nanotokens. - netuid: The network identifier. - - Returns: - A receipt of the multi-staking transaction. - - Raises: - MismatchedLengthError: If the lengths of keys and amounts lists - do not match. - ChainTransactionError: If the transaction fails. - """ - - assert len(keys) == len(amounts) - - params = { - "module_keys": keys, - "amounts": amounts, - } - - response = self.compose_call("add_stake_multiple", params=params, key=key) - - return response - - def add_profit_shares( - self, - key: Keypair, - keys: list[Ss58Address], - shares: list[int], - ) -> ExtrinsicReceipt: - """ - Allocates profit shares to multiple keys. - - The lists `keys` and `shares` must be of the same length, - with each share amount corresponding to the key at the same index. - - Args: - key: The keypair associated with the account - distributing the shares. - keys: A list of SS58 addresses to allocate shares to. - shares: A list of share amounts to allocate to each key, - in nanotokens. - - Returns: - A receipt of the profit sharing transaction. - - Raises: - MismatchedLengthError: If the lengths of keys and shares - lists do not match. - ChainTransactionError: If the transaction fails. 
- """ - - assert len(keys) == len(shares) - - params = {"keys": keys, "shares": shares} - - response = self.compose_call("add_profit_shares", params=params, key=key) - - return response - - def add_subnet_proposal( - self, key: Keypair, - params: dict[str, Any], - ipfs: str, - netuid: int = 0 - ) -> ExtrinsicReceipt: - """ - Submits a proposal for creating or modifying a subnet within the - network. - - The proposal includes various parameters like the name, founder, share - allocations, and other subnet-specific settings. - - Args: - key: The keypair used for signing the proposal transaction. - params: The parameters for the subnet proposal. - netuid: The network identifier. - - Returns: - A receipt of the subnet proposal transaction. - - Raises: - InvalidParameterError: If the provided subnet - parameters are invalid. - ChainTransactionError: If the transaction fails. - """ - - general_params = dict(params) - general_params["netuid"] = netuid - general_params["data"] = ipfs - if "metadata" not in general_params: - general_params["metadata"] = None - - # general_params["burn_config"] = json.dumps(general_params["burn_config"]) - response = self.compose_call( - fn="add_subnet_params_proposal", - params=general_params, - key=key, - module="GovernanceModule", - ) - - return response - - def add_custom_proposal( - self, - key: Keypair, - cid: str, - ) -> ExtrinsicReceipt: - - params = {"data": cid} - - response = self.compose_call( - fn="add_global_custom_proposal", - params=params, - key=key, - module="GovernanceModule", - ) - return response - - def add_custom_subnet_proposal( - self, - key: Keypair, - cid: str, - netuid: int = 0, - ) -> ExtrinsicReceipt: - """ - Submits a proposal for creating or modifying a custom subnet within the - network. - - The proposal includes various parameters like the name, founder, share - allocations, and other subnet-specific settings. - - Args: - key: The keypair used for signing the proposal transaction. - params: The parameters for the subnet proposal. - netuid: The network identifier. - - Returns: - A receipt of the subnet proposal transaction. - """ - - params = { - "data": cid, - "netuid": netuid, - } - - response = self.compose_call( - fn="add_subnet_custom_proposal", - params=params, - key=key, - module="GovernanceModule", - ) - - return response - - def add_global_proposal( - self, - key: Keypair, - params: NetworkParams, - cid: str | None, - ) -> ExtrinsicReceipt: - """ - Submits a proposal for altering the global network parameters. - - Allows for the submission of a proposal to - change various global parameters - of the network, such as emission rates, rate limits, and voting - thresholds. It is used to - suggest changes that affect the entire network's operation. - - Args: - key: The keypair used for signing the proposal transaction. - params: A dictionary containing global network parameters - like maximum allowed subnets, modules, - transaction rate limits, and others. - - Returns: - A receipt of the global proposal transaction. - - Raises: - InvalidParameterError: If the provided network - parameters are invalid. - ChainTransactionError: If the transaction fails. 
- """ - general_params = cast(dict[str, Any], params) - cid = cid or "" - general_params["data"] = cid - - response = self.compose_call( - fn="add_global_params_proposal", - params=general_params, - key=key, - module="GovernanceModule", - ) - - return response - - def vote_on_proposal( - self, - key: Keypair, - proposal_id: int, - agree: bool, - ) -> ExtrinsicReceipt: - """ - Casts a vote on a specified proposal within the network. - - Args: - key: The keypair used for signing the vote transaction. - proposal_id: The unique identifier of the proposal to vote on. - - Returns: - A receipt of the voting transaction in nanotokens. - - Raises: - InvalidProposalIDError: If the provided proposal ID does not - exist or is invalid. - ChainTransactionError: If the transaction fails. - """ - - params = {"proposal_id": proposal_id, "agree": agree} - - response = self.compose_call( - "vote_proposal", - key=key, - params=params, - module="GovernanceModule", - ) - - return response - - def unvote_on_proposal( - self, - key: Keypair, - proposal_id: int, - ) -> ExtrinsicReceipt: - """ - Retracts a previously cast vote on a specified proposal. - - Args: - key: The keypair used for signing the unvote transaction. - proposal_id: The unique identifier of the proposal to withdraw the - vote from. - - Returns: - A receipt of the unvoting transaction in nanotokens. - - Raises: - InvalidProposalIDError: If the provided proposal ID does not - exist or is invalid. - ChainTransactionError: If the transaction fails to be processed, or - if there was no prior vote to retract. - """ - - params = {"proposal_id": proposal_id} - - response = self.compose_call( - "remove_vote_proposal", - key=key, - params=params, - module="GovernanceModule", - ) - - return response - - def enable_vote_power_delegation(self, key: Keypair) -> ExtrinsicReceipt: - """ - Enables vote power delegation for the signer's account. - - Args: - key: The keypair used for signing the delegation transaction. - - Returns: - A receipt of the vote power delegation transaction. - - Raises: - ChainTransactionError: If the transaction fails. - """ - - response = self.compose_call( - "enable_vote_power_delegation", - params={}, - key=key, - module="GovernanceModule", - ) - - return response - - def disable_vote_power_delegation(self, key: Keypair) -> ExtrinsicReceipt: - """ - Disables vote power delegation for the signer's account. - - Args: - key: The keypair used for signing the delegation transaction. - - Returns: - A receipt of the vote power delegation transaction. - - Raises: - ChainTransactionError: If the transaction fails. - """ - - response = self.compose_call( - "disable_vote_power_delegation", - params={}, - key=key, - module="GovernanceModule", - ) - - return response - - def add_dao_application( - self, key: Keypair, application_key: Ss58Address, data: str - ) -> ExtrinsicReceipt: - """ - Submits a new application to the general subnet DAO. - - Args: - key: The keypair used for signing the application transaction. - application_key: The SS58 address of the application key. - data: The data associated with the application. - - Returns: - A receipt of the application transaction. - - Raises: - ChainTransactionError: If the transaction fails. 
- """ - - params = {"application_key": application_key, "data": data} - - response = self.compose_call( - "add_dao_application", module="GovernanceModule", key=key, - params=params - ) - - return response - - def query_map_curator_applications(self) -> dict[str, dict[str, str]]: - query_result = self.query_map( - "CuratorApplications", module="GovernanceModule", params=[], - extract_value=False - ) - applications = query_result.get("CuratorApplications", {}) - return applications - - def query_map_proposals( - self, extract_value: bool = False - ) -> dict[int, dict[str, Any]]: - """ - Retrieves a mappping of proposals from the network. - - Queries the network and returns a mapping of proposal IDs to - their respective parameters. - - Returns: - A dictionary mapping proposal IDs - to dictionaries of their parameters. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map( - "Proposals", extract_value=extract_value, module="GovernanceModule" - )["Proposals"] - - def query_map_weights( - self, netuid: int = 0, extract_value: bool = False - ) -> dict[int, list[tuple[int, int]]] | None: - """ - Retrieves a mapping of weights for keys on the network. - - Queries the network and returns a mapping of key UIDs to - their respective weights. - - Args: - netuid: The network UID from which to get the weights. - - Returns: - A dictionary mapping key UIDs to lists of their weights. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - weights_dict = self.query_map( - "Weights", - [netuid], - extract_value=extract_value - ).get("Weights") - return weights_dict - - def query_map_key( - self, - netuid: int = 0, - extract_value: bool = False, - ) -> dict[int, Ss58Address]: - """ - Retrieves a map of keys from the network. - - Fetches a mapping of key UIDs to their associated - addresses on the network. - The query can be targeted at a specific network UID if required. - - Args: - netuid: The network UID from which to get the keys. - - Returns: - A dictionary mapping key UIDs to their addresses. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - return self.query_map("Keys", [netuid], extract_value=extract_value)["Keys"] - - def query_map_address( - self, netuid: int = 0, extract_value: bool = False - ) -> dict[int, str]: - """ - Retrieves a map of key addresses from the network. - - Queries the network for a mapping of key UIDs to their addresses. - - Args: - netuid: The network UID from which to get the addresses. - - Returns: - A dictionary mapping key UIDs to their addresses. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("Address", [netuid], extract_value=extract_value)[ - "Address" - ] - - def query_map_emission(self, extract_value: bool = False) -> dict[int, list[int]]: - """ - Retrieves a map of emissions for keys on the network. - - Queries the network to get a mapping of - key UIDs to their emission values. - - Returns: - A dictionary mapping key UIDs to lists of their emission values. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("Emission", extract_value=extract_value)["Emission"] - - def query_map_pending_emission(self, extract_value: bool = False) -> int: - """ - Retrieves a map of pending emissions for the subnets. - - Queries the network for a mapping of subnet UIDs to their pending emission values. 
- - Returns: - A dictionary mapping subnet UIDs to their pending emission values. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - return self.query_map("PendingEmission", extract_value=extract_value, module="SubnetEmissionModule")["PendingEmission"] - - def query_map_subnet_emission(self, extract_value: bool = False) -> dict[int, int]: - """ - Retrieves a map of subnet emissions for the network. - - Queries the network for a mapping of subnet UIDs to their emission values. - - Returns: - A dictionary mapping subnet UIDs to their emission values. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("SubnetEmission", extract_value=extract_value, module="SubnetEmissionModule")["SubnetEmission"] - - def query_map_subnet_consensus(self, extract_value: bool = False) -> dict[int, str]: - """ - Retrieves a map of subnet consensus types for the network. - - Queries the network for a mapping of subnet UIDs to their consensus types. - - Returns: - A dictionary mapping subnet UIDs to their consensus types. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("SubnetConsensusType", extract_value=extract_value, module="SubnetEmissionModule")["SubnetConsensusType"] - - def query_map_incentive(self, extract_value: bool = False) -> dict[int, list[int]]: - """ - Retrieves a mapping of incentives for keys on the network. - - Queries the network and returns a mapping of key UIDs to - their respective incentive values. - - Returns: - A dictionary mapping key UIDs to lists of their incentive values. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("Incentive", extract_value=extract_value)["Incentive"] - - def query_map_dividend(self, extract_value: bool = False) -> dict[int, list[int]]: - """ - Retrieves a mapping of dividends for keys on the network. - - Queries the network for a mapping of key UIDs to - their dividend values. - - Returns: - A dictionary mapping key UIDs to lists of their dividend values. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("Dividends", extract_value=extract_value)["Dividends"] - - def query_map_regblock( - self, netuid: int = 0, extract_value: bool = False - ) -> dict[int, int]: - """ - Retrieves a mapping of registration blocks for keys on the network. - - Queries the network for a mapping of key UIDs to - the blocks where they were registered. - - Args: - netuid: The network UID from which to get the registration blocks. - - Returns: - A dictionary mapping key UIDs to their registration blocks. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map( - "RegistrationBlock", [netuid], extract_value=extract_value - )["RegistrationBlock"] - - def query_map_lastupdate(self, extract_value: bool = False) -> dict[int, list[int]]: - """ - Retrieves a mapping of the last update times for keys on the network. - - Queries the network for a mapping of key UIDs to their last update times. - - Returns: - A dictionary mapping key UIDs to lists of their last update times. - - Raises: - QueryError: If the query to the network fails or is invalid. 
- """ - - return self.query_map("LastUpdate", extract_value=extract_value)["LastUpdate"] - - def query_map_stakefrom( - self, extract_value: bool = False - ) -> dict[Ss58Address, list[tuple[Ss58Address, int]]]: - """ - Retrieves a mapping of stakes from various sources for keys on the network. - - Queries the network to obtain a mapping of key addresses to the sources - and amounts of stakes they have received. - - Args: - netuid: The network UID from which to get the stakes. - - Returns: - A dictionary mapping key addresses to lists of tuples - (module_key_address, amount). - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - result = self.query_map("StakeFrom", [], extract_value=extract_value)[ - "StakeFrom" - ] - - return transform_stake_dmap(result) - - def query_map_staketo( - self, extract_value: bool = False - ) -> dict[Ss58Address, list[tuple[Ss58Address, int]]]: - """ - Retrieves a mapping of stakes to destinations for keys on the network. - - Queries the network for a mapping of key addresses to the destinations - and amounts of stakes they have made. - - Args: - netuid: The network UID from which to get the stakes. - - Returns: - A dictionary mapping key addresses to lists of tuples - (module_key_address, amount). - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - result = self.query_map("StakeTo", [], extract_value=extract_value)[ - "StakeTo" - ] - return transform_stake_dmap(result) - - def query_map_delegationfee( - self, netuid: int = 0, extract_value: bool = False - ) -> dict[str, int]: - """ - Retrieves a mapping of delegation fees for keys on the network. - - Queries the network to obtain a mapping of key addresses to their - respective delegation fees. - - Args: - netuid: The network UID to filter the delegation fees. - - Returns: - A dictionary mapping key addresses to their delegation fees. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("DelegationFee", [netuid], extract_value=extract_value)[ - "DelegationFee" - ] - - def query_map_tempo(self, extract_value: bool = False) -> dict[int, int]: - """ - Retrieves a mapping of tempo settings for the network. - - Queries the network to obtain the tempo (rate of reward distributions) - settings for various network subnets. - - Returns: - A dictionary mapping network UIDs to their tempo settings. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("Tempo", extract_value=extract_value)["Tempo"] - - def query_map_immunity_period(self, extract_value: bool) -> dict[int, int]: - """ - Retrieves a mapping of immunity periods for the network. - - Queries the network for the immunity period settings, - which represent the time duration during which modules - can not get deregistered. - - Returns: - A dictionary mapping network UIDs to their immunity period settings. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("ImmunityPeriod", extract_value=extract_value)[ - "ImmunityPeriod" - ] - - def query_map_min_allowed_weights( - self, extract_value: bool = False - ) -> dict[int, int]: - """ - Retrieves a mapping of minimum allowed weights for the network. - - Queries the network to obtain the minimum allowed weights, - which are the lowest permissible weight values that can be set by - validators. - - Returns: - A dictionary mapping network UIDs to - their minimum allowed weight values. 
- - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("MinAllowedWeights", extract_value=extract_value)[ - "MinAllowedWeights" - ] - - def query_map_max_allowed_weights( - self, extract_value: bool = False - ) -> dict[int, int]: - """ - Retrieves a mapping of maximum allowed weights for the network. - - Queries the network for the maximum allowed weights, - which are the highest permissible - weight values that can be set by validators. - - Returns: - A dictionary mapping network UIDs to - their maximum allowed weight values. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("MaxAllowedWeights", extract_value=extract_value)[ - "MaxAllowedWeights" - ] - - def query_map_max_allowed_uids(self, extract_value: bool = False) -> dict[int, int]: - """ - Queries the network for the maximum number of allowed user IDs (UIDs) - for each network subnet. - - Fetches a mapping of network subnets to their respective - limits on the number of user IDs that can be created or used. - - Returns: - A dictionary mapping network UIDs (unique identifiers) to their - maximum allowed number of UIDs. - Each entry represents a network subnet - with its corresponding UID limit. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("MaxAllowedUids", extract_value=extract_value)[ - "MaxAllowedUids" - ] - - def query_map_min_stake(self, extract_value: bool = False) -> dict[int, int]: - """ - Retrieves a mapping of minimum allowed stake on the network. - - Queries the network to obtain the minimum number of stake, - which is represented in nanotokens. - - Returns: - A dictionary mapping network UIDs to - their minimum allowed stake values. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("MinStake", extract_value=extract_value)["MinStake"] - - def query_map_max_stake(self, extract_value: bool = False) -> dict[int, int]: - """ - Retrieves a mapping of the maximum stake values for the network. - - Queries the network for the maximum stake values across various s - ubnets of the network. - - Returns: - A dictionary mapping network UIDs to their maximum stake values. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("MaxStake", extract_value=extract_value)["MaxStake"] - - def query_map_founder(self, extract_value: bool = False) -> dict[int, str]: - """ - Retrieves a mapping of founders for the network. - - Queries the network to obtain the founders associated with - various subnets. - - Returns: - A dictionary mapping network UIDs to their respective founders. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("Founder", extract_value=extract_value)["Founder"] - - def query_map_founder_share(self, extract_value: bool = False) -> dict[int, int]: - """ - Retrieves a mapping of founder shares for the network. - - Queries the network for the share percentages - allocated to founders across different subnets. - - Returns: - A dictionary mapping network UIDs to their founder share percentages. - - Raises: - QueryError: If the query to the network fails or is invalid. 
- """ - - return self.query_map("FounderShare", extract_value=extract_value)[ - "FounderShare" - ] - - def query_map_incentive_ratio(self, extract_value: bool = False) -> dict[int, int]: - """ - Retrieves a mapping of incentive ratios for the network. - - Queries the network for the incentive ratios, - which are the proportions of rewards or incentives - allocated in different subnets of the network. - - Returns: - A dictionary mapping network UIDs to their incentive ratios. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("IncentiveRatio", extract_value=extract_value)[ - "IncentiveRatio" - ] - - def query_map_trust_ratio(self, extract_value: bool = False) -> dict[int, int]: - """ - Retrieves a mapping of trust ratios for the network. - - Queries the network for trust ratios, - indicative of the level of trust or credibility assigned - to different subnets of the network. - - Returns: - A dictionary mapping network UIDs to their trust ratios. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("TrustRatio", extract_value=extract_value)["TrustRatio"] - - def query_map_vote_mode_subnet(self, extract_value: bool = False) -> dict[int, str]: - """ - Retrieves a mapping of vote modes for subnets within the network. - - Queries the network for the voting modes used in different - subnets, which define the methodology or approach of voting within those - subnets. - - Returns: - A dictionary mapping network UIDs to their vote - modes for subnets. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("VoteModeSubnet", extract_value=extract_value)[ - "VoteModeSubnet" - ] - - def query_map_legit_whitelist( - self, extract_value: bool = False - ) -> dict[Ss58Address, int]: - """ - Retrieves a mapping of whitelisted addresses for the network. - - Queries the network for a mapping of whitelisted addresses - and their respective legitimacy status. - - Returns: - A dictionary mapping addresses to their legitimacy status. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map( - "LegitWhitelist", module="GovernanceModule", extract_value=extract_value)[ - "LegitWhitelist" - ] - - def query_map_subnet_names(self, extract_value: bool = False) -> dict[int, str]: - """ - Retrieves a mapping of subnet names within the network. - - Queries the network for the names of various subnets, - providing an overview of the different - subnets within the network. - - Returns: - A dictionary mapping network UIDs to their subnet names. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("SubnetNames", extract_value=extract_value)["SubnetNames"] - - def query_map_balances( - self, extract_value: bool = False, block_hash: str | None = None - ) -> dict[str, dict[str, int | dict[str, int | float]]]: - """ - Retrieves a mapping of account balances within the network. - - Queries the network for the balances associated with different accounts. - It provides detailed information including various types of - balances for each account. - - Returns: - A dictionary mapping account addresses to their balance details. - - Raises: - QueryError: If the query to the network fails or is invalid. 
- """ - - return self.query_map("Account", module="System", extract_value=extract_value, block_hash=block_hash)[ - "Account" - ] - - def query_map_registration_blocks( - self, netuid: int = 0, extract_value: bool = False - ) -> dict[int, int]: - """ - Retrieves a mapping of registration blocks for UIDs on the network. - - Queries the network to find the block numbers at which various - UIDs were registered. - - Args: - netuid: The network UID from which to get the registrations. - - Returns: - A dictionary mapping UIDs to their registration block numbers. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map( - "RegistrationBlock", [netuid], extract_value=extract_value - )["RegistrationBlock"] - - def query_map_name( - self, netuid: int = 0, extract_value: bool = False - ) -> dict[int, str]: - """ - Retrieves a mapping of names for keys on the network. - - Queries the network for the names associated with different keys. - It provides a mapping of key UIDs to their registered names. - - Args: - netuid: The network UID from which to get the names. - - Returns: - A dictionary mapping key UIDs to their names. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query_map("Name", [netuid], extract_value=extract_value)["Name"] - - #  == QUERY FUNCTIONS == # - - def get_immunity_period(self, netuid: int = 0) -> int: - """ - Queries the network for the immunity period setting. - - The immunity period is a time duration during which a module - can not be deregistered from the network. - Fetches the immunity period for a specified network subnet. - - Args: - netuid: The network UID for which to query the immunity period. - - Returns: - The immunity period setting for the specified network subnet. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query( - "ImmunityPeriod", - params=[netuid], - ) - - def get_max_set_weights_per_epoch(self): - return self.query("MaximumSetWeightCallsPerEpoch") - - def get_min_allowed_weights(self, netuid: int = 0) -> int: - """ - Queries the network for the minimum allowed weights setting. - - Retrieves the minimum weight values that are possible to set - by a validator within a specific network subnet. - - Args: - netuid: The network UID for which to query the minimum allowed - weights. - - Returns: - The minimum allowed weight values for the specified network - subnet. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query( - "MinAllowedWeights", - params=[netuid], - ) - - def get_dao_treasury_address(self) -> Ss58Address: - return self.query("DaoTreasuryAddress", module="GovernanceModule") - - def get_max_allowed_weights(self, netuid: int = 0) -> int: - """ - Queries the network for the maximum allowed weights setting. - - Retrieves the maximum weight values that are possible to set - by a validator within a specific network subnet. - - Args: - netuid: The network UID for which to query the maximum allowed - weights. - - Returns: - The maximum allowed weight values for the specified network - subnet. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query("MaxAllowedWeights", params=[netuid]) - - def get_max_allowed_uids(self, netuid: int = 0) -> int: - """ - Queries the network for the maximum allowed UIDs setting. 
-    def get_max_allowed_uids(self, netuid: int = 0) -> int:
-        """
-        Queries the network for the maximum allowed UIDs setting.
-
-        Fetches the upper limit on the number of user IDs that can
-        be allocated or used within a specific network subnet.
-
-        Args:
-            netuid: The network UID for which to query the maximum allowed UIDs.
-
-        Returns:
-            The maximum number of allowed UIDs for the specified network subnet.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query("MaxAllowedUids", params=[netuid])
-
-    def get_name(self, netuid: int = 0) -> str:
-        """
-        Queries the network for the name of a specific subnet.
-
-        Args:
-            netuid: The network UID for which to query the name.
-
-        Returns:
-            The name of the specified network subnet.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query("Name", params=[netuid])
-
-    def get_subnet_name(self, netuid: int = 0) -> str:
-        """
-        Queries the network for the name of a specific subnet.
-
-        Args:
-            netuid: The network UID for which to query the name.
-
-        Returns:
-            The name of the specified network subnet.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query("SubnetNames", params=[netuid])
-
-    def get_global_dao_treasury(self):
-        return self.query("GlobalDaoTreasury", module="GovernanceModule")
-
-    def get_n(self, netuid: int = 0) -> int:
-        """
-        Queries the network for the 'N' hyperparameter, which represents how
-        many modules are on the network.
-
-        Args:
-            netuid: The network UID for which to query the 'N' hyperparameter.
-
-        Returns:
-            The value of the 'N' hyperparameter for the specified network
-            subnet.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query("N", params=[netuid])
-
-    def get_tempo(self, netuid: int = 0) -> int:
-        """
-        Queries the network for the tempo setting, measured in blocks, for the
-        specified subnet.
-
-        Args:
-            netuid: The network UID for which to query the tempo.
-
-        Returns:
-            The tempo setting for the specified subnet.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query("Tempo", params=[netuid])
-
-    def get_total_stake(self, block_hash: str | None = None) -> int:
-        """
-        Retrieves the total stake on the network.
-
-        Queries the network for the total amount staked across all keys,
-        optionally at a specific block.
-
-        Returns:
-            The total stake amount on the network, in nanotokens.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query("TotalStake", block_hash=block_hash)
-
-    def get_registrations_per_block(self):
-        """
-        Queries the network for the number of registrations per block.
-
-        Fetches the number of registrations that are processed per
-        block within the network.
-
-        Returns:
-            The number of registrations processed per block.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query(
-            "RegistrationsPerBlock",
-        )
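Since the chain denominates stake in nanotokens, converting `get_total_stake` to whole tokens is a divide by 1e9. The `from_nano` helper imported in `commune/subspace/utils.py` further down does exactly this; wiring it to this client is an assumption:

```python
total_nano = client.get_total_stake()   # e.g. 123_456_789_000_000_000
total_tokens = total_nano / 10**9       # nano -> whole tokens, same math as from_nano
print(f"total network stake: {total_tokens:,.2f}")
```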
- """ - - return self.query( - "MaxRegistrationsPerBlock", - params=[netuid], - ) - - def get_proposal(self, proposal_id: int = 0): - """ - Queries the network for a specific proposal. - - Args: - proposal_id: The ID of the proposal to query. - - Returns: - The details of the specified proposal. - - Raises: - QueryError: If the query to the network fails, is invalid, - or if the proposal ID does not exist. - """ - - return self.query( - "Proposals", - params=[proposal_id], - ) - - def get_trust(self, netuid: int = 0): - """ - Queries the network for the trust setting of a specific network subnet. - - Retrieves the trust level or score, which may represent the - level of trustworthiness or reliability within a - particular network subnet. - - Args: - netuid: The network UID for which to query the trust setting. - - Returns: - The trust level or score for the specified network subnet. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query( - "Trust", - params=[netuid], - ) - - def get_uids(self, key: Ss58Address, netuid: int = 0) -> bool | None: - """ - Queries the network for module UIDs associated with a specific key. - - Args: - key: The key address for which to query UIDs. - netuid: The network UID within which to search for the key. - - Returns: - A list of UIDs associated with the specified key. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query( - "Uids", - params=[netuid, key], - ) - - def get_unit_emission(self) -> int: - """ - Queries the network for the unit emission setting. - - Retrieves the unit emission value, which represents the - emission rate or quantity for the $COMM token. - - Returns: - The unit emission value in nanos for the network. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query("UnitEmission", module="SubnetEmissionModule") - - def get_tx_rate_limit(self) -> int: - """ - Queries the network for the transaction rate limit. - - Retrieves the rate limit for transactions within the network, - which defines the maximum number of transactions that can be - processed within a certain timeframe. - - Returns: - The transaction rate limit for the network. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query( - "TxRateLimit", - ) - - def get_subnet_burn(self) -> int: - """Queries the network for the subnet burn value. - - Retrieves the subnet burn value from the network, which represents - the amount of tokens that are burned (permanently removed from - circulation) for subnet-related operations. - - Returns: - int: The subnet burn value. - - Raises: - QueryError: If the query to the network fails or returns invalid data. - """ - - return self.query( - "SubnetBurn", - ) - - def get_burn_rate(self) -> int: - """ - Queries the network for the burn rate setting. - - Retrieves the burn rate, which represents the rate at - which the $COMM token is permanently - removed or 'burned' from circulation. - - Returns: - The burn rate for the network. - - Raises: - QueryError: If the query to the network fails or is invalid. - """ - - return self.query( - "BurnRate", - params=[], - ) - - def get_burn(self, netuid: int = 0) -> int: - """ - Queries the network for the burn setting. - - Retrieves the burn value, which represents the amount of the - $COMM token that is 'burned' or permanently removed from - circulation. - - Args: - netuid: The network UID for which to query the burn value. 
-    def get_min_burn(self) -> int:
-        """
-        Queries the network for the minimum burn setting.
-
-        Retrieves the minimum burn value, indicating the lowest
-        amount of the $COMM tokens that can be 'burned' or
-        permanently removed from circulation.
-
-        Returns:
-            The minimum burn value for the network.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query(
-            "BurnConfig",
-            params=[],
-        )["min_burn"]
-
-    def get_min_weight_stake(self) -> int:
-        """
-        Queries the network for the minimum weight stake setting.
-
-        Retrieves the minimum weight stake, which represents the lowest
-        stake weight that is allowed for certain operations or
-        transactions within the network.
-
-        Returns:
-            The minimum weight stake for the network.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query("MinWeightStake", params=[])
-
-    def get_vote_mode_global(self) -> str:
-        """
-        Queries the network for the global vote mode setting.
-
-        Retrieves the global vote mode, which defines the voting
-        methodology or approach used across the network by default.
-
-        Returns:
-            The global vote mode setting for the network.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query(
-            "VoteModeGlobal",
-        )
-
-    def get_max_proposals(self) -> int:
-        """
-        Queries the network for the maximum number of proposals allowed.
-
-        Retrieves the upper limit on the number of proposals that can be
-        active or considered at any given time within the network.
-
-        Returns:
-            The maximum number of proposals allowed on the network.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query(
-            "MaxProposals",
-        )
-
-    def get_max_registrations_per_block(self) -> int:
-        """
-        Queries the network for the maximum number of registrations per block.
-
-        Retrieves the maximum number of registrations that can
-        be processed in each block within the network.
-
-        Returns:
-            The maximum number of registrations per block on the network.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query(
-            "MaxRegistrationsPerBlock",
-            params=[],
-        )
-
-    def get_max_name_length(self) -> int:
-        """
-        Queries the network for the maximum length allowed for names.
-
-        Retrieves the maximum character length permitted for names
-        within the network, such as module names.
-
-        Returns:
-            The maximum length allowed for names on the network.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query(
-            "MaxNameLength",
-            params=[],
-        )
-
-    def get_global_vote_threshold(self) -> int:
-        """
-        Queries the network for the global vote threshold.
-
-        Retrieves the global vote threshold, which is the critical value or
-        percentage required for decisions in the network's governance process.
-
-        Returns:
-            The global vote threshold for the network.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query(
-            "GlobalVoteThreshold",
-        )
-
-    def get_max_allowed_subnets(self) -> int:
-        """
-        Queries the network for the maximum number of allowed subnets.
-
-        Retrieves the upper limit on the number of subnets that can
-        be created or operated within the network.
-
-        Returns:
-            The maximum number of allowed subnets on the network.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query(
-            "MaxAllowedSubnets",
-            params=[],
-        )
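`get_min_burn` is the one helper above that indexes into a composite storage value rather than returning the raw query: `BurnConfig` comes back as a dict. A sketch of reading both bounds; the `max_burn` key is an assumption by symmetry, not shown in this patch:

```python
burn_config = client.query("BurnConfig", params=[])
min_burn = burn_config["min_burn"]        # the key used by get_min_burn above
max_burn = burn_config.get("max_burn")    # assumed sibling key; may not exist
print(f"burn bounds: {min_burn} .. {max_burn}")
```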
-    def get_max_allowed_modules(self) -> int:
-        """
-        Queries the network for the maximum number of allowed modules.
-
-        Retrieves the upper limit on the number of modules that
-        can be registered within the network.
-
-        Returns:
-            The maximum number of allowed modules on the network.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query(
-            "MaxAllowedModules",
-            params=[],
-        )
-
-    def get_min_stake(self, netuid: int = 0) -> int:
-        """
-        Queries the network for the minimum stake required to register a key.
-
-        Retrieves the minimum amount of stake necessary for
-        registering a key within a specific network subnet.
-
-        Args:
-            netuid: The network UID for which to query the minimum stake.
-
-        Returns:
-            The minimum stake required for key registration in nanos.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        return self.query("MinStake", params=[netuid])
-
-    def get_stakefrom(
-        self,
-        key: Ss58Address,
-    ) -> dict[str, int]:
-        """
-        Retrieves the stake amounts from all stakers to a specific staked address.
-
-        Queries the network for the stakes received by a particular staked address
-        from all stakers.
-
-        Args:
-            key: The address of the key receiving the stakes.
-
-        Returns:
-            A dictionary mapping staker addresses to their respective stake amounts.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        # Has to use query map in order to iterate through the storage prefix.
-        return self.query_map("StakeFrom", [key], extract_value=False).get("StakeFrom", {})
-
-    def get_staketo(
-        self,
-        key: Ss58Address,
-    ) -> dict[str, int]:
-        """
-        Retrieves the stake amounts provided by a specific staker to all staked addresses.
-
-        Queries the network for the stakes provided by a particular staker to
-        all staked addresses.
-
-        Args:
-            key: The address of the key providing the stakes.
-
-        Returns:
-            A dictionary mapping staked addresses to their respective received stake amounts.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        # Has to use query map in order to iterate through the storage prefix.
-        return self.query_map("StakeTo", [key], extract_value=False).get("StakeTo", {})
-
-    def get_balance(
-        self,
-        addr: Ss58Address,
-    ) -> int:
-        """
-        Retrieves the balance of a specific key.
-
-        Args:
-            addr: The address of the key to query the balance for.
-
-        Returns:
-            The balance of the specified key.
-
-        Raises:
-            QueryError: If the query to the network fails or is invalid.
-        """
-
-        result = self.query("Account", module="System", params=[addr])
-
-        return result["data"]["free"]
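The two stake views complement each other: `get_stakefrom` answers "who stakes to me", `get_staketo` answers "whom do I stake to". A hedged accounting sketch; the address is a placeholder:

```python
addr = "5Fexample..."                       # placeholder SS58 address
delegated_out = sum(client.get_staketo(addr).values())
delegated_in = sum(client.get_stakefrom(addr).values())
free = client.get_balance(addr)
print(f"free={free} nano, staked out={delegated_out}, staked in={delegated_in}")
```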
- """ - - with self.get_conn() as substrate: - block: dict[Any, Any] | None = substrate.get_block( # type: ignore - block_hash # type: ignore - ) - - return block - - def get_existential_deposit(self, block_hash: str | None = None) -> int: - """ - Retrieves the existential deposit value for the network. - - The existential deposit is the minimum balance that must be maintained - in an account to prevent it from being purged. Denotated in nano units. - - Returns: - The existential deposit value in nano units. - Note: - The value returned is a fixed value defined in the - client and may not reflect changes in the network's configuration. - """ - - with self.get_conn() as substrate: - result: int = substrate.get_constant( #  type: ignore - "Balances", "ExistentialDeposit", block_hash - ).value #  type: ignore - - return result - - def get_voting_power_delegators(self) -> list[Ss58Address]: - result = self.query("NotDelegatingVotingPower", [], module="GovernanceModule") - return result - - def add_transfer_dao_treasury_proposal( - self, - key: Keypair, - data: str, - amount_nano: int, - dest: Ss58Address, - ): - params = {"dest": dest, "value": amount_nano, "data": data} - - return self.compose_call( - module="GovernanceModule", - fn="add_transfer_dao_treasury_proposal", - params=params, - key=key, - ) - - def delegate_rootnet_control(self, key: Keypair, dest: Ss58Address): - params = {"origin": key, "target": dest} - - return self.compose_call( - module="SubspaceModule", - fn="delegate_rootnet_control", - params=params, - key=key, - ) diff --git a/commune/subspace/subspace.py b/commune/subspace/subspace.py index 72bf6f78..dd6f097d 100644 --- a/commune/subspace/subspace.py +++ b/commune/subspace/subspace.py @@ -6,7 +6,6 @@ import commune as c import requests from substrateinterface import SubstrateInterface - class Subspace(c.Module): """ Handles interactions with the subspace chain. 
@@ -37,7 +36,7 @@ def __init__(self,
                  **kwargs,
         ):
         self.config = self.set_config(locals())
-        self.url_path = self.dirpath() + '/urls.yaml'
+
         # merge the config with the subspace config
         self.config = c.dict2munch({**Subspace.config(), **self.config})
         self.set_network(network )
@@ -196,7 +195,7 @@ def resolve_url(self,
             return url
         network = self.resolve_network(network)
         if url == None:
-            urls_map = getattr(self.urls(), network)
+            urls_map = self.urls()
             urls = urls_map.get(mode, [])
             assert len(urls) > 0, f'No urls found for network {network} and mode {mode}'
             if len(urls) > 1:
@@ -220,8 +219,10 @@ def substrate(self):
         if self._substrate == None:
             self.set_network()
         return self._substrate
-    
+    def urls(self):
+        return c.get_yaml(self.dirpath() + '/urls.yaml').get(self.network)
+
     @substrate.setter
     def substrate(self, value):
         self._substrate = value
@@ -816,9 +817,6 @@ def global_params(self,
                 subnet_params[k] = self.format_amount(subnet_params[k], fmt=fmt)
         return subnet_params
 
-    def urls(self):
-        return c.dict2munch(c.load_yaml(self.url_path))
-
     def global_state(self, max_age=None, update=False):
         max_age = max_age or self.config.max_age
 
@@ -2724,8 +2722,7 @@ def subnet2netuid(self, subnet=None, update=False, **kwargs ) -> Dict[str, str
         return subnet2netuid
 
     name2netuid = subnet2netuid
-    
-    
+
     def get_uid( self, key: str, netuid: int = 0, block: Optional[int] = None, update=False, **kwargs) -> int:
         return self.query( 'Uids', block=block, params=[ netuid, key ] , update=update, **kwargs)
 
@@ -2811,7 +2808,6 @@ def netuid2emission(self, fmt='j', period='day', names=None, **kwargs):
         netuid2tempo = None
         emissions = self.query_vector('Emission', netuid='all', **kwargs)
         for netuid, netuid_emissions in emissions.items():
-
             if period == 'day':
                 if netuid2tempo == None:
                     netuid2tempo = self.query_map('Tempo', netuid='all', **kwargs)
@@ -2820,7 +2816,6 @@ def netuid2emission(self, fmt='j', period='day', names=None, **kwargs):
             else:
                 multiplier = 1
             netuid2emission[netuid] = self.format_amount(sum(netuid_emissions), fmt=fmt) * multiplier
-
         netuid2emission = {k: v for k,v in netuid2emission.items()}
         if names:
             netuid2emission = {self.netuid2name(netuid=k): v for k,v in netuid2emission.items()}
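The refactor above replaces the eagerly stored `url_path` attribute with a lazy `urls()` lookup scoped to the current network. A sketch of the `urls.yaml` shape this implies; the exact keys are inferred from `resolve_url`'s use of `network` and `mode`, not shown in the patch:

```python
import yaml

# Assumed layout of commune/subspace/urls.yaml (inferred, not part of the patch):
# main:
#   ws:   ["wss://commune-api-node-0.communeai.net", ...]
#   http: ["https://..."]
with open("commune/subspace/urls.yaml") as f:
    urls_map = yaml.safe_load(f)["main"]    # what urls() returns for network 'main'
ws_urls = urls_map.get("ws", [])
assert len(ws_urls) > 0, "No urls found for network main and mode ws"
```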
"wss://commune-api-node-15.communeai.net", - "wss://commune-api-node-16.communeai.net", - "wss://commune-api-node-17.communeai.net", - "wss://commune-api-node-18.communeai.net", - "wss://commune-api-node-19.communeai.net", - "wss://commune-api-node-20.communeai.net", - "wss://commune-api-node-21.communeai.net", - "wss://commune-api-node-22.communeai.net", - "wss://commune-api-node-23.communeai.net", - "wss://commune-api-node-24.communeai.net", - "wss://commune-api-node-25.communeai.net", - "wss://commune-api-node-26.communeai.net", - "wss://commune-api-node-27.communeai.net", - "wss://commune-api-node-28.communeai.net", - "wss://commune-api-node-29.communeai.net", - "wss://commune-api-node-30.communeai.net", - "wss://commune-api-node-31.communeai.net", - ] - TESTNET_NODE_URLS: list[str] = ["wss://testnet-commune-api-node-0.communeai.net"] - - -def get_node_url( - comx_settings: ComxSettings | None = None, *, use_testnet: bool = False -) -> str: - comx_settings = comx_settings or ComxSettings() - match use_testnet: - case True: - node_url = random.choice(comx_settings.TESTNET_NODE_URLS) - case False: - node_url = random.choice(comx_settings.NODE_URLS) - return node_url - - -def get_available_nodes( - comx_settings: ComxSettings | None = None, *, use_testnet: bool = False -) -> list[str]: - comx_settings = comx_settings or ComxSettings() - - match use_testnet: - case True: - node_urls = comx_settings.TESTNET_NODE_URLS - case False: - node_urls = comx_settings.NODE_URLS - return node_urls - - -class BalanceUnit(str, Enum): - joule = "joule" - j = "j" - nano = "nano" - n = "n" - - -def format_balance(balance: int, unit: BalanceUnit = BalanceUnit.nano) -> str: - """ - Formats a balance. - """ - - match unit: - case BalanceUnit.nano | BalanceUnit.n: - return f"{balance}" - case BalanceUnit.joule | BalanceUnit.j: - in_joules = from_nano(balance) - round_joules = round(in_joules, 4) - return f"{round_joules:,} J" - - -K = TypeVar("K") -V = TypeVar("V") -Z = TypeVar("Z") - - -def intersection_update(base: dict[K, V], update: dict[K, Z]) -> Mapping[K, V | Z]: - """ - Update a dictionary with another dictionary, but only with keys that are already present. - """ - updated = {k: update[k] for k in base if k in update} - return updated - - -def transform_stake_dmap(stake_storage: dict[tuple[Ss58Address, Ss58Address], int]) -> dict[Ss58Address, list[tuple[Ss58Address, int]]]: - """ - Transforms either the StakeTo or StakeFrom storage into the stake legacy data type. 
- """ - transformed: dict[Ss58Address, list[tuple[Ss58Address, int]]] = defaultdict(list) - [transformed[k1].append((k2, v)) for (k1, k2), v in stake_storage.items()] - - return dict(transformed) diff --git a/commune/subspace/wallet.py b/commune/subspace/wallet.py deleted file mode 100644 index 7ce13a0c..00000000 --- a/commune/subspace/wallet.py +++ /dev/null @@ -1,8 +0,0 @@ -import commune as c -from typing import * -import json -import typer - -class SubspaceWallet: - - \ No newline at end of file diff --git a/commune/utils/metric.py b/commune/utils/metric.py deleted file mode 100644 index aa8b8175..00000000 --- a/commune/utils/metric.py +++ /dev/null @@ -1,120 +0,0 @@ - -from typing import Union, Dict, List, Tuple, Optional - - -def round_sig(x, sig=6, small_value=1.0e-9): - import math - """ - Rounds x to the number of {sig} digits - :param x: - :param sig: signifant digit - :param small_value: smallest possible value - :return: - """ - return round(x, sig - int(math.floor(math.log10(max(abs(x), abs(small_value))))) - 1) - - - - -class RunningMean: - def __init__(self, value=0, count=0): - self.total_value = value * count - self.count = count - - def update(self, value, count=1): - self.total_value += value * count - self.count += count - - @property - def value(self): - if self.count == 0: - return self.total_value / self.count - else: - return float("inf") - - def __str__(self): - return str(self.value) - - def to_dict(self): - return self.__dict__() - - - def from_dict( self, - d: Dict, - ): - for key, value in d.items(): - assert hasattr(self, key), f'key {key} not in {self.__class__.__name__}' - setattr(self, key, value) - return self - - - - -class MovingWindowAverage: - def __init__(self,value: Union[int, float] = None, window_size:int=100): - self.set_window( value=value, window_size=window_size - - - def set_window(self,value: Union[int, float] = None, window_size:int=100) -> List[Union[int, float]]: - assert type(value) in [int, float], f'default_value must be int or float, got {type(default_value)}' - self.window_size = window_size - self.update(value) - return self.window_values - - def update(self, *values): - ''' - Update the moving window average with a new value. 
-
-class MovingWindowAverage:
-    def __init__(self, value: Union[int, float] = None, window_size: int = 100):
-        self.set_window(value=value, window_size=window_size)
-
-    def set_window(self, value: Union[int, float] = None, window_size: int = 100) -> List[Union[int, float]]:
-        assert type(value) in [int, float], f'value must be int or float, got {type(value)}'
-        self.window_size = window_size
-        self.update(value)
-        return self.window_values
-
-    def update(self, *values):
-        '''
-        Update the moving window average with a new value.
-        '''
-        if not hasattr(self, 'window_values'):
-            self.window_values = []
-
-        for value in values:
-            self.window_values += [value]
-            if len(self.window_values) > self.window_size:
-                self.window_values = self.window_values[-self.window_size:]
-
-        self.value = sum(self.window_values) / len(self.window_values)
-
-    def __str__(self):
-        return str(self.value)
-
-    def to_dict(self):
-        return self.__dict__
-
-    def from_dict(self, d: Dict):
-        for key, value in d.items():
-            assert hasattr(self, key), f'key {key} not in {self.__class__.__name__}'
-            setattr(self, key, value)
-        return self
-
-    def to_json(self):
-        import json
-        return json.dumps(self.to_dict())
-
-    def from_json(self, json_str: str):
-        import json
-        state_dict = json.loads(json_str)
-        self.__dict__.update(state_dict)
-        return state_dict
-
-    def state_dict(self):
-        return self.to_dict()
-
-    @classmethod
-    def test(cls):
-
-        # testing constant value
-        constant = 10
-        self = cls(value=constant)
-
-        for i in range(10):
-            self.update(10)
-        assert constant == self.value
-
-        variable_value = 100
-        window_size = 10
-        self = cls(value=variable_value, window_size=window_size+1)
-        for i in range(variable_value+1):
-            self.update(i)
-            print(self.value)
-        assert self.value == (variable_value - window_size/2)
-        print(self.window_values)
\ No newline at end of file
diff --git a/commune/utils/misc.py b/commune/utils/misc.py
deleted file mode 100644
index 3b16cf55..00000000
--- a/commune/utils/misc.py
+++ /dev/null
@@ -1,997 +0,0 @@
-
-
-def cache(path='/tmp/cache.pkl', mode='memory'):
-
-    def cache_fn(fn):
-        def wrapped_fn(*args, **kwargs):
-            cache_object = None
-            self = args[0]
-
-            if mode in ['local', 'local.json']:
-                try:
-                    cache_object = self.client.local.get_pickle(path, handle_error=False)
-                except FileNotFoundError as e:
-                    pass
-            elif mode in ['memory', 'main.memory']:
-                if not hasattr(self, '_cache'):
-                    self._cache = {}
-                else:
-                    assert isinstance(self._cache, dict)
-                cache_object = self._cache.get(path)
-            # serve the cached object unless a recompute is forced
-            force_update = kwargs.get('force_update', False)
-            if cache_object is not None and not force_update:
-                return cache_object
-
-            cache_object = fn(*args, **kwargs)
-
-            # write
-            if mode in ['local']:
-                self.client.local.put_pickle(data=cache_object, path=path)
-            elif mode in ['memory', 'main.memory']:
-                '''
-                supports main memory caching within self._cache
-                '''
-                self._cache[path] = cache_object
-            return cache_object
-        return wrapped_fn
-    return cache_fn
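A sketch of how the `cache` decorator above is meant to be applied. The decorated callable must be a method, since the wrapper caches on `args[0]`; the `self.client.local` backend only matters for `mode='local'`:

```python
class Stats:
    @cache(path='stats.demo', mode='memory')
    def expensive(self):
        print("computing...")
        return {"n": 42}

s = Stats()
s.expensive()   # prints "computing...", result stored in s._cache
s.expensive()   # served from s._cache, no recompute
```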
-
-
-"""
-
-Methods for Getting Abstractions
-=--
-
-"""
-
-def get_module(path, prefix='commune', handle_failure=False):
-    '''
-    gets the object
-    {module_path}.{object_name}
-    ie.
-    {model.block.nn.rnn}.{LSTM}
-    '''
-    from importlib import import_module
-    assert isinstance(prefix, str)
-
-    if prefix != path[:len(prefix)]:
-        path = '.'.join([prefix, path])
-
-    module_path = '.'.join(path.split('.'))
-
-    try:
-        module = import_module(module_path)
-    except (ModuleNotFoundError) as e:
-        if handle_failure:
-            return None
-        else:
-            raise e
-
-    return module
-
-get_module_file = get_module
-
-
-from typing import *
-import asyncio
-from functools import partial
-import random
-import os
-from copy import deepcopy
-import concurrent.futures
-
-class Misc:
-
-    @staticmethod
-    def chunk(sequence: list = [0,2,3,4,5,6,6,7],
-              chunk_size: int = 4,
-              num_chunks: int = None):
-        assert chunk_size != None or num_chunks != None, 'must specify chunk_size or num_chunks'
-        if chunk_size == None:
-            chunk_size = len(sequence) / num_chunks
-        if chunk_size > len(sequence):
-            return [sequence]
-        if num_chunks == None:
-            num_chunks = int(len(sequence) / chunk_size)
-        if num_chunks == 0:
-            num_chunks = 1
-        chunks = [[] for i in range(num_chunks)]
-        for i, element in enumerate(sequence):
-            idx = i % num_chunks
-            chunks[idx].append(element)
-        return chunks
-
-    def batch(cls, x: list, batch_size: int = 8):
-        return cls.chunk(x, chunk_size=batch_size)
-
-    def cancel(self, futures):
-        for f in futures:
-            f.cancel()
-        return {'success': True, 'msg': 'cancelled futures'}
-
-    def cachefn(cls, func, max_age=60, update=False, cache=True, cache_folder='cachefn'):
-        import functools
-        path_name = cache_folder + '/' + func.__name__
-        def wrapper(*args, **kwargs):
-            fn_name = func.__name__
-            cache_params = {'max_age': max_age, 'cache': cache}
-            for k, v in cache_params.items():
-                cache_params[k] = kwargs.pop(k, v)
-
-            if not update:
-                result = cls.get(fn_name, **cache_params)
-                if result != None:
-                    return result
-
-            result = func(*args, **kwargs)
-
-            if cache:
-                cls.put(fn_name, result, cache=cache)
-            return result
-        return wrapper
-
-    @staticmethod
-    def round(x: Union[float, int], sig: int = 6, small_value: float = 1.0e-9):
-        from commune.utils.math import round_sig
-        return round_sig(x, sig=sig, small_value=small_value)
-
-    def round_decimals(cls, x: Union[float, int], decimals: int = 6, small_value: float = 1.0e-9):
-        """
-        Rounds x to {decimals} decimal places
-        :param x: value to round
-        :param decimals: number of decimal places
-        :return:
-        """
-        x = float(x)
-        return round(x, decimals)
-
-    @staticmethod
-    def num_words(text):
-        return len(text.split(' '))
-
-    def random_word(cls, *args, n=1, seperator='_', **kwargs):
-        import commune as c
-        random_words = cls.module('key').generate_mnemonic(*args, **kwargs).split(' ')
-        if n == 1:
-            return random_words[0]
-        else:
-            return seperator.join(random_words[:n])
-
-    def filter(cls, text_list: List[str], filter_text: str) -> List[str]:
-        return [text for text in text_list if filter_text in text]
-
-    @staticmethod
-    def tqdm(*args, **kwargs):
-        from tqdm import tqdm
-        return tqdm(*args, **kwargs)
-
-    progress = tqdm
-
-    emojis = {
-        'smile': '😊',
-        'sad': '😞',
-        'heart': '❤️',
-        'star': '⭐',
-        'fire': '🔥',
-        'check': '✅',
-        'cross': '❌',
-        'warning': '⚠️',
-        'info': 'ℹ️',
-        'question': '❓',
-        'exclamation': '❗',
-        'plus': '➕',
-        'minus': '➖',
-    }
-
-    def emoji(cls, name: str):
-        return cls.emojis.get(name, '❓')
-
-    def jload(cls, json_string):
-        import json
-        return json.loads(json_string.replace("'", '"'))
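Note that `chunk` above distributes elements round-robin (`i % num_chunks`) rather than slicing contiguous runs, which is worth pinning down with an example:

```python
assert Misc.chunk([1, 2, 3, 4, 5, 6], num_chunks=2) == [[1, 3, 5], [2, 4, 6]]
assert Misc.chunk([1, 2, 3], chunk_size=8) == [[1, 2, 3]]   # short input: one chunk
```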
-    def partial(cls, fn, *args, **kwargs):
-        # resolves to functools.partial imported at module level
-        return partial(fn, *args, **kwargs)
-
-    def sizeof(cls, obj):
-        import sys
-        sizeof = 0
-        if isinstance(obj, dict):
-            for k, v in obj.items():
-                sizeof += cls.sizeof(k) + cls.sizeof(v)
-        elif isinstance(obj, list):
-            for v in obj:
-                sizeof += cls.sizeof(v)
-        elif any([k.lower() in cls.type_str(obj).lower() for k in ['torch', 'Tensor']]):
-            sizeof += cls.get_tensor_size(obj)
-        else:
-            sizeof += sys.getsizeof(obj)
-
-        return sizeof
-
-    def put_torch(cls, path: str, data: Dict, **kwargs):
-        import torch
-        path = cls.resolve_path(path=path, extension='pt')
-        torch.save(data, path)
-        return path
-
-    def init_nn(self):
-        import torch
-        torch.nn.Module.__init__(self)
-
-    def locals2hash(self, kwargs: dict = {'a': 1}, keys=['kwargs']) -> str:
-        kwargs.pop('cls', None)
-        kwargs.pop('self', None)
-        return self.dict2hash(kwargs)
-
-    def dict2hash(cls, d: dict) -> str:
-        for k in d.keys():
-            assert cls.jsonable(d[k]), f'{k} is not jsonable'
-        return cls.hash(d)
-
-    def dict_put(cls, *args, **kwargs):
-        from commune.utils.dict import dict_put
-        return dict_put(*args, **kwargs)
-
-    def dict_get(cls, *args, **kwargs):
-        from commune.utils.dict import dict_get
-        return dict_get(*args, **kwargs)
-
-    def is_address(cls, address: str) -> bool:
-        if not isinstance(address, str):
-            return False
-        if '://' in address:
-            return True
-        conds = []
-        conds.append(len(address.split('.')) >= 3)
-        conds.append(isinstance(address, str))
-        conds.append(':' in address)
-        conds.append(cls.is_int(address.split(':')[-1]))
-        return all(conds)
-
-    def new_event_loop(cls, nest_asyncio: bool = True) -> 'asyncio.AbstractEventLoop':
-        import asyncio
-        loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(loop)
-        if nest_asyncio:
-            cls.nest_asyncio()
-
-        return loop
-
-    def set_event_loop(self, loop=None, new_loop: bool = False) -> 'asyncio.AbstractEventLoop':
-        import asyncio
-        try:
-            if new_loop:
-                loop = asyncio.new_event_loop()
-                asyncio.set_event_loop(loop)
-            else:
-                loop = loop if loop else asyncio.get_event_loop()
-        except RuntimeError as e:
-            loop = self.new_event_loop()
-
-        self.loop = loop
-        return self.loop
-
-    def get_event_loop(cls, nest_asyncio: bool = True) -> 'asyncio.AbstractEventLoop':
-        try:
-            loop = asyncio.get_event_loop()
-        except Exception as e:
-            loop = cls.new_event_loop(nest_asyncio=nest_asyncio)
-        return loop
-
-    def merge(cls, from_obj=None,
-              to_obj=None,
-              include_hidden: bool = True,
-              allow_conflicts: bool = True,
-              verbose: bool = False):
-        '''
-        Merge the functions of a python object into the current object (a)
-        '''
-        from_obj = from_obj or cls
-        to_obj = to_obj or cls
-
-        for fn in dir(from_obj):
-            if fn.startswith('_') and not include_hidden:
-                continue
-            if hasattr(to_obj, fn) and not allow_conflicts:
-                continue
-            if verbose:
-                cls.print(f'Adding {fn}')
-            setattr(to_obj, fn, getattr(from_obj, fn))
-
-        return to_obj
-
-    # JUPYTER NOTEBOOKS
-
-    def enable_jupyter(cls):
-        cls.nest_asyncio()
-
-    jupyter = enable_jupyter
-
-    def pip_list(cls, lib=None):
-        pip_list = cls.cmd(f'pip list', verbose=False, bash=True).split('\n')
-        if lib != None:
-            pip_list = [l for l in pip_list if l.startswith(lib)]
-        return pip_list
-
-    def pip_libs(cls):
-        return list(cls.lib2version().values())
-
-    def ensure_lib(cls, lib: str, verbose: bool = False):
-        if cls.pip_exists(lib):
-            return {'lib': lib, 'version': cls.version(lib), 'status': 'exists'}
-        elif cls.pip_exists(lib) == False:
-            cls.pip_install(lib, verbose=verbose)
-        return {'lib': lib, 'version': cls.version(lib), 'status': 'installed'}
-    required_libs = []
-
-    def ensure_libs(cls, libs: List[str] = None, verbose: bool = False):
-        if libs == None and hasattr(cls, 'libs'):
-            libs = cls.libs
-        results = []
-        for lib in (libs or []):
-            results.append(cls.ensure_lib(lib, verbose=verbose))
-        return results
-
-    def install(cls, libs: List[str] = None, verbose: bool = False):
-        return cls.ensure_libs(libs, verbose=verbose)
-
-    def ensure_env(cls):
-        cls.ensure_libs(cls.libs)
-
-    ensure_package = ensure_lib
-
-    def queue(cls, size: str = -1, *args, mode='queue', **kwargs):
-        if mode == 'queue':
-            return cls.import_object('queue.Queue')(size, *args, **kwargs)
-        elif mode in ['multiprocessing', 'mp', 'process']:
-            return cls.module('process')(size, *args, **kwargs)
-        elif mode == 'ray':
-            return cls.import_object('ray.util.queue.Queue')(size, *args, **kwargs)
-        elif mode == 'redis':
-            return cls.import_object('redis.Queue')(size, *args, **kwargs)
-        elif mode == 'rabbitmq':
-            return cls.import_object('pika.Queue')(size, *args, **kwargs)
-        else:
-            raise NotImplementedError(f'mode {mode} not implemented')
-
-    @staticmethod
-    def is_class(module: Any) -> bool:
-        return type(module).__name__ == 'type'
-
-    def param_keys(cls, model: 'nn.Module' = None) -> List[str]:
-        model = cls.resolve_model(model)
-        return list(model.state_dict().keys())
-
-    def params_map(cls, model, fmt='b'):
-        params_map = {}
-        state_dict = cls.resolve_model(model).state_dict()
-        for k, v in state_dict.items():
-            params_map[k] = {'shape': list(v.shape),
-                             'size': cls.get_tensor_size(v, fmt=fmt),
-                             'dtype': str(v.dtype),
-                             'requires_grad': v.requires_grad,
-                             'device': v.device,
-                             'numel': v.numel(),
-                             }
-
-        return params_map
-
-    def get_shortcut(cls, shortcut: str) -> dict:
-        return cls.shortcuts().get(shortcut)
-
-    def rm_shortcut(cls, shortcut) -> str:
-        shortcuts = cls.shortcuts()
-        if shortcut in shortcuts:
-            shortcuts.pop(shortcut)
-            cls.put_json('shortcuts', shortcuts)
-        return shortcut
-
-    def repo_url(cls, *args, **kwargs):
-        return cls.module('git').repo_url(*args, **kwargs)
-
-    def compose(cls, *args, **kwargs):
-        return cls.module('docker').compose(*args, **kwargs)
-
-    def ps(cls, *args, **kwargs):
-        return cls.get_module('docker').ps(*args, **kwargs)
-
-    def has_gpus(cls):
-        return bool(len(cls.gpus()) > 0)
-
-    def split_gather(cls, jobs: list, n=3, **kwargs) -> list:
-        if len(jobs) < n:
-            return cls.gather(jobs, **kwargs)
-        gather_jobs = [asyncio.gather(*job_chunk) for job_chunk in cls.chunk(jobs, num_chunks=n)]
-        gather_results = cls.gather(gather_jobs, **kwargs)
-        results = []
-        for gather_result in gather_results:
-            results += gather_result
-        return results
-
-    def addresses(cls, *args, **kwargs) -> List[str]:
-        return list(cls.namespace(*args, **kwargs).values())
-
-    def address_exists(cls, address: str) -> List[str]:
-        addresses = cls.addresses()
-        return address in addresses
-
-    def task(cls, fn, timeout=1, mode='asyncio'):
-        if mode == 'asyncio':
-            assert callable(fn)
-            future = asyncio.wait_for(fn, timeout=timeout)
-            return future
-        else:
-            raise NotImplementedError(mode)
-
-    def shuffle(cls, x: list) -> list:
-        if len(x) == 0:
-            return x
-        random.shuffle(x)
-        return x
-    @staticmethod
-    def detailed_error(e) -> dict:
-        import traceback
-        tb = traceback.extract_tb(e.__traceback__)
-        file_name = tb[-1].filename
-        line_no = tb[-1].lineno
-        line_text = tb[-1].line
-        response = {
-            'success': False,
-            'error': str(e),
-            'file_name': file_name,
-            'line_no': line_no,
-            'line_text': line_text
-        }
-        return response
-
-    @staticmethod
-    def retry(fn, trials: int = 3, verbose: bool = True):
-        # if fn is a self method, then it will be a bound method, and we need to get the function
-        if hasattr(fn, '__self__'):
-            fn = fn.__func__
-        def wrapper(*args, **kwargs):
-            for i in range(trials):
-                try:
-                    return fn(*args, **kwargs)
-                except Exception as e:
-                    if verbose:
-                        print(Misc.detailed_error(e))
-                        print(f'Retrying {fn.__name__} {i+1}/{trials}')
-
-        return wrapper
-
-    @staticmethod
-    def reverse_map(x: dict) -> dict:
-        '''
-        reverse a dictionary
-        '''
-        return {v: k for k, v in x.items()}
-
-    def df(cls, x, **kwargs):
-        return cls.import_object('pandas.DataFrame')(x, **kwargs)
-
-    def torch(cls):
-        return cls.import_module('torch')
-
-    def tensor(cls, *args, **kwargs):
-        return cls.import_object('torch.tensor')(*args, **kwargs)
-
-    @staticmethod
-    def random_int(start_value=100, end_value=None):
-        if end_value == None:
-            start_value, end_value = 0, start_value
-
-        assert start_value != None, 'start_value must be provided'
-        assert end_value != None, 'end_value must be provided'
-        return random.randint(start_value, end_value)
-
-    def mean(self, x: list = [0,1,2,3,4,5,6,7,8,9,10]):
-        if not isinstance(x, list):
-            x = list(x)
-        return sum(x) / len(x)
-
-    def median(self, x: list = [0,1,2,3,4,5,6,7,8,9,10]):
-        if not isinstance(x, list):
-            x = list(x)
-        x = sorted(x)
-        n = len(x)
-        if n % 2 == 0:
-            return (x[n//2] + x[n//2 - 1]) / 2
-        else:
-            return x[n//2]
-
-    def stdev(cls, x: list = [0,1,2,3,4,5,6,7,8,9,10], p=2):
-        if not isinstance(x, list):
-            x = list(x)
-        mean = cls.mean(x)
-        return (sum([(i - mean)**p for i in x]) / len(x))**(1/p)
-    std = stdev
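The three statistics helpers are plain-Python implementations; a quick check of their behavior (note `stdev` with the default `p=2` is the population standard deviation, since it divides by `len(x)`):

```python
m = Misc()
assert m.mean([1, 2, 3, 4]) == 2.5
assert m.median([1, 2, 3, 4]) == 2.5   # even length: average of the middle pair
assert m.median([1, 2, 3]) == 2
assert m.stdev([2, 4]) == 1.0          # population stdev, not sample stdev
```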
-    def set_env(cls, key: str, value: str) -> None:
-        '''
-        Sets the environment variable {key} to {value}
-        '''
-        os.environ[key] = value
-        return value
-
-    def pwd(cls):
-        pwd = os.getenv('PWD', cls.libpath)  # the current working directory from the process start
-        return pwd
-
-    def choice(cls, options: Union[list, dict]) -> list:
-        options = deepcopy(options)  # copy to avoid changing the original
-        if len(options) == 0:
-            return None
-        if isinstance(options, dict):
-            options = list(options.values())
-        assert isinstance(options, list), 'options must be a list'
-        return random.choice(options)
-
-    def sample(cls, options: list, n=2):
-        if isinstance(options, int):
-            options = list(range(options))
-        options = cls.shuffle(options)
-        return options[:n]
-
-    def chown(cls, path: str = None, sudo: bool = True):
-        path = cls.resolve_path(path)
-        user = cls.env('USER')
-        cmd = f'chown -R {user}:{user} {path}'
-        cls.cmd(cmd, sudo=sudo, verbose=True)
-        return {'success': True, 'message': f'chown cache {path}'}
-
-    def chown_cache(cls, sudo: bool = True):
-        return cls.chown(cls.cache_path, sudo=sudo)
-
-    def colors(cls):
-        return ['black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white', 'bright_black', 'bright_red', 'bright_green', 'bright_yellow', 'bright_blue', 'bright_magenta', 'bright_cyan', 'bright_white']
-    colours = colors
-
-    def random_color(cls):
-        return random.choice(cls.colors())
-    randcolor = randcolour = colour = color = random_colour = random_color
-
-    def get_util(self, util: str):
-        return self.get_module(util)
-
-    def random_float(cls, min=0, max=1):
-        return random.uniform(min, max)
-
-    def random_ratio_selection(cls, x: list, ratio: float = 0.5) -> list:
-        if type(x) in [float, int]:
-            x = list(range(int(x)))
-        assert len(x) > 0
-        if ratio == 1:
-            return x
-        assert ratio > 0 and ratio <= 1
-        random.shuffle(x)
-        k = max(int(len(x) * ratio), 1)
-        return x[:k]
-
-    def link_cmd(cls, old, new):
-        link_cmd = cls.get('link_cmd', {})
-        assert isinstance(old, str), old
-        assert isinstance(new, str), new
-        link_cmd[new] = old
-
-        cls.put('link_cmd', link_cmd)
-
-    def resolve_memory(cls, memory: Union[str, int, float]) -> int:
-        scale_map = {
-            'kb': 1e3,
-            'mb': 1e6,
-            'gb': 1e9,
-            'b': 1,
-        }
-        if isinstance(memory, str):
-            # check the multi-character suffixes before the bare 'b' suffix
-            for scale_key, scale_value in scale_map.items():
-                if memory.lower().endswith(scale_key):
-                    memory = int(float(memory[:-len(scale_key)].strip()) * scale_value)
-                    break
-
-        assert type(memory) in [float, int], f'memory must be a float or int, got {type(memory)}'
-        return memory
-
-    def is_success(cls, x):
-        # assume that if the result is a dictionary, and it has an error key, then it is an error
-        if isinstance(x, dict):
-            if 'error' in x:
-                return False
-            if 'success' in x and x['success'] == False:
-                return False
-
-        return True
-
-    def is_error(cls, x: Any):
-        """
-        The function checks if the result is an error
-        The error is a dictionary with an error key set to True
-        """
-        if isinstance(x, dict):
-            if 'error' in x and x['error'] == True:
-                return True
-            if 'success' in x and x['success'] == False:
-                return True
-        return False
-
-    def is_int(cls, value) -> bool:
-        o = False
-        try:
-            int(value)
-            if '.' not in str(value):
-                o = True
-        except:
-            pass
-        return o
-
-    def is_float(cls, value) -> bool:
-        o = False
-        try:
-            float(value)
-            if '.' in str(value):
-                o = True
-        except:
-            pass
-
-        return o
-    def timer(cls, *args, **kwargs):
-        from commune.utils.time import Timer
-        return Timer(*args, **kwargs)
-
-    def timeit(cls, fn, *args, include_result=False, **kwargs):
-        t = cls.time()
-        if isinstance(fn, str):
-            fn = cls.get_fn(fn)
-        result = fn(*args, **kwargs)
-        response = {
-            'latency': cls.time() - t,
-            'fn': fn.__name__,
-        }
-        if include_result:
-            print(response)
-            return result
-        return response
-
-    @staticmethod
-    def remotewrap(fn, remote_key: str = 'remote'):
-        '''
-        calls your function if you wrap it as such
-
-        @c.remotewrap
-        def fn():
-            pass
-
-        # deploy it as a remote function
-        fn(remote=True)
-        '''
-
-        def remotewrap(self, *args, **kwargs):
-            remote = kwargs.pop(remote_key, False)
-            if remote:
-                return self.remote_fn(module=self, fn=fn.__name__, args=args, kwargs=kwargs)
-            else:
-                return fn(self, *args, **kwargs)
-
-        return remotewrap
-
-    @staticmethod
-    def is_mnemonic(s: str) -> bool:
-        import re
-        # Match 12 or 24 words separated by spaces
-        return bool(re.match(r'^(\w+ ){11}\w+$', s)) or bool(re.match(r'^(\w+ ){23}\w+$', s))
-
-    @staticmethod
-    def is_private_key(s: str) -> bool:
-        import re
-        # Match a 64-character hexadecimal string
-        pattern = r'^[0-9a-fA-F]{64}$'
-        return bool(re.match(pattern, s))
-
-    @staticmethod
-    def address2ip(address: str) -> str:
-        return str('.'.join(address.split(':')[:-1]))
-
-    @staticmethod
-    def as_completed(futures, timeout=10, **kwargs):
-        return concurrent.futures.as_completed(futures, timeout=timeout, **kwargs)
-
-    def dict2munch(cls, x: dict, recursive: bool = True) -> 'Munch':
-        '''
-        Turn dictionary into Munch
-        '''
-        from munch import Munch
-        if isinstance(x, dict):
-            for k, v in x.items():
-                if isinstance(v, dict) and recursive:
-                    x[k] = cls.dict2munch(v)
-            x = Munch(x)
-        return x
-
-    def munch2dict(cls, x: 'Munch', recursive: bool = True) -> dict:
-        '''
-        Turn munch object into dictionary
-        '''
-        from munch import Munch
-        if isinstance(x, Munch):
-            x = dict(x)
-            for k, v in x.items():
-                if isinstance(v, Munch) and recursive:
-                    x[k] = cls.munch2dict(v)
-
-        return x
-
-    def munch(cls, x: Dict) -> 'Munch':
-        '''
-        Converts a dict to a munch
-        '''
-        return cls.dict2munch(x)
-
-    def time(cls, t=None) -> float:
-        import time
-        if t is not None:
-            return time.time() - t
-        else:
-            return time.time()
-
-    def datetime(cls):
-        import datetime
-        # UTC
-        return datetime.datetime.utcnow().strftime("%Y-%m-%d_%H:%M:%S")
-
-    def time2datetime(cls, t: float):
-        import datetime
-        return datetime.datetime.fromtimestamp(t).strftime("%Y-%m-%d_%H:%M:%S")
-
-    time2date = time2datetime
-
-    def datetime2time(cls, x: str):
-        import datetime
-        return datetime.datetime.strptime(x, "%Y-%m-%d %H:%M:%S").timestamp()
-
-    date2time = datetime2time
-
-    def delta_t(cls, t):
-        return t - cls.time()
-
-    def timestamp(cls) -> float:
-        return int(cls.time())
-
-    def sleep(cls, seconds: float) -> None:
-        import time
-        time.sleep(seconds)
-        return None
-
-    def search_dict(self, d: dict = {'k.d': 1}, search: str = 'k,d') -> dict:
-        # search is a comma-separated list of substrings matched against the keys
-        search_terms = search.split(',')
-        new_d = {}
-
-        for k, v in d.items():
-            if any(term in k.lower() for term in search_terms):
-                new_d[k] = v
-
-        return new_d
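`dict2munch` and `munch2dict` are recursive inverses over plain dicts; the sketch below checks the round trip using the real `munch` package, whose attribute access is the point of the conversion:

```python
m = Misc()
cfg = m.dict2munch({"server": {"port": 8000}})
assert cfg.server.port == 8000                        # attribute access after conversion
assert m.munch2dict(cfg) == {"server": {"port": 8000}}
```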
-    def path2text(cls, path: str, relative=False):
-        path = cls.resolve_path(path)
-        assert os.path.exists(path), f'path {path} does not exist'
-        if os.path.isdir(path):
-            filepath_list = cls.glob(path + '/**')
-        else:
-            filepath_list = [path]
-        path2text = {}
-        for filepath in filepath_list:
-            try:
-                path2text[filepath] = cls.get_text(filepath)
-            except Exception as e:
-                pass
-        if relative:
-            pwd = cls.pwd()
-            path2text = {os.path.relpath(k, pwd): v for k, v in path2text.items()}
-        return path2text
-
-    def root_key(cls):
-        return cls.get_key()
-
-    def root_key_address(cls) -> str:
-        return cls.root_key().ss58_address
-
-    def is_root_key(cls, address: str) -> str:
-        return address == cls.root_key().ss58_address
-
-    # time within the context
-    def context_timer(cls, *args, **kwargs):
-        return cls.timer(*args, **kwargs)
-
-    def folder_structure(cls, path: str = './', search='py', max_depth: int = 5, depth: int = 0) -> dict:
-        files = cls.glob(path + '/**')
-        results = []
-        for file in files:
-            if os.path.isdir(file):
-                if depth < max_depth:
-                    results += cls.folder_structure(file, search=search, max_depth=max_depth, depth=depth+1)
-            else:
-                if search in file:
-                    results.append(file)
-
-        return results
-
-    def copy(cls, data: Any) -> Any:
-        import copy
-        return copy.deepcopy(data)
diff --git a/commune/utils/schema.py b/commune/utils/schema.py
new file mode 100644
index 00000000..65d5445a
--- /dev/null
+++ b/commune/utils/schema.py
@@ -0,0 +1,17 @@
+from typing import *
+
+def find_lines(text:str, search:str) -> List[str]:
+    """
+    Finds the lines in text with search
+    """
+    found_lines = []
+    lines = text.split('\n')
+    for line in lines:
+        if search in line:
+            found_lines += [line]
+    return found_lines
+
+def find_code_lines( search:str = None , module=None) -> List[str]:
+    import commune as c
+    code = c.module(module).code()
+    return find_lines(search=search, text=code)
diff --git a/docs/3_cli_basics.md b/docs/3_cli_basics.md
index 8f9af828..f1a11258 100644
--- a/docs/3_cli_basics.md
+++ b/docs/3_cli_basics.md
@@ -70,7 +70,7 @@ import commune as c
 class Agi(c.Module):
     def __init__(self, a=1, b=2):
-        self.set_config(kwargs=locals())
+        self.set_config(locals())
 
     def call(self, x:int = 1, y:int = 2) -> int:
         c.print(self.config)
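The new `find_lines` helper added in `commune/utils/schema.py` is a plain substring grep over lines; for instance:

```python
text = "def foo():\n    return 1\ndef bar():\n    return 2"
assert find_lines(text, "def") == ["def foo():", "def bar():"]
```

`find_code_lines` applies the same grep to a module's source via `c.module(module).code()`, so it only works inside a commune environment where that module resolves.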