diff --git a/.github/workflows/cadcad-ci.yml b/.github/workflows/cadcad-ci.yml
index 5bf094b8..b6ed6ecb 100644
--- a/.github/workflows/cadcad-ci.yml
+++ b/.github/workflows/cadcad-ci.yml
@@ -7,6 +7,7 @@ on:
     branches: [ "master" ]
   pull_request:
     branches: [ "master" ]
+  workflow_dispatch:
 
 permissions:
   contents: read
diff --git a/cadCAD/configuration/utils/__init__.py b/cadCAD/configuration/utils/__init__.py
index 8359c1b9..f1f1d3e1 100644
--- a/cadCAD/configuration/utils/__init__.py
+++ b/cadCAD/configuration/utils/__init__.py
@@ -186,7 +186,7 @@ def config_sim(config_dict: ConfigurationDict):
         raise Exception('When sweeping, `M` list lengths should either be 1 and/or equal. More than two distinct lengths are not allowed')
     elif (distinct_param_value_lengths == 1) and (0 in param_values_length_set):
         return config_dict
-    elif (1 in param_values_length_set):
+    elif (distinct_param_value_lengths == 1) or (1 in param_values_length_set):
         return [{**config_dict, "M": M} for M in flatten_tabulated_dict(tabulate_dict(params))]
diff --git a/cadCAD/tools/execution/easy_run.py b/cadCAD/tools/execution/easy_run.py
index a449bf41..5ea737e1 100644
--- a/cadCAD/tools/execution/easy_run.py
+++ b/cadCAD/tools/execution/easy_run.py
@@ -2,7 +2,7 @@
 import types
 from typing import Dict, Union
 
-import pandas as pd # type: ignore
+import pandas as pd  # type: ignore
 from cadCAD.configuration import Experiment
 from cadCAD.configuration.utils import config_sim
 from cadCAD.engine import ExecutionContext, ExecutionMode, Executor
@@ -41,14 +41,16 @@ def easy_run(
     assign_params: Union[bool, set] = True,
     drop_substeps=True,
     exec_mode='local',
+    deepcopy_off=False,
 ) -> pd.DataFrame:
     """
     Run cadCAD simulations without headaches.
     """
     # Set-up sim_config
-    simulation_parameters = {'N': N_samples, 'T': range(N_timesteps), 'M': params}
-    sim_config = config_sim(simulation_parameters) # type: ignore
+    simulation_parameters = {'N': N_samples,
+                             'T': range(N_timesteps), 'M': params}
+    sim_config = config_sim(simulation_parameters)  # type: ignore
 
     # Create a new experiment
     exp = Experiment()
 
@@ -64,7 +66,7 @@ def easy_run(
         _exec_mode = ExecutionMode().local_mode
     elif exec_mode == 'single':
         _exec_mode = ExecutionMode().single_mode
-    exec_context = ExecutionContext(_exec_mode)
+    exec_context = ExecutionContext(_exec_mode, additional_objs={'deepcopy_off': deepcopy_off})
     executor = Executor(exec_context=exec_context, configs=configs)
 
     # Execute the cadCAD experiment
@@ -91,22 +93,27 @@ def easy_run(
         if assign_params == True:
             pass
         else:
-            params_set &= assign_params # type: ignore
+            params_set &= assign_params  # type: ignore
 
         # Logic for getting the assign params criteria
         if type(assign_params) is list:
-            selected_params = set(assign_params) & params_set # type: ignore
+            selected_params = set(assign_params) & params_set  # type: ignore
         elif type(assign_params) is set:
             selected_params = assign_params & params_set
         else:
             selected_params = params_set
+        # Attribute parameters to each row
+        params_dict = select_config_M_dict(configs, 0, selected_params)
+
+        # Handles all cases of parameter types, including lists
+        for key, value in params_dict.items():
+            df[key] = df.apply(lambda _: value, axis=1)
 
-        # Attribute parameters to each row
-        df = df.assign(**select_config_M_dict(configs, 0, selected_params))
         for i, (_, n_df) in enumerate(df.groupby(['simulation', 'subset', 'run'])):
-            df.loc[n_df.index] = n_df.assign(
-                **select_config_M_dict(configs, i, selected_params)
-            )
+            params_dict = select_config_M_dict(configs, i, selected_params)
+            for key, value in params_dict.items():
+                df.loc[n_df.index, key] = df.loc[n_df.index].apply(
+                    lambda _: value, axis=1)
 
     # Based on Vitor Marthendal (@marthendalnunes) snippet
     if use_label == True:
diff --git a/setup.py b/setup.py
index f62f37e8..28a85734 100644
--- a/setup.py
+++ b/setup.py
@@ -33,6 +33,6 @@
     author_email="info@block.science",
     license="LICENSE.txt",
     packages=find_packages(),
-    install_requires=["pandas", "funcy", "dill", "pathos", "numpy", "pytz", "six"],
+    install_requires=["pandas", "funcy", "dill", "pathos", "numpy", "pytz", "six", "tqdm"],
     python_requires=">=3.9.0",
 )
diff --git a/testing/test_param_count.py b/testing/test_param_count.py
new file mode 100644
index 00000000..513c07d0
--- /dev/null
+++ b/testing/test_param_count.py
@@ -0,0 +1,77 @@
+from cadCAD.configuration import Experiment
+from cadCAD.configuration.utils import config_sim
+from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
+import pytest
+
+P_no_lst = {'pA': 1, 'pB': 2, 'pC': 3}
+P_single_lst = {'pA': [1], 'pB': [1], 'pC': [3]}
+P_single_swp = {'pA': [4, 5, 6], 'pB': [1], 'pC': [3]}
+P_all_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1, 2, 3]}
+P_all_but_one_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1]}
+Ps = [P_no_lst, P_single_lst, P_single_swp, P_all_swp, P_all_but_one_swp]
+
+CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3),
+                             (3, 1, 3, 3, 3), (1, 1, 3, 3, 3),
+                             (3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)]
+
+
+def run_experiment(exp: Experiment, mode: str):
+    exec_context = ExecutionContext(mode)
+    executor = Executor(exec_context=exec_context, configs=exp.configs)
+    (records, tensor_field, _) = executor.execute()
+    return records
+
+
+def param_count_test_suf_generator(provided_params):
+    def s_test_param_count(params, _2, _3, _4, _5):
+        assert params.keys() == provided_params.keys(), 'Params are not matching'
+        return ('varA', None)
+    return s_test_param_count
+
+
+def param_count_test_policy_generator(provided_params):
+    def p_test_param_count(params, _2, _3, _4):
+        assert params.keys() == provided_params.keys(), 'Params are not matching'
+        return {'sigA': None}
+    return p_test_param_count
+
+def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={}) -> Experiment:
+
+    INITIAL_STATE = {'varA': None}
+    PSUBs = [{'policies': {'sigA': param_count_test_policy_generator(params)}, 'variables': {'varA': param_count_test_suf_generator(params)}}] * N_substeps
+
+    SIM_CONFIG = config_sim(
+        {
+            "N": N_runs,
+            "T": range(N_timesteps),
+            "M": params,  # Optional
+        }
+    )
+
+    exp = Experiment()
+    for i_sim in range(N_simulations):
+        exp.append_model(
+            sim_configs=SIM_CONFIG,
+            initial_state=INITIAL_STATE,
+            partial_state_update_blocks=PSUBs
+        )
+    return exp
+
+
+def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps, P) -> int:
+    return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)
+
+
+@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
+@pytest.mark.parametrize("P", Ps)
+def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s, P):
+    args = (N_sim, N_sw, N_r, N_t, N_s, P)
+    len(run_experiment(create_experiments(*args), 'single_proc'))
+
+
+
+@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
+@pytest.mark.parametrize("P", Ps)
+def test_row_count_local(N_sim, N_sw, N_r, N_t, N_s, P):
+    args = (N_sim, N_sw, N_r, N_t, N_s, P)
+    len(run_experiment(create_experiments(*args), 'local_proc'))
diff --git a/testing/tests/cadCAD_memory_address.json b/testing/tests/cadCAD_memory_address.json
index c60ead7b..322a5c64 100644
--- a/testing/tests/cadCAD_memory_address.json
+++ b/testing/tests/cadCAD_memory_address.json
@@ -1 +1 @@
-{"memory_address": "0x111857380"}
\ No newline at end of file
+{"memory_address": "0x111444900"}
\ No newline at end of file
diff --git a/testing/tools/test_tools.py b/testing/tools/test_tools.py
new file mode 100644
index 00000000..93d07fcf
--- /dev/null
+++ b/testing/tools/test_tools.py
@@ -0,0 +1,66 @@
+from cadCAD.tools.execution import easy_run
+
+# def test_empty_easy_run():
+#     state_variables = {
+#     }
+#
+#     params = {
+#     }
+#
+#     psubs = [
+#         {
+#             'policies': {},
+#             'variables': {},
+#         },
+#     ]
+#
+#     N_timesteps = 2
+#
+#     N_samples = 1
+#
+#     results = easy_run(
+#         state_variables,
+#         params,
+#         psubs,
+#         N_timesteps,
+#         N_samples,
+#     )
+#     print(results)
+
+
+
+def test_easy_run():
+    state_variables = {
+        'a': 0.5,
+    }
+
+    params = {
+        'c': [1, 2],
+        'd': [1, 2],
+    }
+
+    def p(params, substep, state_history, previous_state):
+        a_delta = 1 - params['c'] * previous_state['a']
+        return {'a_delta': a_delta}
+
+    def s(params, substep, state_history, previous_state, policy_input):
+        return 'a', previous_state['a'] + policy_input['a_delta']
+
+    psubs = [
+        {
+            'policies': {'p': p},
+            'variables': {'a': s},
+        },
+    ]
+
+    N_timesteps = 2
+
+    N_samples = 1
+
+    results = easy_run(
+        state_variables,
+        params,
+        psubs,
+        N_timesteps,
+        N_samples,
+    )
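
For reviewers, a minimal sketch of the sweep case the `config_sim` change above enables: parameter lists that all share one common length (with no length-1 list among them) previously fell through the `elif` chain, and should now expand into one configuration per sweep point. The parameter names `alpha` and `beta` are made up for illustration:

```python
from cadCAD.configuration.utils import config_sim

# Both lists share a single distinct length (3) and neither has length 1,
# so only the patched branch expands this into per-sweep-point configs.
sweep_config = config_sim({
    'N': 1,
    'T': range(10),
    'M': {'alpha': [0.1, 0.2, 0.3], 'beta': [1, 2, 3]},
})

# One config per sweep point, each with scalar values in 'M', e.g.
# sweep_config[0]['M'] == {'alpha': 0.1, 'beta': 1}
assert len(sweep_config) == 3
```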
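Likewise, a sketch of how the new `deepcopy_off` flag on `easy_run` is meant to be used; it is forwarded to `ExecutionContext(additional_objs={'deepcopy_off': ...})` above. The toy model here is hypothetical, and the flag is presumably only safe when state update functions never mutate `previous_state` in place, since it bypasses the engine's per-substep deep copies:

```python
from cadCAD.tools.execution import easy_run

# Hypothetical one-variable model: x grows by a constant each timestep.
state_variables = {'x': 0}
params = {'increment': [1]}

def p_increment(params, substep, state_history, previous_state):
    return {'dx': params['increment']}

def s_x(params, substep, state_history, previous_state, policy_input):
    return 'x', previous_state['x'] + policy_input['dx']

psubs = [{'policies': {'increment': p_increment}, 'variables': {'x': s_x}}]

# deepcopy_off=True trades defensive copying for speed; safe here because
# neither p_increment nor s_x mutates previous_state in place.
df = easy_run(state_variables, params, psubs,
              N_timesteps=10, N_samples=1,
              deepcopy_off=True)
print(df.tail())
```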