From c0aea94833547f70bd6a22c0d819010d8388a7aa Mon Sep 17 00:00:00 2001
From: GiulioRossetti
Date: Thu, 21 Nov 2024 11:17:34 +0100
Subject: [PATCH] :arrow_up: LLM parameters exposed in config.json

---
 config_files/config.json       | 13 ++++++++-----
 y_client/classes/annotator.py  |  4 ++--
 y_client/classes/base_agent.py |  6 ++++--
 3 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/config_files/config.json b/config_files/config.json
index d8753e7..b786103 100644
--- a/config_files/config.json
+++ b/config_files/config.json
@@ -2,14 +2,18 @@
   "servers": {
     "llm": "http://127.0.0.1:11434/v1",
     "llm_api_key": "NULL",
+    "llm_max_tokens": -1,
+    "llm_temperature": 1.5,
     "llm_v": "http://127.0.0.1:11434/v1",
     "llm_v_api_key": "NULL",
+    "llm_v_max_tokens": 300,
+    "llm_v_temperature": 0.5,
     "api": "http://127.0.0.1:5010/"
   },
   "simulation": {
     "name": "simulation",
     "client": "YClientWithPages",
-    "days": 3,
+    "days": 30,
     "slots": 24,
     "starting_agents": 180,
     "percentage_new_agents_iteration": 0.07,
@@ -42,12 +46,11 @@
   },
   "actions_likelihood": {
     "post": 0.2,
-    "image": 0,
-    "news": 0,
+    "image": 0.15,
+    "news": 0.15,
     "comment": 0.5,
     "read": 0.2,
-    "share": 0.0,
-    "reply": 0,
+    "share": 0.2,
     "search": 0.1,
     "cast": 0.0
   }
diff --git a/y_client/classes/annotator.py b/y_client/classes/annotator.py
index 00106ef..2140850 100644
--- a/y_client/classes/annotator.py
+++ b/y_client/classes/annotator.py
@@ -22,8 +22,8 @@ def __init__(self, config):
             max_consecutive_auto_reply=1,
             llm_config={
                 "config_list": self.config_list,
-                "temperature": 0.5,
-                "max_tokens": 300,
+                "temperature": config['temperature'],
+                "max_tokens": config['max_tokens'],
             },
             human_input_mode="NEVER",
         )
diff --git a/y_client/classes/base_agent.py b/y_client/classes/base_agent.py
index 74a085d..e1550ab 100644
--- a/y_client/classes/base_agent.py
+++ b/y_client/classes/base_agent.py
@@ -81,6 +81,8 @@ def __init__(
             "url": config["servers"]["llm_v"],
             "api_key": config["servers"]["llm_v_api_key"],
             "model": config["agents"]["llm_v_agent"],
+            "temperature": config["servers"]["llm_v_temperature"],
+            "max_tokens": config["servers"]["llm_v_max_tokens"]
         }
 
         self.is_page = is_page
@@ -157,8 +159,8 @@ def __init__(
         self.llm_config = {
             "config_list": [config_list],
             "seed": np.random.randint(0, 100000),
-            "max_tokens": -1,  # max response length, -1 no limits. Imposing limits may lead to truncated responses
-            "temperature": 1.5,
+            "max_tokens": config['servers']['llm_max_tokens'],  # max response length, -1 no limits. Imposing limits may lead to truncated responses
+            "temperature": config['servers']['llm_temperature'],
         }
 
         # add and configure the content recsys
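
Reviewer note: a minimal sketch of how the keys exposed by this patch are
consumed once it is applied. The standalone loader below is illustrative
only (the config file path and the flat annotator dict are assumptions;
in the client these values are wired through the constructors shown in
the hunks above):

    import json

    # Load the simulation configuration (path assumed for illustration).
    with open("config_files/config.json") as f:
        config = json.load(f)

    # base_agent.py now reads the text-LLM sampling parameters from the
    # "servers" block instead of the former hard-coded values
    # (max_tokens=-1, temperature=1.5).
    llm_config = {
        "max_tokens": config["servers"]["llm_max_tokens"],    # -1 = no cap
        "temperature": config["servers"]["llm_temperature"],
    }

    # annotator.py reads flat "temperature"/"max_tokens" keys from the
    # config it receives, so its caller presumably forwards the llm_v_*
    # values under those plain keys.
    annotator_config = {
        "temperature": config["servers"]["llm_v_temperature"],
        "max_tokens": config["servers"]["llm_v_max_tokens"],
    }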