From 6b1c8d39424a8bac6f9306965e85ae40d0f94eab Mon Sep 17 00:00:00 2001
From: Sverre Nystad
Date: Tue, 19 Sep 2023 02:03:28 +0200
Subject: [PATCH] Refactor: Make use of get_default_text_generator instead of
 llm

---
 src/referee.py | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/src/referee.py b/src/referee.py
index 38f735b..ebecffc 100644
--- a/src/referee.py
+++ b/src/referee.py
@@ -1,8 +1,5 @@
 from enum import Enum
-
-from langchain import OpenAI
-
-from src.text_generation.config import GPTConfig
+from src.text_generation.text_generator import get_default_text_generator
 
 class Difficulty(Enum):
     """The difficulty of a action or task."""
@@ -21,14 +18,11 @@
     NEARLY_IMPOSSIBLE = 1.0
     """A task of this difficulty is almost impossible for adventurers of an appropriate skill."""
 
-api_key = GPTConfig.API_KEY
-llm: OpenAI = OpenAI(openai_api_key=api_key) if api_key is not None else None
-
 
 def decide_difficulty(context: str) -> float:
     """Decide the difficulty of the challenge based on the context."""
     prompt = get_difficulty_template(context)
-    raw_difficulty = llm.predict(prompt)
+    raw_difficulty = get_default_text_generator().predict(prompt)
     print(raw_difficulty)
     difficulty = float(raw_difficulty)
     return difficulty