diff --git a/Evaluator/TA/ai_evaluator/ai.py b/Evaluator/TA/ai_evaluator/ai.py
index a29cab661..93ebbeede 100644
--- a/Evaluator/TA/ai_evaluator/ai.py
+++ b/Evaluator/TA/ai_evaluator/ai.py
@@ -55,7 +55,7 @@ def __init__(self, tentacles_setup_config):
         self.indicator = None
         self.source = None
         self.period = None
-        self.min_confidence_threshold = 0
+        self.min_confidence_threshold = 100
         self.gpt_model = gpt_service.GPTService.DEFAULT_MODEL
         self.is_backtesting = False
         self.min_allowed_timeframe = os.getenv("MIN_GPT_TIMEFRAME", None)
diff --git a/Evaluator/TA/ai_evaluator/tests/test_ai.py b/Evaluator/TA/ai_evaluator/tests/test_ai.py
index cf644546c..145573cce 100644
--- a/Evaluator/TA/ai_evaluator/tests/test_ai.py
+++ b/Evaluator/TA/ai_evaluator/tests/test_ai.py
@@ -37,7 +37,8 @@ def test_indicators(GPT_evaluator):
 def test_get_candles_data_api(GPT_evaluator):
     for source in GPT_evaluator.SOURCES:
         GPT_evaluator.source = source
-        assert isinstance(GPT_evaluator.get_candles_data_api(), types.FunctionType)
+        if GPT_evaluator.source not in GPT_evaluator.get_unformated_sources():
+            assert isinstance(GPT_evaluator.get_candles_data_api(), types.FunctionType)
 
 
 def test_parse_prediction_side(GPT_evaluator):
@@ -57,3 +58,7 @@ def test_parse_confidence(GPT_evaluator):
     assert GPT_evaluator._parse_confidence("up 54.33%") == 54.33
     assert GPT_evaluator._parse_confidence("down 70% confidence blablabla") == 70
     assert GPT_evaluator._parse_confidence("Prediction: down 70%") == 70
+    GPT_evaluator.min_confidence_threshold = 60
+    assert GPT_evaluator._parse_confidence("up 70%") == 100
+    assert GPT_evaluator._parse_confidence("up 60%") == 100
+    assert GPT_evaluator._parse_confidence("up 59%") == 59