Commit f016c10
[GPT] ask gpt in async_evaluation
GuillaumeDSM committed Oct 27, 2023
1 parent 505eae5 · commit f016c10
Showing 2 changed files with 41 additions and 40 deletions.
Evaluator/TA/ai_evaluator/ai.py: 79 changes (40 additions & 39 deletions)

@@ -153,42 +153,43 @@ async def ohlcv_callback(self, exchange: str, exchange_id: str,
         await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle)
 
     async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle):
-        self.eval_note = commons_constants.START_PENDING_EVAL_NOTE
-        if self._check_timeframe(time_frame):
-            try:
-                candle_time = candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value]
-                computed_data = self.call_indicator(candle_data)
-                reduced_data = computed_data[-self.PASSED_DATA_LEN:]
-                formatted_data = ", ".join(str(datum).replace('[', '').replace(']', '') for datum in reduced_data)
-                prediction = await self.ask_gpt(self.PREPROMPT, formatted_data, symbol, time_frame, candle_time)
-                cleaned_prediction = prediction.strip().replace("\n", "").replace(".", "").lower()
-                prediction_side = self._parse_prediction_side(cleaned_prediction)
-                if prediction_side == 0 and not self.is_backtesting:
-                    self.logger.error(f"Error when reading GPT answer: {cleaned_prediction}")
-                    return
-                confidence = self._parse_confidence(cleaned_prediction) / 100
-                self.eval_note = prediction_side * confidence
-            except services_errors.InvalidRequestError as e:
-                self.logger.error(f"Invalid GPT request: {e}")
-            except services_errors.RateLimitError as e:
-                self.logger.error(f"Too many requests: {e}")
-            except services_errors.UnavailableInBacktestingError:
-                # error already logged for backtesting in use_backtesting_init_timeout
-                pass
-            except evaluators_errors.UnavailableEvaluatorError as e:
-                self.logger.exception(e, True, f"Evaluation error: {e}")
-            except tulipy.lib.InvalidOptionError as e:
-                self.logger.warning(
-                    f"Error when computing {self.indicator} on {self.period} period with {len(candle_data)} "
-                    f"candles: {e}"
-                )
-                self.logger.exception(e, False)
-        else:
-            self.logger.debug(f"Ignored {time_frame} time frame as the shortest allowed time frame is "
-                              f"{self.min_allowed_timeframe}")
-        await self.evaluation_completed(cryptocurrency, symbol, time_frame,
-                                        eval_time=evaluators_util.get_eval_time(full_candle=candle,
-                                                                                time_frame=time_frame))
+        async with self.async_evaluation():
+            self.eval_note = commons_constants.START_PENDING_EVAL_NOTE
+            if self._check_timeframe(time_frame):
+                try:
+                    candle_time = candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value]
+                    computed_data = self.call_indicator(candle_data)
+                    reduced_data = computed_data[-self.PASSED_DATA_LEN:]
+                    formatted_data = ", ".join(str(datum).replace('[', '').replace(']', '') for datum in reduced_data)
+                    prediction = await self.ask_gpt(self.PREPROMPT, formatted_data, symbol, time_frame, candle_time)
+                    cleaned_prediction = prediction.strip().replace("\n", "").replace(".", "").lower()
+                    prediction_side = self._parse_prediction_side(cleaned_prediction)
+                    if prediction_side == 0 and not self.is_backtesting:
+                        self.logger.error(f"Error when reading GPT answer: {cleaned_prediction}")
+                        return
+                    confidence = self._parse_confidence(cleaned_prediction) / 100
+                    self.eval_note = prediction_side * confidence
+                except services_errors.InvalidRequestError as e:
+                    self.logger.error(f"Invalid GPT request: {e}")
+                except services_errors.RateLimitError as e:
+                    self.logger.error(f"Too many requests: {e}")
+                except services_errors.UnavailableInBacktestingError:
+                    # error already logged for backtesting in use_backtesting_init_timeout
+                    pass
+                except evaluators_errors.UnavailableEvaluatorError as e:
+                    self.logger.exception(e, True, f"Evaluation error: {e}")
+                except tulipy.lib.InvalidOptionError as e:
+                    self.logger.warning(
+                        f"Error when computing {self.indicator} on {self.period} period with {len(candle_data)} "
+                        f"candles: {e}"
+                    )
+                    self.logger.exception(e, False)
+            else:
+                self.logger.debug(f"Ignored {time_frame} time frame as the shortest allowed time frame is "
+                                  f"{self.min_allowed_timeframe}")
+            await self.evaluation_completed(cryptocurrency, symbol, time_frame,
+                                            eval_time=evaluators_util.get_eval_time(full_candle=candle,
+                                                                                    time_frame=time_frame))
 
     async def ask_gpt(self, preprompt, inputs, symbol, time_frame, candle_time) -> str:
         try:
@@ -214,11 +215,11 @@ async def ask_gpt(self, preprompt, inputs, symbol, time_frame, candle_time) -> str:
             return resp
         except services_errors.CreationError as err:
             raise evaluators_errors.UnavailableEvaluatorError(f"Impossible to get ChatGPT prediction: {err}") from err
-        except Exception as err:
-            print(err)
 
     def get_version(self):
-        return f"{self.gpt_model}-{self.source}-{self.indicator}-{self.period}-{self.GLOBAL_VERSION}"
+        # later on, identify by its specs
+        # return f"{self.gpt_model}-{self.source}-{self.indicator}-{self.period}-{self.GLOBAL_VERSION}"
+        return "0.0.0"
 
     def call_indicator(self, candle_data):
         return data_util.drop_nan(self.INDICATORS[self.indicator](candle_data, self.period))
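The headline change is that the whole body of evaluate() now runs inside async with self.async_evaluation(). The context manager itself is not part of this diff (it comes from the evaluator base class), so the snippet below is only a minimal sketch of the pattern under assumed names: an async context manager that marks an evaluation as in progress and signals completion on any exit, so concurrent readers never observe a half-updated eval_note.

# Minimal sketch of an async evaluation guard. Only async_evaluation() is
# confirmed by this commit; every other name here is an assumption.
import asyncio
import contextlib

class EvaluatorSketch:
    def __init__(self):
        self.eval_note = 0
        self._evaluation_done = asyncio.Event()
        self._evaluation_done.set()  # no evaluation running at startup

    @contextlib.asynccontextmanager
    async def async_evaluation(self):
        # flag the evaluation as in progress, then always signal completion,
        # even when the wrapped body raises or returns early
        self._evaluation_done.clear()
        try:
            yield
        finally:
            self._evaluation_done.set()

    async def wait_for_evaluation(self):
        # consumers await this instead of reading eval_note mid-update
        await self._evaluation_done.wait()

Note that the early return on an unreadable GPT answer still releases the guard: leaving the async with body runs the finally block no matter how the body exits.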
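For a concrete sense of the resulting eval_note: evaluate() reduces the cleaned GPT answer to a side and a confidence percentage, then multiplies them. Neither _parse_prediction_side nor _parse_confidence is shown in this diff, so the toy stand-ins below are assumptions that only match how evaluate() calls them (a side of 0 means the answer was unreadable, and the confidence is divided by 100).

# Toy stand-ins for the parsing helpers; the implementations and the sign
# convention (up = +1) are assumptions, not taken from this commit.
import re

def parse_prediction_side(cleaned_prediction: str) -> int:
    if "up" in cleaned_prediction:
        return 1   # assumed bullish sign
    if "down" in cleaned_prediction:
        return -1  # assumed bearish sign
    return 0       # unreadable answer; evaluate() logs an error for this

def parse_confidence(cleaned_prediction: str) -> float:
    match = re.search(r"(\d+(?:\.\d+)?)\s*%", cleaned_prediction)
    return float(match.group(1)) if match else 0.0

# e.g. an answer cleaned down to "up, confidence: 70%" gives
# side = 1 and confidence = 70 / 100, hence eval_note = 1 * 0.7 = 0.7
cleaned = "up, confidence: 70%"
print(parse_prediction_side(cleaned) * parse_confidence(cleaned) / 100)  # 0.7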
Second changed file: 1 addition & 1 deletion

@@ -36,7 +36,7 @@ async def fake_backtesting(backtesting_config):
         config=backtesting_config,
         exchange_ids=[],
         matrix_id="",
-        backtesting_files=[]
+        backtesting_files=[],
     )
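The second file only gains a trailing comma after backtesting_files=[], a cosmetic change. For context, fake_backtesting builds a stub backtesting object for tests; a hypothetical pytest-asyncio style usage could look like the following (the decorator and test body are assumptions, not part of this commit):

# Hypothetical test consuming fake_backtesting; names other than
# fake_backtesting and backtesting_config are assumptions.
import pytest

@pytest.mark.asyncio
async def test_with_fake_backtesting(backtesting_config):
    backtesting = await fake_backtesting(backtesting_config)
    # the GPT evaluator checks is_backtesting before treating an unreadable
    # answer as an error, which is the kind of path such a stub exercises
    assert backtesting is not None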


