
Commit

Set relevance to zero when completion is empty, and lower the logging level of save state to debug
steffencruz committed Jan 18, 2024
1 parent 5b14712 commit a8c2c1e
Showing 2 changed files with 9 additions and 5 deletions.
4 changes: 2 additions & 2 deletions prompting/base/neuron.py
@@ -165,11 +165,11 @@ def should_set_weights(self) -> bool:
         ) > self.config.neuron.epoch_length

     def save_state(self):
-        bt.logging.warning(
+        bt.logging.debug(
             "save_state() not implemented for this neuron. You can implement this function to save model checkpoints or other useful data."
         )

     def load_state(self):
-        bt.logging.warning(
+        bt.logging.debug(
             "load_state() not implemented for this neuron. You can implement this function to load model checkpoints or other useful data."
         )
10 changes: 7 additions & 3 deletions prompting/rewards/relevance.py
@@ -32,13 +32,17 @@ def reward(
         self, reference: str, completions: List[str]
     ) -> BatchRewardOutput:
         reference_embedding = self.model.encode(reference, to_numpy=False)
-        completions_embeddings = self.model.encode(completions, to_numpy=False)
         rewards = []
         timings = []

-        for emb in completions_embeddings:
+        for comp in completions:
             t0 = time.time()
-            rewards.append(cosine_similarity(reference_embedding.reshape(1, -1), emb.reshape(1, -1)))
+            score = 0
+            if comp:
+                emb = self.model.encode(completions, to_numpy=False)
+                score = cosine_similarity(reference_embedding.reshape(1, -1), emb.reshape(1, -1))
+
+            rewards.append(score)
             timings.append(time.time() - t0)

         output = BatchRewardOutput(
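For reference, below is a minimal, self-contained sketch of the behavior this commit introduces: an empty completion is scored 0 instead of being embedded, while non-empty completions are scored by cosine similarity against the reference embedding. The SentenceTransformer encoder, the model name, the simplified BatchRewardOutput dataclass, and the per-completion encode call are assumptions for illustration only; they do not reproduce the repository's actual classes or the committed code verbatim.

import time
from dataclasses import dataclass
from typing import List

import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity


@dataclass
class BatchRewardOutput:
    # Illustrative container; the real BatchRewardOutput in the repo may differ.
    rewards: np.ndarray
    timings: np.ndarray


class RelevanceRewardModel:
    def __init__(self, model_name: str = "all-MiniLM-L6-v2"):
        # Hypothetical encoder choice for the sketch.
        self.model = SentenceTransformer(model_name)

    def reward(self, reference: str, completions: List[str]) -> BatchRewardOutput:
        # Embed the reference answer once.
        reference_embedding = self.model.encode(reference)
        rewards, timings = [], []

        for comp in completions:
            t0 = time.time()
            score = 0.0
            if comp:  # empty completions are not embedded; they score zero
                emb = self.model.encode(comp)
                score = float(
                    cosine_similarity(
                        reference_embedding.reshape(1, -1), emb.reshape(1, -1)
                    )[0][0]
                )
            rewards.append(score)
            timings.append(time.time() - t0)

        return BatchRewardOutput(rewards=np.array(rewards), timings=np.array(timings))


# Example usage:
# model = RelevanceRewardModel()
# out = model.reward("The capital of France is Paris.", ["Paris is the capital.", ""])
# out.rewards[-1] is 0.0 because the last completion is empty.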
