Skip to content

Commit

Permalink
Merge pull request #38 from opentensor/hotfix/pre-release
Browse files Browse the repository at this point in the history
Hotfix/pre release
  • Loading branch information
steffencruz authored Jan 22, 2024
2 parents 22dbb05 + bd4e52a commit 5b57782
Show file tree
Hide file tree
Showing 6 changed files with 135 additions and 99 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/python-package.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ jobs:
run: |
python -m pip install --upgrade pip
python -m pip install flake8 pytest black
pip install -e .
pip install -e .
- name: Lint with flake8
run: |
Expand Down
4 changes: 3 additions & 1 deletion neurons/miner.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ async def priority(self, synapse: PromptingSynapse) -> float:
)
return prirority

def log_event(self, timing: float, prompt: str, completion: str, system_prompt: str):
def log_event(self, timing: float, prompt: str, completion: str, system_prompt: str, extra_info: dict):
step_log = {
"epoch_time": timing,
# "block": self.last_epoch_block,
Expand All @@ -140,8 +140,10 @@ def log_event(self, timing: float, prompt: str, completion: str, system_prompt:
"incentive": self.metagraph.I[self.uid].item(),
"consensus": self.metagraph.C[self.uid].item(),
"dividends": self.metagraph.D[self.uid].item(),
**extra_info
}

bt.logging.info('Logging event to wandb...', step_log)
wandb.log(step_log)


Expand Down
107 changes: 56 additions & 51 deletions neurons/miners/openai/miner.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@
from langchain_core.output_parsers import StrOutputParser
from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv, find_dotenv
from langchain.callbacks import get_openai_callback



class OpenAIMiner(Miner):
Expand All @@ -41,33 +43,10 @@ def add_args(cls, parser: argparse.ArgumentParser):
parser.add_argument(
"--openai.model_name",
type=str,
default="gpt-4",
default="gpt-4-1106-preview",
help="OpenAI model to use for completion.",
)

parser.add_argument(
"--wandb.on",
type=bool,
default=False,
help="Enable wandb logging.",
)

parser.add_argument(
"--wandb.entity",
type=str,
default="<<Add your wandb entity here>>",
help="Wandb entity to log to.",
)

parser.add_argument(
"--wandb.project_name",
type=str,
default="<<Add your wandb project name here>>",
help="Wandb project to log to.",
)



def __init__(self, config=None):
super().__init__(config=config)

Expand All @@ -86,6 +65,32 @@ def __init__(self, config=None):
)

self.system_prompt = "You are a friendly chatbot who always responds concisely and helpfully. You are honest about things you don't know."
self.accumulated_total_tokens = 0
self.accumulated_prompt_tokens = 0
self.accumulated_completion_tokens = 0
self.accumulated_total_cost = 0

def get_cost_logging(self, cb):
    """Log token usage and cost for one OpenAI call and update running totals.

    Args:
        cb: Callback handler (from langchain's ``get_openai_callback``)
            exposing ``total_tokens``, ``prompt_tokens``,
            ``completion_tokens`` and ``total_cost`` for the completed call.

    Returns:
        dict: This call's token counts and cost plus the accumulated
        totals across every call served by this miner instance.
    """
    # Snapshot the per-call figures before folding them into the totals.
    per_call = {
        'total_tokens': cb.total_tokens,
        'prompt_tokens': cb.prompt_tokens,
        'completion_tokens': cb.completion_tokens,
        'total_cost': cb.total_cost,
    }

    bt.logging.info(f"Total Tokens: {per_call['total_tokens']}")
    bt.logging.info(f"Prompt Tokens: {per_call['prompt_tokens']}")
    bt.logging.info(f"Completion Tokens: {per_call['completion_tokens']}")
    bt.logging.info(f"Total Cost (USD): ${per_call['total_cost']}")

    # Fold this call into the lifetime totals kept on the miner.
    self.accumulated_total_tokens += per_call['total_tokens']
    self.accumulated_prompt_tokens += per_call['prompt_tokens']
    self.accumulated_completion_tokens += per_call['completion_tokens']
    self.accumulated_total_cost += per_call['total_cost']

    accumulated = {
        'accumulated_total_tokens': self.accumulated_total_tokens,
        'accumulated_prompt_tokens': self.accumulated_prompt_tokens,
        'accumulated_completion_tokens': self.accumulated_completion_tokens,
        'accumulated_total_cost': self.accumulated_total_cost,
    }
    return {**per_call, **accumulated}

async def forward(
self, synapse: PromptingSynapse
Expand All @@ -105,35 +110,35 @@ async def forward(
"""
# TODO(developer): Replace with actual implementation logic.
try:

t0 = time.time()
bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")

prompt = ChatPromptTemplate.from_messages([
("system", self.system_prompt),
("user", "{input}")
])
chain = prompt | self.model | StrOutputParser()

role = synapse.roles[-1]
message = synapse.messages[-1]

bt.logging.debug(f"💬 Querying openai: {prompt}")
response = chain.invoke(
{"role": role, "input": message}
)

synapse.completion = response
synapse_latency = time.time() - t0

if self.config.wandb.on:
self.log_event(
timing=synapse_latency,
prompt=message,
completion=response,
system_prompt=self.system_prompt
with get_openai_callback() as cb:
t0 = time.time()
bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")

prompt = ChatPromptTemplate.from_messages([
("system", self.system_prompt),
("user", "{input}")
])
chain = prompt | self.model | StrOutputParser()

role = synapse.roles[-1]
message = synapse.messages[-1]

bt.logging.debug(f"💬 Querying openai: {prompt}")
response = chain.invoke(
{"role": role, "input": message}
)

synapse.completion = response
synapse_latency = time.time() - t0

if self.config.wandb.on:
self.log_event(
timing=synapse_latency,
prompt=message,
completion=response,
system_prompt=self.system_prompt,
extra_info=self.get_cost_logging(cb)
)

bt.logging.debug(f"✅ Served Response: {response}")
return synapse
Expand Down
96 changes: 52 additions & 44 deletions neurons/miners/wiki_agent/miner.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
from langchain import OpenAI
from langchain.agents import Tool, initialize_agent
from agent import WikiAgent
from langchain.callbacks import get_openai_callback


class WikipediaAgentMiner(Miner):
Expand All @@ -45,31 +46,9 @@ def add_args(cls, parser: argparse.ArgumentParser):
parser.add_argument(
"--openai.model_name",
type=str,
default="gpt-4",
default="gpt-4-1106-preview",
help="OpenAI model to use for completion.",
)

parser.add_argument(
"--wandb.on",
type=bool,
default=False,
help="Enable wandb logging.",
)

parser.add_argument(
"--wandb.entity",
type=str,
default="<<Add your wandb entity here>>",
help="Wandb entity to log to.",
)

parser.add_argument(
"--wandb.project_name",
type=str,
default="<<Add your wandb project name here>>",
help="Wandb project to log to.",
)

)

def __init__(self, config=None):
super().__init__(config=config)
Expand All @@ -80,9 +59,35 @@ def __init__(self, config=None):
self.wandb_run.tags = self.wandb_run.tags + ("wikipedia_agent_miner", ) + (self.config.openai.model_name, )

_ = load_dotenv(find_dotenv())



self.agent = WikiAgent()
self.accumulated_total_tokens = 0
self.accumulated_prompt_tokens = 0
self.accumulated_completion_tokens = 0
self.accumulated_total_cost = 0


def get_cost_logging(self, cb):
    """Log token usage and cost for one OpenAI call and update running totals.

    Args:
        cb: Callback handler (from langchain's ``get_openai_callback``)
            exposing ``total_tokens``, ``prompt_tokens``,
            ``completion_tokens`` and ``total_cost`` for the completed call.

    Returns:
        dict: This call's token counts and cost plus the accumulated
        totals across every call served by this miner instance.
    """
    # Unpack once so the logging / accumulation below reads cleanly.
    total, prompt, completion, cost = (
        cb.total_tokens,
        cb.prompt_tokens,
        cb.completion_tokens,
        cb.total_cost,
    )

    bt.logging.info(f"Total Tokens: {total}")
    bt.logging.info(f"Prompt Tokens: {prompt}")
    bt.logging.info(f"Completion Tokens: {completion}")
    bt.logging.info(f"Total Cost (USD): ${cost}")

    # Fold this call into the lifetime totals kept on the miner.
    self.accumulated_total_tokens += total
    self.accumulated_prompt_tokens += prompt
    self.accumulated_completion_tokens += completion
    self.accumulated_total_cost += cost

    return {
        'total_tokens': total,
        'prompt_tokens': prompt,
        'completion_tokens': completion,
        'total_cost': cost,
        'accumulated_total_tokens': self.accumulated_total_tokens,
        'accumulated_prompt_tokens': self.accumulated_prompt_tokens,
        'accumulated_completion_tokens': self.accumulated_completion_tokens,
        'accumulated_total_cost': self.accumulated_total_cost,
    }


async def forward(
Expand All @@ -103,24 +108,27 @@ async def forward(
"""
# TODO(developer): Replace with actual implementation logic.
try:
t0 = time.time()
bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")

message = synapse.messages[-1]

bt.logging.debug(f"💬 Querying openai and wikipedia: {message}")

response = self.agent.run(message)

synapse.completion = response
synapse_latency = time.time() - t0

self.log_event(
timing=synapse_latency,
prompt=message,
completion=response,
system_prompt=None
)
with get_openai_callback() as cb:
t0 = time.time()
bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")

message = synapse.messages[-1]

bt.logging.debug(f"💬 Querying openai and wikipedia: {message}")

response = self.agent.run(message)

synapse.completion = response
synapse_latency = time.time() - t0

if self.config.wandb.on:
self.log_event(
timing=synapse_latency,
prompt=message,
completion=response,
system_prompt='',
extra_info=self.get_cost_logging(cb)
)

bt.logging.debug(f"✅ Served Response: {response}")
return synapse
Expand Down
23 changes: 22 additions & 1 deletion prompting/utils/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -191,6 +191,27 @@ def add_miner_args(cls, parser):
help="Nucleus sampling parameter, top_p probability mass.",
)

parser.add_argument(
"--wandb.on",
type=bool,
default=False,
help="Enable wandb logging.",
)

parser.add_argument(
"--wandb.entity",
type=str,
default="opentensor-dev",
help="Wandb entity to log to.",
)

parser.add_argument(
"--wandb.project_name",
type=str,
default="alpha-miners",
help="Wandb project to log to.",
)

def add_validator_args(cls, parser):
"""Add validator specific arguments to the parser."""

Expand Down Expand Up @@ -221,7 +242,7 @@ def add_validator_args(cls, parser):
type=float,
nargs="+",
help="The probability of sampling each task.",
default=[0.3, 0.3, 0.1, 0.1, 0.2],
default=[0.5, 0.5, 0.0, 0.0, 0.0],
)

parser.add_argument(
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,4 @@ scipy==1.10.1
sentencepiece
wandb==0.15.10
tenacity
antlr4-python3-runtime==4.11
antlr4-python3-runtime==4.11

0 comments on commit 5b57782

Please sign in to comment.