-
Notifications
You must be signed in to change notification settings - Fork 47
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add more extraction code
- Loading branch information
Showing
8 changed files
with
124 additions
and
30 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,27 @@ | ||
from langchain.smith import RunEvalConfig | ||
from pydantic import BaseModel | ||
|
||
|
||
def get_eval_config(eval_llm: BaseModel) -> RunEvalConfig:
    """Build the evaluation configuration used to grade the email task.

    Args:
        eval_llm: Judge LLM handed to the labeled score-string evaluator.

    Returns:
        A ``RunEvalConfig`` with a single ``LabeledScoreString`` evaluator
        whose 1-10 scores are normalized to the 0-1 range.
    """
    # Rubric text is passed verbatim to the evaluator prompt.
    accuracy_rubric = {
        "accuracy": """
Score 1: The answer is incorrect and unrelated to the question or reference document.
Score 3: The answer is partially correct but has more than one omission or major errors.
Score 5: The answer is mostly correct but has more than one omission or major error.
Score 7: The answer is mostly correct but has at most one omission or major error.
Score 9: The answer is mostly correct with no omissions and only minor errors, and aligns with the reference document.
Score 10: The answer is correct, complete, and aligns with the reference document. Extra information is acceptable if it is sensible.
If the reference answer contains multiple alternatives, the predicted answer must only match one of the alternatives to be considered correct.
If the predicted answer contains additional helpful and accurate information that is not present in the reference answer, it should still be considered correct and not be penalized.
"""  # noqa
    }
    scorer = RunEvalConfig.LabeledScoreString(
        criteria=accuracy_rubric,
        llm=eval_llm,
        normalize_by=10.0,
    )
    return RunEvalConfig(evaluators=[scorer])
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,68 @@ | ||
"""Default implementations of LLMs that can be used for extraction.""" | ||
from typing import Type, Optional, List, Any, Dict | ||
|
||
from langchain.chains.openai_functions import convert_to_openai_function | ||
from langchain.chat_models import ChatOpenAI | ||
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser | ||
from langchain.schema.runnable import Runnable | ||
from langsmith.client import Client | ||
from pydantic import BaseModel | ||
|
||
from langchain_benchmarks.extraction.evaluators import get_eval_config | ||
from langchain_benchmarks.schema import ExtractionTask | ||
|
||
# PUBLIC API | ||
|
||
|
||
def create_openai_function_based_extractor(
    llm: Runnable,
    schema: Type[BaseModel],
) -> Runnable[dict, dict]:
    """Create an extraction chain that uses an LLM to extract a schema.
    The underlying functionality is exclusively for LLMs that support
    extraction using openai functions format.
    Args:
        llm: The LLM to use for extraction.
        schema: The schema to extract.
    Returns:
        An llm that will extract the schema
    """
    function_spec = convert_to_openai_function(schema)
    # Force the model to invoke the extraction function rather than
    # answering free-form, then parse the function arguments as JSON.
    bound_llm = llm.bind(
        functions=[function_spec],
        function_call={"name": function_spec["name"]},
    )
    parser = JsonOutputFunctionsParser()
    return bound_llm | parser | (lambda parsed: {"output": parsed})
|
||
|
||
def run_on_dataset(
    task: ExtractionTask,
    llm: Runnable,
    *,
    tags: Optional[List[str]] = None,
    **kwargs: Any,
) -> Dict[str, Any]:
    """Run an LLM on a dataset.
    Args:
        task: The task to run on.
        llm: The LLM to run.
        tags: The tags to use for the run.
        kwargs: Additional arguments to pass to the client.
    Returns:
        The result dict produced by ``Client.run_on_dataset``.
    """
    # Fixed model, zero temperature, and a pinned seed keep grading
    # as deterministic as the judge model allows.
    judge_llm = ChatOpenAI(model="gpt-4", temperature=0.0, model_kwargs={"seed": 42})
    extractor = create_openai_function_based_extractor(llm, task.schema)
    client = Client()
    return client.run_on_dataset(
        dataset_name=task.name,
        llm_or_chain_factory=extractor,
        evaluation=get_eval_config(judge_llm),
        tags=tags,
        **kwargs,
    )
Empty file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,3 +1,2 @@ | ||
def test_email_extraction() -> None:
    """Smoke test: the email task module is importable."""
    import langchain_benchmarks.extraction.email_task  # noqa: F401
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,3 @@ | ||
def test_import_stuff() -> None:
    """Smoke test: the evaluator and implementation modules are importable."""
    import langchain_benchmarks.extraction.evaluators  # noqa: F401
    import langchain_benchmarks.extraction.implementations  # noqa: F401