diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 82b7a3452e968..dc4d97722699e 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,7 +1,7 @@ Thank you for contributing to LangChain! - [ ] **PR title**: "package: description" - - Where "package" is whichever of langchain, community, core, experimental, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes. + - Where "package" is whichever of langchain, community, core, etc. is being modified. Use "docs: ..." for purely docs changes, "templates: ..." for template changes, "infra: ..." for CI changes. - Example: "community: add foobar LLM" diff --git a/.github/scripts/check_diff.py b/.github/scripts/check_diff.py index fc2ce26f1baf2..9ed13da9a0586 100644 --- a/.github/scripts/check_diff.py +++ b/.github/scripts/check_diff.py @@ -15,7 +15,6 @@ "libs/text-splitters", "libs/langchain", "libs/community", - "libs/experimental", ] # when set to True, we are ignoring core dependents @@ -153,14 +152,19 @@ def _get_pydantic_test_configs( core_min_pydantic_version = get_min_version_from_toml( "./libs/core/pyproject.toml", "release", python_version, include=["pydantic"] )["pydantic"] - core_min_pydantic_minor = core_min_pydantic_version.split(".")[1] if "." in core_min_pydantic_version else "0" - dir_min_pydantic_version = ( - get_min_version_from_toml( - f"./{dir_}/pyproject.toml", "release", python_version, include=["pydantic"] - ) - .get("pydantic", "0.0.0") + core_min_pydantic_minor = ( + core_min_pydantic_version.split(".")[1] + if "." in core_min_pydantic_version + else "0" + ) + dir_min_pydantic_version = get_min_version_from_toml( + f"./{dir_}/pyproject.toml", "release", python_version, include=["pydantic"] + ).get("pydantic", "0.0.0") + dir_min_pydantic_minor = ( + dir_min_pydantic_version.split(".")[1] + if "." in dir_min_pydantic_version + else "0" ) - dir_min_pydantic_minor = dir_min_pydantic_version.split(".")[1] if "." in dir_min_pydantic_version else "0" custom_mins = { # depends on pydantic-settings 2.4 which requires pydantic 2.7 diff --git a/.github/workflows/_test_doc_imports.yml b/.github/workflows/_test_doc_imports.yml index 4166b2c2d6335..58c119ead7a74 100644 --- a/.github/workflows/_test_doc_imports.yml +++ b/.github/workflows/_test_doc_imports.yml @@ -31,7 +31,7 @@ jobs: - name: Install langchain editable run: | - poetry run pip install -e libs/core libs/langchain libs/community libs/experimental + poetry run pip install -e libs/core libs/langchain libs/community - name: Check doc imports shell: bash diff --git a/docs/api_reference/conf.py b/docs/api_reference/conf.py index 0ddf1ce8cf9d8..f8c8895a2d1de 100644 --- a/docs/api_reference/conf.py +++ b/docs/api_reference/conf.py @@ -26,7 +26,6 @@ _DIR = Path(__file__).parent.absolute() sys.path.insert(0, os.path.abspath(".")) sys.path.insert(0, os.path.abspath("../../libs/langchain")) -sys.path.insert(0, os.path.abspath("../../libs/experimental")) with (_DIR.parents[1] / "libs" / "langchain" / "pyproject.toml").open("r") as f: data = toml.load(f) diff --git a/docs/docs/contributing/code/setup.mdx b/docs/docs/contributing/code/setup.mdx index 5e983d30fbecf..c5ad124f5916d 100644 --- a/docs/docs/contributing/code/setup.mdx +++ b/docs/docs/contributing/code/setup.mdx @@ -73,9 +73,9 @@ make docker_tests There are also [integration tests and code-coverage](/docs/contributing/testing/) available. 
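A note on the `check_diff.py` hunk earlier in this diff: the reflow is behavior-preserving; both versions take the minor component of a version string and fall back to "0" when the string has no dot. A minimal standalone sketch of that parsing (the `minor_of` helper name is hypothetical; the script inlines the expression):

```python
def minor_of(version: str) -> str:
    # "2.7.4" -> "7"; a bare major like "2" has no dot, so fall back to "0"
    return version.split(".")[1] if "." in version else "0"

assert minor_of("2.7.4") == "7"
assert minor_of("2") == "0"
```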
-### Only develop langchain_core or langchain_experimental +### Only develop langchain_core or langchain_community -If you are only developing `langchain_core` or `langchain_experimental`, you can simply install the dependencies for the respective projects and run tests: +If you are only developing `langchain_core` or `langchain_community`, you can simply install the dependencies for the respective projects and run tests: ```bash cd libs/core @@ -86,7 +86,7 @@ make test Or: ```bash -cd libs/experimental +cd libs/community poetry install --with test make test ``` diff --git a/docs/vercel_requirements.txt b/docs/vercel_requirements.txt index 568cd2b582c37..3c3f6fe124f8c 100644 --- a/docs/vercel_requirements.txt +++ b/docs/vercel_requirements.txt @@ -1,7 +1,6 @@ -e ../libs/core -e ../libs/langchain -e ../libs/community --e ../libs/experimental -e ../libs/text-splitters langchain-cohere urllib3==1.26.19 diff --git a/libs/experimental/LICENSE b/libs/experimental/LICENSE deleted file mode 100644 index 3957738673765..0000000000000 --- a/libs/experimental/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) LangChain, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/libs/experimental/Makefile b/libs/experimental/Makefile deleted file mode 100644 index af7fa87ab9859..0000000000000 --- a/libs/experimental/Makefile +++ /dev/null @@ -1,67 +0,0 @@ -.PHONY: all format lint test tests test_watch integration_tests docker_tests help extended_tests - -# Default target executed when no arguments are given to make. -all: help - -# Define a variable for the test file path. -TEST_FILE ?= tests/unit_tests/ - -test: - poetry run pytest $(TEST_FILE) - -tests: - poetry run pytest $(TEST_FILE) - -test_watch: - poetry run ptw --now . -- tests/unit_tests - -extended_tests: - poetry run pytest --only-extended tests/unit_tests - -integration_tests: - poetry run pytest tests/integration_tests - -check_imports: $(shell find langchain_experimental -name '*.py') - poetry run python ./scripts/check_imports.py $^ - - -###################### -# LINTING AND FORMATTING -###################### - -# Define a variable for Python and notebook files. -PYTHON_FILES=. -MYPY_CACHE=.mypy_cache -lint format: PYTHON_FILES=. 
-lint_diff format_diff: PYTHON_FILES=$(shell git diff --relative=libs/experimental --name-only --diff-filter=d master | grep -E '\.py$$|\.ipynb$$') -lint_package: PYTHON_FILES=langchain_experimental -lint_tests: PYTHON_FILES=tests -lint_tests: MYPY_CACHE=.mypy_cache_test - -lint lint_diff lint_package lint_tests: - [ "$(PYTHON_FILES)" = "" ] || poetry run ruff check $(PYTHON_FILES) - [ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) --diff - [ "$(PYTHON_FILES)" = "" ] || mkdir -p $(MYPY_CACHE) && poetry run mypy $(PYTHON_FILES) --cache-dir $(MYPY_CACHE) - -format format_diff: - [ "$(PYTHON_FILES)" = "" ] || poetry run ruff format $(PYTHON_FILES) - [ "$(PYTHON_FILES)" = "" ] || poetry run ruff check --select I --fix $(PYTHON_FILES) - -spell_check: - poetry run codespell --toml pyproject.toml - -spell_fix: - poetry run codespell --toml pyproject.toml -w - -###################### -# HELP -###################### - -help: - @echo '----' - @echo 'format - run code formatters' - @echo 'lint - run linters' - @echo 'test - run unit tests' - @echo 'tests - run unit tests' - @echo 'test TEST_FILE= - run all tests in file' - @echo 'test_watch - run unit tests in watch mode' diff --git a/libs/experimental/README.md b/libs/experimental/README.md index ed79068a2970f..876f54e626321 100644 --- a/libs/experimental/README.md +++ b/libs/experimental/README.md @@ -1,16 +1,3 @@ -# 🦜️🧪 LangChain Experimental +This package has moved! -This package holds experimental LangChain code, intended for research and experimental -uses. - -> [!WARNING] -> Portions of the code in this package may be dangerous if not properly deployed -> in a sandboxed environment. Please be wary of deploying experimental code -> to production unless you've taken appropriate precautions and -> have already discussed it with your security team. - -Some of the code here may be marked with security notices. However, -given the exploratory and experimental nature of the code in this package, -the lack of a security notice on a piece of code does not mean that -the code in question does not require additional security considerations -in order to be safe to use. \ No newline at end of file +https://github.com/langchain-ai/langchain-experimental/tree/main/libs/experimental diff --git a/libs/experimental/extended_testing_deps.txt b/libs/experimental/extended_testing_deps.txt deleted file mode 100644 index 06ab41ba342f1..0000000000000 --- a/libs/experimental/extended_testing_deps.txt +++ /dev/null @@ -1,8 +0,0 @@ -presidio-anonymizer>=2.2.352,<3 -presidio-analyzer>=2.2.352,<3 -faker>=19.3.1,<20 -vowpal-wabbit-next==0.7.0 -sentence-transformers>=2,<3 -jinja2>=3,<4 -pandas>=2.0.1,<3 -tabulate>=0.9.0,<1 diff --git a/libs/experimental/langchain_experimental/__init__.py b/libs/experimental/langchain_experimental/__init__.py deleted file mode 100644 index c8bb365cacd4f..0000000000000 --- a/libs/experimental/langchain_experimental/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from importlib import metadata - -try: - __version__ = metadata.version(__package__) -except metadata.PackageNotFoundError: - # Case where package metadata is not available. 
- __version__ = "" -del metadata # optional, avoids polluting the results of dir(__package__) diff --git a/libs/experimental/langchain_experimental/agents/__init__.py b/libs/experimental/langchain_experimental/agents/__init__.py deleted file mode 100644 index 3984834c6dd08..0000000000000 --- a/libs/experimental/langchain_experimental/agents/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -"""**Agent** is a class that uses an LLM to choose -a sequence of actions to take. - -In Chains, a sequence of actions is hardcoded. In Agents, -a language model is used as a reasoning engine to determine which actions -to take and in which order. - -Agents select and use **Tools** and **Toolkits** for actions. -""" - -from langchain_experimental.agents.agent_toolkits import ( - create_csv_agent, - create_pandas_dataframe_agent, - create_spark_dataframe_agent, - create_xorbits_agent, -) - -__all__ = [ - "create_csv_agent", - "create_pandas_dataframe_agent", - "create_spark_dataframe_agent", - "create_xorbits_agent", -] diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/__init__.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/__init__.py deleted file mode 100644 index da32189a0e600..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from langchain_experimental.agents.agent_toolkits.csv.base import create_csv_agent -from langchain_experimental.agents.agent_toolkits.pandas.base import ( - create_pandas_dataframe_agent, -) -from langchain_experimental.agents.agent_toolkits.python.base import create_python_agent -from langchain_experimental.agents.agent_toolkits.spark.base import ( - create_spark_dataframe_agent, -) -from langchain_experimental.agents.agent_toolkits.xorbits.base import ( - create_xorbits_agent, -) - -__all__ = [ - "create_xorbits_agent", - "create_pandas_dataframe_agent", - "create_spark_dataframe_agent", - "create_python_agent", - "create_csv_agent", -] diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/csv/__init__.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/csv/__init__.py deleted file mode 100644 index 3e3a1e069d1d9..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/csv/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""CSV toolkit.""" diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/csv/base.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/csv/base.py deleted file mode 100644 index 4e0c946cdcf37..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/csv/base.py +++ /dev/null @@ -1,66 +0,0 @@ -from __future__ import annotations - -from io import IOBase -from typing import TYPE_CHECKING, Any, List, Optional, Union - -from langchain_experimental.agents.agent_toolkits.pandas.base import ( - create_pandas_dataframe_agent, -) - -if TYPE_CHECKING: - from langchain.agents.agent import AgentExecutor - from langchain_core.language_models import LanguageModelLike - - -def create_csv_agent( - llm: LanguageModelLike, - path: Union[str, IOBase, List[Union[str, IOBase]]], - pandas_kwargs: Optional[dict] = None, - **kwargs: Any, -) -> AgentExecutor: - """Create pandas dataframe agent by loading csv to a dataframe. - - Args: - llm: Language model to use for the agent. - path: A string path, file-like object or a list of string paths/file-like - objects that can be read in as pandas DataFrames with pd.read_csv(). 
- pandas_kwargs: Named arguments to pass to pd.read_csv(). - kwargs: Additional kwargs to pass to langchain_experimental.agents.agent_toolkits.pandas.base.create_pandas_dataframe_agent(). - - Returns: - An AgentExecutor with the specified agent_type agent and access to - a PythonAstREPLTool with the loaded DataFrame(s) and any user-provided extra_tools. - - Example: - .. code-block:: python - - from langchain_openai import ChatOpenAI - from langchain_experimental.agents import create_csv_agent - - llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) - agent_executor = create_csv_agent( - llm, - "titanic.csv", - agent_type="openai-tools", - verbose=True, - allow_dangerous_code=True - ) - """ # noqa: E501 - try: - import pandas as pd - except ImportError: - raise ImportError( - "pandas package not found, please install with `pip install pandas`." - ) - - _kwargs = pandas_kwargs or {} - if isinstance(path, (str, IOBase)): - df = pd.read_csv(path, **_kwargs) - elif isinstance(path, list): - df = [] - for item in path: - if not isinstance(item, (str, IOBase)): - raise ValueError(f"Expected str or file-like object, got {type(item)}") - df.append(pd.read_csv(item, **_kwargs)) - else: - raise ValueError(f"Expected str, list, or file-like object, got {type(path)}") - return create_pandas_dataframe_agent(llm, df, **kwargs) diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/__init__.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/__init__.py deleted file mode 100644 index a6dc608d470e7..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Pandas toolkit.""" diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py deleted file mode 100644 index 424de3ff7f18b..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py +++ /dev/null @@ -1,359 +0,0 @@ -"""Agent for working with pandas objects.""" - -import warnings -from typing import Any, Dict, List, Literal, Optional, Sequence, Union, cast - -from langchain.agents import ( - AgentType, - create_openai_tools_agent, - create_react_agent, - create_tool_calling_agent, -) -from langchain.agents.agent import ( - AgentExecutor, - BaseMultiActionAgent, - BaseSingleActionAgent, - RunnableAgent, - RunnableMultiActionAgent, -) -from langchain.agents.mrkl.prompt import FORMAT_INSTRUCTIONS -from langchain.agents.openai_functions_agent.base import ( - OpenAIFunctionsAgent, - create_openai_functions_agent, -) -from langchain_core.callbacks import BaseCallbackManager -from langchain_core.language_models import BaseLanguageModel, LanguageModelLike -from langchain_core.messages import SystemMessage -from langchain_core.prompts import ( - BasePromptTemplate, - ChatPromptTemplate, - PromptTemplate, -) -from langchain_core.tools import BaseTool -from langchain_core.utils.interactive_env import is_interactive_env - -from langchain_experimental.agents.agent_toolkits.pandas.prompt import ( - FUNCTIONS_WITH_DF, - FUNCTIONS_WITH_MULTI_DF, - MULTI_DF_PREFIX, - MULTI_DF_PREFIX_FUNCTIONS, - PREFIX, - PREFIX_FUNCTIONS, - SUFFIX_NO_DF, - SUFFIX_WITH_DF, - SUFFIX_WITH_MULTI_DF, -) -from langchain_experimental.tools.python.tool import PythonAstREPLTool - - -def _get_multi_prompt( - dfs: List[Any], - *, - prefix: Optional[str] = None, - suffix: Optional[str] = None, - include_df_in_prompt: Optional[bool] = True,
- number_of_head_rows: int = 5, -) -> BasePromptTemplate: - if suffix is not None: - suffix_to_use = suffix - elif include_df_in_prompt: - suffix_to_use = SUFFIX_WITH_MULTI_DF - else: - suffix_to_use = SUFFIX_NO_DF - prefix = prefix if prefix is not None else MULTI_DF_PREFIX - - template = "\n\n".join([prefix, "{tools}", FORMAT_INSTRUCTIONS, suffix_to_use]) - prompt = PromptTemplate.from_template(template) - partial_prompt = prompt.partial() - if "dfs_head" in partial_prompt.input_variables: - dfs_head = "\n\n".join([d.head(number_of_head_rows).to_markdown() for d in dfs]) - partial_prompt = partial_prompt.partial(dfs_head=dfs_head) - if "num_dfs" in partial_prompt.input_variables: - partial_prompt = partial_prompt.partial(num_dfs=str(len(dfs))) - return partial_prompt - - -def _get_single_prompt( - df: Any, - *, - prefix: Optional[str] = None, - suffix: Optional[str] = None, - include_df_in_prompt: Optional[bool] = True, - number_of_head_rows: int = 5, -) -> BasePromptTemplate: - if suffix is not None: - suffix_to_use = suffix - elif include_df_in_prompt: - suffix_to_use = SUFFIX_WITH_DF - else: - suffix_to_use = SUFFIX_NO_DF - prefix = prefix if prefix is not None else PREFIX - - template = "\n\n".join([prefix, "{tools}", FORMAT_INSTRUCTIONS, suffix_to_use]) - prompt = PromptTemplate.from_template(template) - - partial_prompt = prompt.partial() - if "df_head" in partial_prompt.input_variables: - df_head = str(df.head(number_of_head_rows).to_markdown()) - partial_prompt = partial_prompt.partial(df_head=df_head) - return partial_prompt - - -def _get_prompt(df: Any, **kwargs: Any) -> BasePromptTemplate: - return ( - _get_multi_prompt(df, **kwargs) - if isinstance(df, list) - else _get_single_prompt(df, **kwargs) - ) - - -def _get_functions_single_prompt( - df: Any, - *, - prefix: Optional[str] = None, - suffix: str = "", - include_df_in_prompt: Optional[bool] = True, - number_of_head_rows: int = 5, -) -> ChatPromptTemplate: - if include_df_in_prompt: - df_head = str(df.head(number_of_head_rows).to_markdown()) - suffix = (suffix or FUNCTIONS_WITH_DF).format(df_head=df_head) - prefix = prefix if prefix is not None else PREFIX_FUNCTIONS - system_message = SystemMessage(content=prefix + suffix) - prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) - return prompt - - -def _get_functions_multi_prompt( - dfs: Any, - *, - prefix: str = "", - suffix: str = "", - include_df_in_prompt: Optional[bool] = True, - number_of_head_rows: int = 5, -) -> ChatPromptTemplate: - if include_df_in_prompt: - dfs_head = "\n\n".join([d.head(number_of_head_rows).to_markdown() for d in dfs]) - suffix = (suffix or FUNCTIONS_WITH_MULTI_DF).format(dfs_head=dfs_head) - prefix = (prefix or MULTI_DF_PREFIX_FUNCTIONS).format(num_dfs=str(len(dfs))) - system_message = SystemMessage(content=prefix + suffix) - prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) - return prompt - - -def _get_functions_prompt(df: Any, **kwargs: Any) -> ChatPromptTemplate: - return ( - _get_functions_multi_prompt(df, **kwargs) - if isinstance(df, list) - else _get_functions_single_prompt(df, **kwargs) - ) - - -def create_pandas_dataframe_agent( - llm: LanguageModelLike, - df: Any, - agent_type: Union[ - AgentType, Literal["openai-tools", "tool-calling"] - ] = AgentType.ZERO_SHOT_REACT_DESCRIPTION, - callback_manager: Optional[BaseCallbackManager] = None, - prefix: Optional[str] = None, - suffix: Optional[str] = None, - input_variables: Optional[List[str]] = None, - verbose: bool = False, - 
return_intermediate_steps: bool = False, - max_iterations: Optional[int] = 15, - max_execution_time: Optional[float] = None, - early_stopping_method: str = "force", - agent_executor_kwargs: Optional[Dict[str, Any]] = None, - include_df_in_prompt: Optional[bool] = True, - number_of_head_rows: int = 5, - extra_tools: Sequence[BaseTool] = (), - engine: Literal["pandas", "modin"] = "pandas", - allow_dangerous_code: bool = False, - **kwargs: Any, -) -> AgentExecutor: - """Construct a Pandas agent from an LLM and dataframe(s). - - Security Notice: - This agent relies on access to a python repl tool which can execute - arbitrary code. This can be dangerous and requires a specially sandboxed - environment to be safely used. Failure to run this code in a properly - sandboxed environment can lead to arbitrary code execution vulnerabilities, - which can lead to data breaches, data loss, or other security incidents. - - Do not use this code with untrusted inputs, with elevated permissions, - or without consulting your security team about proper sandboxing! - - You must opt-in to use this functionality by setting allow_dangerous_code=True. - - Args: - llm: Language model to use for the agent. If agent_type is "tool-calling" then - llm is expected to support tool calling. - df: Pandas dataframe or list of Pandas dataframes. - agent_type: One of "tool-calling", "openai-tools", "openai-functions", or - "zero-shot-react-description". Defaults to "zero-shot-react-description". - "tool-calling" is recommended over the legacy "openai-tools" and - "openai-functions" types. - callback_manager: DEPRECATED. Pass "callbacks" key into 'agent_executor_kwargs' - instead to pass constructor callbacks to AgentExecutor. - prefix: Prompt prefix string. - suffix: Prompt suffix string. - input_variables: DEPRECATED. Input variables automatically inferred from - constructed prompt. - verbose: AgentExecutor verbosity. - return_intermediate_steps: Passed to AgentExecutor init. - max_iterations: Passed to AgentExecutor init. - max_execution_time: Passed to AgentExecutor init. - early_stopping_method: Passed to AgentExecutor init. - agent_executor_kwargs: Arbitrary additional AgentExecutor args. - include_df_in_prompt: Whether to include the first number_of_head_rows in the - prompt. Must be None if suffix is not None. - number_of_head_rows: Number of initial rows to include in prompt if - include_df_in_prompt is True. - extra_tools: Additional tools to give to agent on top of a PythonAstREPLTool. - engine: One of "modin" or "pandas". Defaults to "pandas". - allow_dangerous_code: bool, default False - This agent relies on access to a python repl tool which can execute - arbitrary code. This can be dangerous and requires a specially sandboxed - environment to be safely used. - Failure to properly sandbox this class can lead to arbitrary code execution - vulnerabilities, which can lead to data breaches, data loss, or - other security incidents. - You must opt in to use this functionality by setting - allow_dangerous_code=True. - - **kwargs: DEPRECATED. Not used, kept for backwards compatibility. - - Returns: - An AgentExecutor with the specified agent_type agent and access to - a PythonAstREPLTool with the DataFrame(s) and any user-provided extra_tools. - - Example: - .. 
code-block:: python - - from langchain_openai import ChatOpenAI - from langchain_experimental.agents import create_pandas_dataframe_agent - import pandas as pd - - df = pd.read_csv("titanic.csv") - llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) - agent_executor = create_pandas_dataframe_agent( - llm, - df, - agent_type="tool-calling", - verbose=True, - allow_dangerous_code=True - ) - - """ - if not allow_dangerous_code: - raise ValueError( - "This agent relies on access to a python repl tool which can execute " - "arbitrary code. This can be dangerous and requires a specially sandboxed " - "environment to be safely used. Please read the security notice in the " - "doc-string of this function. You must opt-in to use this functionality " - "by setting allow_dangerous_code=True. " - "For general security guidelines, please see: " - "https://python.langchain.com/v0.2/docs/security/" - ) - try: - if engine == "modin": - import modin.pandas as pd - elif engine == "pandas": - import pandas as pd - else: - raise ValueError( - f"Unsupported engine {engine}. It must be one of 'modin' or 'pandas'." - ) - except ImportError as e: - raise ImportError( - f"`{engine}` package not found, please install with `pip install {engine}`" - ) from e - - if is_interactive_env(): - pd.set_option("display.max_columns", None) - - for _df in df if isinstance(df, list) else [df]: - if not isinstance(_df, pd.DataFrame): - raise ValueError(f"Expected pandas DataFrame, got {type(_df)}") - - if input_variables: - kwargs = kwargs or {} - kwargs["input_variables"] = input_variables - if kwargs: - warnings.warn( - f"Received additional kwargs {kwargs} which are no longer supported." - ) - - df_locals = {} - if isinstance(df, list): - for i, dataframe in enumerate(df): - df_locals[f"df{i + 1}"] = dataframe - else: - df_locals["df"] = df - tools = [PythonAstREPLTool(locals=df_locals)] + list(extra_tools) - - if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION: - if include_df_in_prompt is not None and suffix is not None: - raise ValueError( - "If suffix is specified, include_df_in_prompt should not be." - ) - prompt = _get_prompt( - df, - prefix=prefix, - suffix=suffix, - include_df_in_prompt=include_df_in_prompt, - number_of_head_rows=number_of_head_rows, - ) - agent: Union[BaseSingleActionAgent, BaseMultiActionAgent] = RunnableAgent( - runnable=create_react_agent(llm, tools, prompt), # type: ignore - input_keys_arg=["input"], - return_keys_arg=["output"], - ) - elif agent_type in (AgentType.OPENAI_FUNCTIONS, "openai-tools", "tool-calling"): - prompt = _get_functions_prompt( - df, - prefix=prefix, - suffix=suffix, - include_df_in_prompt=include_df_in_prompt, - number_of_head_rows=number_of_head_rows, - ) - if agent_type == AgentType.OPENAI_FUNCTIONS: - runnable = create_openai_functions_agent( - cast(BaseLanguageModel, llm), tools, prompt - ) - agent = RunnableAgent( - runnable=runnable, - input_keys_arg=["input"], - return_keys_arg=["output"], - ) - else: - if agent_type == "openai-tools": - runnable = create_openai_tools_agent( - cast(BaseLanguageModel, llm), tools, prompt - ) - else: - runnable = create_tool_calling_agent( - cast(BaseLanguageModel, llm), tools, prompt - ) - agent = RunnableMultiActionAgent( - runnable=runnable, - input_keys_arg=["input"], - return_keys_arg=["output"], - ) - else: - raise ValueError( - f"Agent type {agent_type} not supported at the moment. Must be one of " - "'tool-calling', 'openai-tools', 'openai-functions', or " - "'zero-shot-react-description'."
- ) - return AgentExecutor( - agent=agent, - tools=tools, - callback_manager=callback_manager, - verbose=verbose, - return_intermediate_steps=return_intermediate_steps, - max_iterations=max_iterations, - max_execution_time=max_execution_time, - early_stopping_method=early_stopping_method, - **(agent_executor_kwargs or {}), - ) diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/prompt.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/prompt.py deleted file mode 100644 index 72b2bc8b20bc4..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/prompt.py +++ /dev/null @@ -1,44 +0,0 @@ -# flake8: noqa - -PREFIX = """ -You are working with a pandas dataframe in Python. The name of the dataframe is `df`. -You should use the tools below to answer the question posed of you:""" - -MULTI_DF_PREFIX = """ -You are working with {num_dfs} pandas dataframes in Python named df1, df2, etc. You -should use the tools below to answer the question posed of you:""" - -SUFFIX_NO_DF = """ -Begin! -Question: {input} -{agent_scratchpad}""" - -SUFFIX_WITH_DF = """ -This is the result of `print(df.head())`: -{df_head} - -Begin! -Question: {input} -{agent_scratchpad}""" - -SUFFIX_WITH_MULTI_DF = """ -This is the result of `print(df.head())` for each dataframe: -{dfs_head} - -Begin! -Question: {input} -{agent_scratchpad}""" - -PREFIX_FUNCTIONS = """ -You are working with a pandas dataframe in Python. The name of the dataframe is `df`.""" - -MULTI_DF_PREFIX_FUNCTIONS = """ -You are working with {num_dfs} pandas dataframes in Python named df1, df2, etc.""" - -FUNCTIONS_WITH_DF = """ -This is the result of `print(df.head())`: -{df_head}""" - -FUNCTIONS_WITH_MULTI_DF = """ -This is the result of `print(df.head())` for each dataframe: -{dfs_head}""" diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/python/__init__.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/python/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/python/base.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/python/base.py deleted file mode 100644 index 4d2cc43cb1748..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/python/base.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Python agent.""" - -from typing import Any, Dict, Optional - -from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent -from langchain.agents.mrkl.base import ZeroShotAgent -from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent -from langchain.agents.types import AgentType -from langchain.chains.llm import LLMChain -from langchain_core.callbacks.base import BaseCallbackManager -from langchain_core.language_models import BaseLanguageModel -from langchain_core.messages import SystemMessage - -from langchain_experimental.agents.agent_toolkits.python.prompt import PREFIX -from langchain_experimental.tools.python.tool import PythonREPLTool - - -def create_python_agent( - llm: BaseLanguageModel, - tool: PythonREPLTool, - agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION, - callback_manager: Optional[BaseCallbackManager] = None, - verbose: bool = False, - prefix: str = PREFIX, - agent_executor_kwargs: Optional[Dict[str, Any]] = None, - **kwargs: Dict[str, Any], -) -> AgentExecutor: - """Construct a python agent from an LLM and tool.""" - tools = [tool] - agent: 
BaseSingleActionAgent - - if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION: - prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix) - llm_chain = LLMChain( - llm=llm, - prompt=prompt, - callback_manager=callback_manager, - ) - tool_names = [tool.name for tool in tools] - agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) # type: ignore[arg-type] - elif agent_type == AgentType.OPENAI_FUNCTIONS: - system_message = SystemMessage(content=prefix) - _prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message) - agent = OpenAIFunctionsAgent( # type: ignore[call-arg] - llm=llm, - prompt=_prompt, - tools=tools, - callback_manager=callback_manager, - **kwargs, # type: ignore[arg-type] - ) - else: - raise ValueError(f"Agent type {agent_type} not supported at the moment.") - return AgentExecutor.from_agent_and_tools( - agent=agent, - tools=tools, - callback_manager=callback_manager, - verbose=verbose, - **(agent_executor_kwargs or {}), - ) diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/python/prompt.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/python/prompt.py deleted file mode 100644 index fc97e7916eb47..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/python/prompt.py +++ /dev/null @@ -1,9 +0,0 @@ -# flake8: noqa - -PREFIX = """You are an agent designed to write and execute python code to answer questions. -You have access to a python REPL, which you can use to execute python code. -If you get an error, debug your code and try again. -Only use the output of your code to answer the question. -You might know the answer without running any code, but you should still run the code to get the answer. -If it does not seem like you can write code to answer the question, just return "I don't know" as the answer. 
-""" diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/__init__.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/__init__.py deleted file mode 100644 index ded6eb03a420a..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""spark toolkit""" diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/base.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/base.py deleted file mode 100644 index d48a0b0ed6046..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/base.py +++ /dev/null @@ -1,117 +0,0 @@ -"""Agent for working with pandas objects.""" - -from typing import Any, Dict, List, Optional - -from langchain.agents.agent import AgentExecutor -from langchain.agents.mrkl.base import ZeroShotAgent -from langchain.chains.llm import LLMChain -from langchain_core.callbacks.base import BaseCallbackManager -from langchain_core.language_models import BaseLLM - -from langchain_experimental.agents.agent_toolkits.spark.prompt import PREFIX, SUFFIX -from langchain_experimental.tools.python.tool import PythonAstREPLTool - - -def _validate_spark_df(df: Any) -> bool: - try: - from pyspark.sql import DataFrame as SparkLocalDataFrame - - return isinstance(df, SparkLocalDataFrame) - except ImportError: - return False - - -def _validate_spark_connect_df(df: Any) -> bool: - try: - from pyspark.sql.connect.dataframe import DataFrame as SparkConnectDataFrame - - return isinstance(df, SparkConnectDataFrame) - except ImportError: - return False - - -def create_spark_dataframe_agent( - llm: BaseLLM, - df: Any, - callback_manager: Optional[BaseCallbackManager] = None, - prefix: str = PREFIX, - suffix: str = SUFFIX, - input_variables: Optional[List[str]] = None, - verbose: bool = False, - return_intermediate_steps: bool = False, - max_iterations: Optional[int] = 15, - max_execution_time: Optional[float] = None, - early_stopping_method: str = "force", - agent_executor_kwargs: Optional[Dict[str, Any]] = None, - allow_dangerous_code: bool = False, - **kwargs: Any, -) -> AgentExecutor: - """Construct a Spark agent from an LLM and dataframe. - - Security Notice: - This agent relies on access to a python repl tool which can execute - arbitrary code. This can be dangerous and requires a specially sandboxed - environment to be safely used. Failure to run this code in a properly - sandboxed environment can lead to arbitrary code execution vulnerabilities, - which can lead to data breaches, data loss, or other security incidents. - - Do not use this code with untrusted inputs, with elevated permissions, - or without consulting your security team about proper sandboxing! - - You must opt in to use this functionality by setting allow_dangerous_code=True. - - Args: - allow_dangerous_code: bool, default False - This agent relies on access to a python repl tool which can execute - arbitrary code. This can be dangerous and requires a specially sandboxed - environment to be safely used. - Failure to properly sandbox this class can lead to arbitrary code execution - vulnerabilities, which can lead to data breaches, data loss, or - other security incidents. - You must opt in to use this functionality by setting - allow_dangerous_code=True. - """ - if not allow_dangerous_code: - raise ValueError( - "This agent relies on access to a python repl tool which can execute " - "arbitrary code. 
This can be dangerous and requires a specially sandboxed " - "environment to be safely used. Please read the security notice in the " - "doc-string of this function. You must opt-in to use this functionality " - "by setting allow_dangerous_code=True." - "For general security guidelines, please see: " - "https://python.langchain.com/v0.2/docs/security/" - ) - - if not _validate_spark_df(df) and not _validate_spark_connect_df(df): - raise ImportError("Spark is not installed. run `pip install pyspark`.") - - if input_variables is None: - input_variables = ["df", "input", "agent_scratchpad"] - tools = [PythonAstREPLTool(locals={"df": df})] - prompt = ZeroShotAgent.create_prompt( - tools, prefix=prefix, suffix=suffix, input_variables=input_variables - ) - partial_prompt = prompt.partial(df=str(df.first())) - llm_chain = LLMChain( - llm=llm, - prompt=partial_prompt, - callback_manager=callback_manager, - ) - tool_names = [tool.name for tool in tools] - agent = ZeroShotAgent( # type: ignore[call-arg] - llm_chain=llm_chain, - allowed_tools=tool_names, - callback_manager=callback_manager, - **kwargs, - ) - return AgentExecutor.from_agent_and_tools( - agent=agent, - tools=tools, - callback_manager=callback_manager, - verbose=verbose, - return_intermediate_steps=return_intermediate_steps, - max_iterations=max_iterations, - max_execution_time=max_execution_time, - early_stopping_method=early_stopping_method, - **(agent_executor_kwargs or {}), - ) diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/prompt.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/prompt.py deleted file mode 100644 index 32ce2c3423540..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/spark/prompt.py +++ /dev/null @@ -1,13 +0,0 @@ -# flake8: noqa - -PREFIX = """ -You are working with a spark dataframe in Python. The name of the dataframe is `df`. -You should use the tools below to answer the question posed of you:""" - -SUFFIX = """ -This is the result of `print(df.first())`: -{df} - -Begin! 
-Question: {input} -{agent_scratchpad}""" diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/__init__.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/__init__.py deleted file mode 100644 index 71bf7b70ea2c9..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Xorbits toolkit.""" diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/base.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/base.py deleted file mode 100644 index 7dcddf368bf60..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/base.py +++ /dev/null @@ -1,128 +0,0 @@ -"""Agent for working with xorbits objects.""" - -from typing import Any, Dict, List, Optional - -from langchain.agents.agent import AgentExecutor -from langchain.agents.mrkl.base import ZeroShotAgent -from langchain.chains.llm import LLMChain -from langchain_core.callbacks.base import BaseCallbackManager -from langchain_core.language_models import BaseLLM - -from langchain_experimental.agents.agent_toolkits.xorbits.prompt import ( - NP_PREFIX, - NP_SUFFIX, - PD_PREFIX, - PD_SUFFIX, -) -from langchain_experimental.tools.python.tool import PythonAstREPLTool - - -def create_xorbits_agent( - llm: BaseLLM, - data: Any, - callback_manager: Optional[BaseCallbackManager] = None, - prefix: str = "", - suffix: str = "", - input_variables: Optional[List[str]] = None, - verbose: bool = False, - return_intermediate_steps: bool = False, - max_iterations: Optional[int] = 15, - max_execution_time: Optional[float] = None, - early_stopping_method: str = "force", - agent_executor_kwargs: Optional[Dict[str, Any]] = None, - allow_dangerous_code: bool = False, - **kwargs: Dict[str, Any], -) -> AgentExecutor: - """Construct a xorbits agent from an LLM and dataframe. - - Security Notice: - This agent relies on access to a python repl tool which can execute - arbitrary code. This can be dangerous and requires a specially sandboxed - environment to be safely used. Failure to run this code in a properly - sandboxed environment can lead to arbitrary code execution vulnerabilities, - which can lead to data breaches, data loss, or other security incidents. - - Do not use this code with untrusted inputs, with elevated permissions, - or without consulting your security team about proper sandboxing! - - You must opt in to use this functionality by setting allow_dangerous_code=True. - - Args: - allow_dangerous_code: bool, default False - This agent relies on access to a python repl tool which can execute - arbitrary code. This can be dangerous and requires a specially sandboxed - environment to be safely used. - Failure to properly sandbox this class can lead to arbitrary code execution - vulnerabilities, which can lead to data breaches, data loss, or - other security incidents. - You must opt in to use this functionality by setting - allow_dangerous_code=True. - """ - if not allow_dangerous_code: - raise ValueError( - "This agent relies on access to a python repl tool which can execute " - "arbitrary code. This can be dangerous and requires a specially sandboxed " - "environment to be safely used. Please read the security notice in the " - "doc-string of this function. You must opt-in to use this functionality " - "by setting allow_dangerous_code=True." 
- "For general security guidelines, please see: " - "https://python.langchain.com/v0.2/docs/security/" - ) - - try: - from xorbits import numpy as np - from xorbits import pandas as pd - except ImportError: - raise ImportError( - "Xorbits package not installed, please install with `pip install xorbits`" - ) - - if not isinstance(data, (pd.DataFrame, np.ndarray)): - raise ValueError( - f"Expected Xorbits DataFrame or ndarray object, got {type(data)}" - ) - if input_variables is None: - input_variables = ["data", "input", "agent_scratchpad"] - tools = [PythonAstREPLTool(locals={"data": data})] - prompt, partial_input = None, None - - if isinstance(data, pd.DataFrame): - prompt = ZeroShotAgent.create_prompt( - tools, - prefix=PD_PREFIX if prefix == "" else prefix, - suffix=PD_SUFFIX if suffix == "" else suffix, - input_variables=input_variables, - ) - partial_input = str(data.head()) - else: - prompt = ZeroShotAgent.create_prompt( - tools, - prefix=NP_PREFIX if prefix == "" else prefix, - suffix=NP_SUFFIX if suffix == "" else suffix, - input_variables=input_variables, - ) - partial_input = str(data[: len(data) // 2]) - partial_prompt = prompt.partial(data=partial_input) - llm_chain = LLMChain( - llm=llm, - prompt=partial_prompt, - callback_manager=callback_manager, - ) - tool_names = [tool.name for tool in tools] - agent = ZeroShotAgent( # type: ignore[call-arg] - llm_chain=llm_chain, - allowed_tools=tool_names, - callback_manager=callback_manager, - **kwargs, # type: ignore[arg-type] - ) - return AgentExecutor.from_agent_and_tools( - agent=agent, - tools=tools, - callback_manager=callback_manager, - verbose=verbose, - return_intermediate_steps=return_intermediate_steps, - max_iterations=max_iterations, - max_execution_time=max_execution_time, - early_stopping_method=early_stopping_method, - **(agent_executor_kwargs or {}), - ) diff --git a/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/prompt.py b/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/prompt.py deleted file mode 100644 index 6db3a41a79fbe..0000000000000 --- a/libs/experimental/langchain_experimental/agents/agent_toolkits/xorbits/prompt.py +++ /dev/null @@ -1,33 +0,0 @@ -PD_PREFIX = """ -You are working with Xorbits dataframe object in Python. -Before importing Numpy or Pandas in the current script, -remember to import the xorbits version of the library instead. -To import the xorbits version of Numpy, replace the original import statement -`import pandas as pd` with `import xorbits.pandas as pd`. -The name of the input is `data`. -You should use the tools below to answer the question posed of you:""" - -PD_SUFFIX = """ -This is the result of `print(data)`: -{data} - -Begin! -Question: {input} -{agent_scratchpad}""" - -NP_PREFIX = """ -You are working with Xorbits ndarray object in Python. -Before importing Numpy in the current script, -remember to import the xorbits version of the library instead. -To import the xorbits version of Numpy, replace the original import statement -`import numpy as np` with `import xorbits.numpy as np`. -The name of the input is `data`. -You should use the tools below to answer the question posed of you:""" - -NP_SUFFIX = """ -This is the result of `print(data)`: -{data} - -Begin! 
-Question: {input} -{agent_scratchpad}""" diff --git a/libs/experimental/langchain_experimental/autonomous_agents/__init__.py b/libs/experimental/langchain_experimental/autonomous_agents/__init__.py deleted file mode 100644 index 371f41da4416a..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -"""**Autonomous agents** in the Langchain experimental package include -[AutoGPT](https://github.com/Significant-Gravitas/AutoGPT), -[BabyAGI](https://github.com/yoheinakajima/babyagi), -and [HuggingGPT](https://arxiv.org/abs/2303.17580) agents that -interact with language models autonomously. - -These agents have specific functionalities like memory management, -task creation, execution chains, and response generation. - -They differ from ordinary agents by their autonomous decision-making capabilities, -memory handling, and specialized functionalities for tasks and response. -""" - -from langchain_experimental.autonomous_agents.autogpt.agent import AutoGPT -from langchain_experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI -from langchain_experimental.autonomous_agents.hugginggpt.hugginggpt import HuggingGPT - -__all__ = ["BabyAGI", "AutoGPT", "HuggingGPT"] diff --git a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/__init__.py b/libs/experimental/langchain_experimental/autonomous_agents/autogpt/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py b/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py deleted file mode 100644 index 6d1d53fd17e10..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/agent.py +++ /dev/null @@ -1,143 +0,0 @@ -from __future__ import annotations - -from typing import List, Optional - -from langchain.chains.llm import LLMChain -from langchain.memory import ChatMessageHistory -from langchain.schema import ( - BaseChatMessageHistory, - Document, -) -from langchain_community.tools.human.tool import HumanInputRun -from langchain_core.language_models import BaseChatModel -from langchain_core.messages import AIMessage, HumanMessage, SystemMessage -from langchain_core.tools import BaseTool -from langchain_core.vectorstores import VectorStoreRetriever -from pydantic import ValidationError - -from langchain_experimental.autonomous_agents.autogpt.output_parser import ( - AutoGPTOutputParser, - BaseAutoGPTOutputParser, -) -from langchain_experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt -from langchain_experimental.autonomous_agents.autogpt.prompt_generator import ( - FINISH_NAME, -) - - -class AutoGPT: - """Agent for interacting with AutoGPT.""" - - def __init__( - self, - ai_name: str, - memory: VectorStoreRetriever, - chain: LLMChain, - output_parser: BaseAutoGPTOutputParser, - tools: List[BaseTool], - feedback_tool: Optional[HumanInputRun] = None, - chat_history_memory: Optional[BaseChatMessageHistory] = None, - ): - self.ai_name = ai_name - self.memory = memory - self.next_action_count = 0 - self.chain = chain - self.output_parser = output_parser - self.tools = tools - self.feedback_tool = feedback_tool - self.chat_history_memory = chat_history_memory or ChatMessageHistory() - - @classmethod - def from_llm_and_tools( - cls, - ai_name: str, - ai_role: str, - memory: VectorStoreRetriever, - tools: List[BaseTool], - llm: BaseChatModel, - human_in_the_loop: bool = False, - output_parser: 
Optional[BaseAutoGPTOutputParser] = None, - chat_history_memory: Optional[BaseChatMessageHistory] = None, - ) -> AutoGPT: - prompt = AutoGPTPrompt( # type: ignore[call-arg, call-arg, call-arg, call-arg] - ai_name=ai_name, - ai_role=ai_role, - tools=tools, - input_variables=["memory", "messages", "goals", "user_input"], - token_counter=llm.get_num_tokens, - ) - human_feedback_tool = HumanInputRun() if human_in_the_loop else None - chain = LLMChain(llm=llm, prompt=prompt) - return cls( - ai_name, - memory, - chain, - output_parser or AutoGPTOutputParser(), - tools, - feedback_tool=human_feedback_tool, - chat_history_memory=chat_history_memory, - ) - - def run(self, goals: List[str]) -> str: - user_input = ( - "Determine which next command to use, " - "and respond using the format specified above:" - ) - # Interaction Loop - loop_count = 0 - while True: - # Discontinue if continuous limit is reached - loop_count += 1 - - # Send message to AI, get response - assistant_reply = self.chain.run( - goals=goals, - messages=self.chat_history_memory.messages, - memory=self.memory, - user_input=user_input, - ) - - # Print Assistant thoughts - print(assistant_reply) # noqa: T201 - self.chat_history_memory.add_message(HumanMessage(content=user_input)) - self.chat_history_memory.add_message(AIMessage(content=assistant_reply)) - - # Get command name and arguments - action = self.output_parser.parse(assistant_reply) - tools = {t.name: t for t in self.tools} - if action.name == FINISH_NAME: - return action.args["response"] - if action.name in tools: - tool = tools[action.name] - try: - observation = tool.run(action.args) - except ValidationError as e: - observation = ( - f"Validation Error in args: {str(e)}, args: {action.args}" - ) - except Exception as e: - observation = ( - f"Error: {str(e)}, {type(e).__name__}, args: {action.args}" - ) - result = f"Command {tool.name} returned: {observation}" - elif action.name == "ERROR": - result = f"Error: {action.args}. " - else: - result = ( - f"Unknown command '{action.name}'. " - f"Please refer to the 'COMMANDS' list for available " - f"commands and only respond in the specified JSON format." 
- ) - - memory_to_add = ( - f"Assistant Reply: {assistant_reply} " f"\nResult: {result} " - ) - if self.feedback_tool is not None: - feedback = f"{self.feedback_tool.run('Input: ')}" - if feedback in {"q", "stop"}: - print("EXITING") # noqa: T201 - return "EXITING" - memory_to_add += f"\n{feedback}" - - self.memory.add_documents([Document(page_content=memory_to_add)]) - self.chat_history_memory.add_message(SystemMessage(content=result)) diff --git a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/memory.py b/libs/experimental/langchain_experimental/autonomous_agents/autogpt/memory.py deleted file mode 100644 index a0029acd74133..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/memory.py +++ /dev/null @@ -1,32 +0,0 @@ -from typing import Any, Dict, List - -from langchain.memory.chat_memory import BaseChatMemory -from langchain.memory.utils import get_prompt_input_key -from langchain_core.vectorstores import VectorStoreRetriever -from pydantic import Field - - -class AutoGPTMemory(BaseChatMemory): - """Memory for AutoGPT.""" - - retriever: VectorStoreRetriever = Field(exclude=True) - """VectorStoreRetriever object to connect to.""" - - @property - def memory_variables(self) -> List[str]: - return ["chat_history", "relevant_context"] - - def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: - """Get the input key for the prompt.""" - if self.input_key is None: - return get_prompt_input_key(inputs, self.memory_variables) - return self.input_key - - def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: - input_key = self._get_prompt_input_key(inputs) - query = inputs[input_key] - docs = self.retriever.invoke(query) - return { - "chat_history": self.chat_memory.messages[-10:], - "relevant_context": docs, - } diff --git a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/output_parser.py b/libs/experimental/langchain_experimental/autonomous_agents/autogpt/output_parser.py deleted file mode 100644 index 774a123054867..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/output_parser.py +++ /dev/null @@ -1,66 +0,0 @@ -import json -import re -from abc import abstractmethod -from typing import Dict, NamedTuple - -from langchain_core.output_parsers import BaseOutputParser - - -class AutoGPTAction(NamedTuple): - """Action returned by AutoGPTOutputParser.""" - - name: str - args: Dict - - -class BaseAutoGPTOutputParser(BaseOutputParser): - """Base Output parser for AutoGPT.""" - - @abstractmethod - def parse(self, text: str) -> AutoGPTAction: - """Return AutoGPTAction""" - - -def preprocess_json_input(input_str: str) -> str: - """Preprocesses a string to be parsed as json. - - Replace single backslashes with double backslashes, - while leaving already escaped ones intact. - - Args: - input_str: String to be preprocessed - - Returns: - Preprocessed string - """ - corrected_str = re.sub( - r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str - ) - return corrected_str
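The backslash repair in `preprocess_json_input` above is easiest to see on a concrete input. A minimal, self-contained sketch (the model reply below is hypothetical; the regex is the one from this file):

```python
import json
import re


def preprocess_json_input(input_str: str) -> str:
    # Double any backslash that is not already escaped and does not start
    # a valid JSON escape sequence.
    return re.sub(r'(?<!\\)\\(?!["\\/bfnrt]|u[0-9a-fA-F]{4})', r"\\\\", input_str)


# \U and \m are not valid JSON escapes, so strict parsing rejects the raw reply:
raw = '{"command": {"name": "write_file", "args": {"path": "C:\\Users\\me"}}}'
try:
    json.loads(raw)
except json.JSONDecodeError:
    fixed = preprocess_json_input(raw)  # lone backslashes are doubled
    print(json.loads(fixed)["command"]["args"])  # {'path': 'C:\\Users\\me'}
```

Already-valid escapes such as `\n`, `\"`, or a doubled `\\` are excluded by the lookbehind and lookahead guards and pass through unchanged.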
- - -class AutoGPTOutputParser(BaseAutoGPTOutputParser): - """Output parser for AutoGPT.""" - - def parse(self, text: str) -> AutoGPTAction: - try: - parsed = json.loads(text, strict=False) - except json.JSONDecodeError: - preprocessed_text = preprocess_json_input(text) - try: - parsed = json.loads(preprocessed_text, strict=False) - except Exception: - return AutoGPTAction( - name="ERROR", - args={"error": f"Could not parse invalid json: {text}"}, - ) - try: - return AutoGPTAction( - name=parsed["command"]["name"], - args=parsed["command"]["args"], - ) - except (KeyError, TypeError): - # If the command is null or incomplete, return an erroneous tool - return AutoGPTAction( - name="ERROR", args={"error": f"Incomplete command args: {parsed}"} - ) diff --git a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/prompt.py b/libs/experimental/langchain_experimental/autonomous_agents/autogpt/prompt.py deleted file mode 100644 index faabfca929579..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/prompt.py +++ /dev/null @@ -1,106 +0,0 @@ -import time -from typing import Any, Callable, List, cast - -from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage -from langchain_core.prompts.chat import ( - BaseChatPromptTemplate, -) -from langchain_core.tools import BaseTool -from langchain_core.vectorstores import VectorStoreRetriever -from pydantic import BaseModel - -from langchain_experimental.autonomous_agents.autogpt.prompt_generator import get_prompt - - -# This class has a metaclass conflict: both `BaseChatPromptTemplate` and `BaseModel` -# define a metaclass to use, and the two metaclasses attempt to define -# the same functions but in mutually-incompatible ways. -# It isn't clear how to resolve this, and this code predates mypy -# beginning to perform that check. -# -# Mypy errors: -# ``` -# Definition of "__private_attributes__" in base class "BaseModel" is -# incompatible with definition in base class "BaseModel" [misc] -# Definition of "__repr_name__" in base class "Representation" is -# incompatible with definition in base class "BaseModel" [misc] -# Definition of "__pretty__" in base class "Representation" is -# incompatible with definition in base class "BaseModel" [misc] -# Definition of "__repr_str__" in base class "Representation" is -# incompatible with definition in base class "BaseModel" [misc] -# Definition of "__rich_repr__" in base class "Representation" is -# incompatible with definition in base class "BaseModel" [misc] -# Metaclass conflict: the metaclass of a derived class must be -# a (non-strict) subclass of the metaclasses of all its bases [misc] -# ``` -# -# TODO: look into refactoring this class in a way that avoids the mypy type errors -class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel): # type: ignore[misc] - """Prompt for AutoGPT.""" - - ai_name: str - ai_role: str - tools: List[BaseTool] - token_counter: Callable[[str], int] - send_token_limit: int = 4196 - - def construct_full_prompt(self, goals: List[str]) -> str: - prompt_start = ( - "Your decisions must always be made independently " - "without seeking user assistance.\n" - "Play to your strengths as an LLM and pursue simple " - "strategies with no legal complications.\n" - "If you have completed all your tasks, make sure to " - 'use the "finish" command.' - ) - # Construct full prompt - full_prompt = ( - f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n" - ) - for i, goal in enumerate(goals): - full_prompt += f"{i+1}. 
{goal}\n" - - full_prompt += f"\n\n{get_prompt(self.tools)}" - return full_prompt - - def format_messages(self, **kwargs: Any) -> List[BaseMessage]: - base_prompt = SystemMessage(content=self.construct_full_prompt(kwargs["goals"])) - time_prompt = SystemMessage( - content=f"The current time and date is {time.strftime('%c')}" - ) - used_tokens = self.token_counter( - cast(str, base_prompt.content) - ) + self.token_counter(cast(str, time_prompt.content)) - memory: VectorStoreRetriever = kwargs["memory"] - previous_messages = kwargs["messages"] - relevant_docs = memory.invoke(str(previous_messages[-10:])) - relevant_memory = [d.page_content for d in relevant_docs] - relevant_memory_tokens = sum( - [self.token_counter(doc) for doc in relevant_memory] - ) - while used_tokens + relevant_memory_tokens > 2500: - relevant_memory = relevant_memory[:-1] - relevant_memory_tokens = sum( - [self.token_counter(doc) for doc in relevant_memory] - ) - content_format = ( - f"This reminds you of these events " - f"from your past:\n{relevant_memory}\n\n" - ) - memory_message = SystemMessage(content=content_format) - used_tokens += self.token_counter(cast(str, memory_message.content)) - historical_messages: List[BaseMessage] = [] - for message in previous_messages[-10:][::-1]: - message_tokens = self.token_counter(message.content) - if used_tokens + message_tokens > self.send_token_limit - 1000: - break - historical_messages = [message] + historical_messages - used_tokens += message_tokens - input_message = HumanMessage(content=kwargs["user_input"]) - messages: List[BaseMessage] = [base_prompt, time_prompt, memory_message] - messages += historical_messages - messages.append(input_message) - return messages - - def pretty_repr(self, html: bool = False) -> str: - raise NotImplementedError diff --git a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/prompt_generator.py b/libs/experimental/langchain_experimental/autonomous_agents/autogpt/prompt_generator.py deleted file mode 100644 index b4a72ce617798..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/autogpt/prompt_generator.py +++ /dev/null @@ -1,186 +0,0 @@ -import json -from typing import List - -from langchain_core.tools import BaseTool - -FINISH_NAME = "finish" - - -class PromptGenerator: - """Generator of custom prompt strings. - - Does this based on constraints, commands, resources, and performance evaluations. - """ - - def __init__(self) -> None: - """Initialize the PromptGenerator object. - - Starts with empty lists of constraints, commands, resources, - and performance evaluations. - """ - self.constraints: List[str] = [] - self.commands: List[BaseTool] = [] - self.resources: List[str] = [] - self.performance_evaluation: List[str] = [] - self.response_format = { - "thoughts": { - "text": "thought", - "reasoning": "reasoning", - "plan": "- short bulleted\n- list that conveys\n- long-term plan", - "criticism": "constructive self-criticism", - "speak": "thoughts summary to say to user", - }, - "command": {"name": "command name", "args": {"arg name": "value"}}, - } - - def add_constraint(self, constraint: str) -> None: - """ - Add a constraint to the constraints list. - - Args: - constraint (str): The constraint to be added. 
- """ - self.constraints.append(constraint) - - def add_tool(self, tool: BaseTool) -> None: - self.commands.append(tool) - - def _generate_command_string(self, tool: BaseTool) -> str: - output = f"{tool.name}: {tool.description}" - output += f", args json schema: {json.dumps(tool.args)}" - return output - - def add_resource(self, resource: str) -> None: - """ - Add a resource to the resources list. - - Args: - resource (str): The resource to be added. - """ - self.resources.append(resource) - - def add_performance_evaluation(self, evaluation: str) -> None: - """ - Add a performance evaluation item to the performance_evaluation list. - - Args: - evaluation (str): The evaluation item to be added. - """ - self.performance_evaluation.append(evaluation) - - def _generate_numbered_list(self, items: list, item_type: str = "list") -> str: - """ - Generate a numbered list from given items based on the item_type. - - Args: - items (list): A list of items to be numbered. - item_type (str, optional): The type of items in the list. - Defaults to 'list'. - - Returns: - str: The formatted numbered list. - """ - if item_type == "command": - command_strings = [ - f"{i + 1}. {self._generate_command_string(item)}" - for i, item in enumerate(items) - ] - finish_description = ( - "use this to signal that you have finished all your objectives" - ) - finish_args = ( - '"response": "final response to let ' - 'people know you have finished your objectives"' - ) - finish_string = ( - f"{len(items) + 1}. {FINISH_NAME}: " - f"{finish_description}, args: {finish_args}" - ) - return "\n".join(command_strings + [finish_string]) - else: - return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) - - def generate_prompt_string(self) -> str: - """Generate a prompt string. - - Returns: - str: The generated prompt string. - """ - formatted_response_format = json.dumps(self.response_format, indent=4) - prompt_string = ( - f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n" - f"Commands:\n" - f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n" - f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n" - f"Performance Evaluation:\n" - f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" - f"You should only respond in JSON format as described below " - f"\nResponse Format: \n{formatted_response_format} " - f"\nEnsure the response can be parsed by Python json.loads" - ) - - return prompt_string - - -def get_prompt(tools: List[BaseTool]) -> str: - """Generates a prompt string. - - It includes various constraints, commands, resources, and performance evaluations. - - Returns: - str: The generated prompt string. - """ - - # Initialize the PromptGenerator object - prompt_generator = PromptGenerator() - - # Add constraints to the PromptGenerator object - prompt_generator.add_constraint( - "~4000 word limit for short term memory. " - "Your short term memory is short, " - "so immediately save important information to files." - ) - prompt_generator.add_constraint( - "If you are unsure how you previously did something " - "or want to recall past events, " - "thinking about similar events will help you remember." - ) - prompt_generator.add_constraint("No user assistance") - prompt_generator.add_constraint( - 'Exclusively use the commands listed in double quotes e.g. 
"command name"' - ) - - # Add commands to the PromptGenerator object - for tool in tools: - prompt_generator.add_tool(tool) - - # Add resources to the PromptGenerator object - prompt_generator.add_resource( - "Internet access for searches and information gathering." - ) - prompt_generator.add_resource("Long Term memory management.") - prompt_generator.add_resource( - "GPT-3.5 powered Agents for delegation of simple tasks." - ) - prompt_generator.add_resource("File output.") - - # Add performance evaluations to the PromptGenerator object - prompt_generator.add_performance_evaluation( - "Continuously review and analyze your actions " - "to ensure you are performing to the best of your abilities." - ) - prompt_generator.add_performance_evaluation( - "Constructively self-criticize your big-picture behavior constantly." - ) - prompt_generator.add_performance_evaluation( - "Reflect on past decisions and strategies to refine your approach." - ) - prompt_generator.add_performance_evaluation( - "Every command has a cost, so be smart and efficient. " - "Aim to complete tasks in the least number of steps." - ) - - # Generate the prompt string - prompt_string = prompt_generator.generate_prompt_string() - - return prompt_string diff --git a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/__init__.py b/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/__init__.py deleted file mode 100644 index d72e2d79dc110..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from langchain_experimental.autonomous_agents.baby_agi.baby_agi import BabyAGI -from langchain_experimental.autonomous_agents.baby_agi.task_creation import ( - TaskCreationChain, -) -from langchain_experimental.autonomous_agents.baby_agi.task_execution import ( - TaskExecutionChain, -) -from langchain_experimental.autonomous_agents.baby_agi.task_prioritization import ( - TaskPrioritizationChain, -) - -__all__ = [ - "BabyAGI", - "TaskPrioritizationChain", - "TaskExecutionChain", - "TaskCreationChain", -] diff --git a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py b/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py deleted file mode 100644 index d6c101ae59802..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/baby_agi.py +++ /dev/null @@ -1,223 +0,0 @@ -"""BabyAGI agent.""" - -from collections import deque -from typing import Any, Dict, List, Optional - -from langchain.chains.base import Chain -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.language_models import BaseLanguageModel -from langchain_core.vectorstores import VectorStore -from pydantic import BaseModel, ConfigDict, Field - -from langchain_experimental.autonomous_agents.baby_agi.task_creation import ( - TaskCreationChain, -) -from langchain_experimental.autonomous_agents.baby_agi.task_execution import ( - TaskExecutionChain, -) -from langchain_experimental.autonomous_agents.baby_agi.task_prioritization import ( - TaskPrioritizationChain, -) - - -# This class has a metaclass conflict: both `Chain` and `BaseModel` define a metaclass -# to use, and the two metaclasses attempt to define the same functions but -# in mutually-incompatible ways. It isn't clear how to resolve this, -# and this code predates mypy beginning to perform that check. 
-# -# Mypy errors: -# ``` -# Definition of "__repr_str__" in base class "Representation" is -# incompatible with definition in base class "BaseModel" [misc] -# Definition of "__repr_name__" in base class "Representation" is -# incompatible with definition in base class "BaseModel" [misc] -# Definition of "__rich_repr__" in base class "Representation" is -# incompatible with definition in base class "BaseModel" [misc] -# Definition of "__pretty__" in base class "Representation" is -# incompatible with definition in base class "BaseModel" [misc] -# Metaclass conflict: the metaclass of a derived class must be -# a (non-strict) subclass of the metaclasses of all its bases [misc] -# ``` -# -# TODO: look into refactoring this class in a way that avoids the mypy type errors -class BabyAGI(Chain, BaseModel): # type: ignore[misc] - """Controller model for the BabyAGI agent.""" - - task_list: deque = Field(default_factory=deque) - task_creation_chain: Chain = Field(...) - task_prioritization_chain: Chain = Field(...) - execution_chain: Chain = Field(...) - task_id_counter: int = Field(1) - vectorstore: VectorStore = Field(init=False) - max_iterations: Optional[int] = None - - model_config = ConfigDict( - arbitrary_types_allowed=True, - ) - - def add_task(self, task: Dict) -> None: - self.task_list.append(task) - - def print_task_list(self) -> None: - print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m") # noqa: T201 - for t in self.task_list: - print(str(t["task_id"]) + ": " + t["task_name"]) # noqa: T201 - - def print_next_task(self, task: Dict) -> None: - print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m") # noqa: T201 - print(str(task["task_id"]) + ": " + task["task_name"]) # noqa: T201 - - def print_task_result(self, result: str) -> None: - print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m") # noqa: T201 - print(result) # noqa: T201 - - @property - def input_keys(self) -> List[str]: - return ["objective"] - - @property - def output_keys(self) -> List[str]: - return [] - - def get_next_task( - self, result: str, task_description: str, objective: str, **kwargs: Any - ) -> List[Dict]: - """Get the next task.""" - task_names = [t["task_name"] for t in self.task_list] - - incomplete_tasks = ", ".join(task_names) - response = self.task_creation_chain.run( - result=result, - task_description=task_description, - incomplete_tasks=incomplete_tasks, - objective=objective, - **kwargs, - ) - new_tasks = response.split("\n") - return [ - {"task_name": task_name} for task_name in new_tasks if task_name.strip() - ] - - def prioritize_tasks( - self, this_task_id: int, objective: str, **kwargs: Any - ) -> List[Dict]: - """Prioritize tasks.""" - task_names = [t["task_name"] for t in list(self.task_list)] - next_task_id = int(this_task_id) + 1 - response = self.task_prioritization_chain.run( - task_names=", ".join(task_names), - next_task_id=str(next_task_id), - objective=objective, - **kwargs, - ) - new_tasks = response.split("\n") - prioritized_task_list = [] - for task_string in new_tasks: - if not task_string.strip(): - continue - task_parts = task_string.strip().split(".", 1) - if len(task_parts) == 2: - task_id = task_parts[0].strip() - task_name = task_parts[1].strip() - prioritized_task_list.append( - {"task_id": task_id, "task_name": task_name} - ) - return prioritized_task_list - - def _get_top_tasks(self, query: str, k: int) -> List[str]: - """Get the top k tasks based on the query.""" - results = self.vectorstore.similarity_search(query, k=k) - 
if not results: - return [] - return [str(item.metadata["task"]) for item in results] - - def execute_task(self, objective: str, task: str, k: int = 5, **kwargs: Any) -> str: - """Execute a task.""" - context = self._get_top_tasks(query=objective, k=k) - return self.execution_chain.run( - objective=objective, context="\n".join(context), task=task, **kwargs - ) - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - """Run the agent.""" - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - objective = inputs["objective"] - first_task = inputs.get("first_task", "Make a todo list") - self.add_task({"task_id": 1, "task_name": first_task}) - num_iters = 0 - while True: - if self.task_list: - self.print_task_list() - - # Step 1: Pull the first task - task = self.task_list.popleft() - self.print_next_task(task) - - # Step 2: Execute the task - result = self.execute_task( - objective, task["task_name"], callbacks=_run_manager.get_child() - ) - this_task_id = int(task["task_id"]) - self.print_task_result(result) - - # Step 3: Store the result in Pinecone - result_id = f"result_{task['task_id']}_{num_iters}" - self.vectorstore.add_texts( - texts=[result], - metadatas=[{"task": task["task_name"]}], - ids=[result_id], - ) - - # Step 4: Create new tasks and reprioritize task list - new_tasks = self.get_next_task( - result, - task["task_name"], - objective, - callbacks=_run_manager.get_child(), - ) - for new_task in new_tasks: - self.task_id_counter += 1 - new_task.update({"task_id": self.task_id_counter}) - self.add_task(new_task) - self.task_list = deque( - self.prioritize_tasks( - this_task_id, objective, callbacks=_run_manager.get_child() - ) - ) - num_iters += 1 - if self.max_iterations is not None and num_iters == self.max_iterations: - print( # noqa: T201 - "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m" - ) - break - return {} - - @classmethod - def from_llm( - cls, - llm: BaseLanguageModel, - vectorstore: VectorStore, - verbose: bool = False, - task_execution_chain: Optional[Chain] = None, - **kwargs: Any, - ) -> "BabyAGI": - """Initialize the BabyAGI Controller.""" - task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose) - task_prioritization_chain = TaskPrioritizationChain.from_llm( - llm, verbose=verbose - ) - if task_execution_chain is None: - execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose) - else: - execution_chain = task_execution_chain - return cls( # type: ignore[call-arg, call-arg, call-arg, call-arg] - task_creation_chain=task_creation_chain, - task_prioritization_chain=task_prioritization_chain, - execution_chain=execution_chain, - vectorstore=vectorstore, - **kwargs, - ) diff --git a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/task_creation.py b/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/task_creation.py deleted file mode 100644 index e99ab08f069cb..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/task_creation.py +++ /dev/null @@ -1,31 +0,0 @@ -from langchain.chains import LLMChain -from langchain_core.language_models import BaseLanguageModel -from langchain_core.prompts import PromptTemplate - - -class TaskCreationChain(LLMChain): - """Chain generating tasks.""" - - @classmethod - def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain: - """Get the response parser.""" - task_creation_template = ( - "You are an 
task creation AI that uses the result of an execution agent" - " to create new tasks with the following objective: {objective}," - " The last completed task has the result: {result}." - " This result was based on this task description: {task_description}." - " These are incomplete tasks: {incomplete_tasks}." - " Based on the result, create new tasks to be completed" - " by the AI system that do not overlap with incomplete tasks." - " Return the tasks as an array." - ) - prompt = PromptTemplate( - template=task_creation_template, - input_variables=[ - "result", - "task_description", - "incomplete_tasks", - "objective", - ], - ) - return cls(prompt=prompt, llm=llm, verbose=verbose) diff --git a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/task_execution.py b/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/task_execution.py deleted file mode 100644 index 1b57ef55a7007..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/task_execution.py +++ /dev/null @@ -1,22 +0,0 @@ -from langchain.chains import LLMChain -from langchain_core.language_models import BaseLanguageModel -from langchain_core.prompts import PromptTemplate - - -class TaskExecutionChain(LLMChain): - """Chain to execute tasks.""" - - @classmethod - def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain: - """Get the response parser.""" - execution_template = ( - "You are an AI who performs one task based on the following objective: " - "{objective}." - "Take into account these previously completed tasks: {context}." - " Your task: {task}. Response:" - ) - prompt = PromptTemplate( - template=execution_template, - input_variables=["objective", "context", "task"], - ) - return cls(prompt=prompt, llm=llm, verbose=verbose) diff --git a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/task_prioritization.py b/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/task_prioritization.py deleted file mode 100644 index 9b8cfba008183..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/baby_agi/task_prioritization.py +++ /dev/null @@ -1,25 +0,0 @@ -from langchain.chains import LLMChain -from langchain_core.language_models import BaseLanguageModel -from langchain_core.prompts import PromptTemplate - - -class TaskPrioritizationChain(LLMChain): - """Chain to prioritize tasks.""" - - @classmethod - def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain: - """Get the response parser.""" - task_prioritization_template = ( - "You are a task prioritization AI tasked with cleaning the formatting of " - "and reprioritizing the following tasks: {task_names}." - " Consider the ultimate objective of your team: {objective}." - " Do not remove any tasks. Return the result as a numbered list, like:" - " #. First task" - " #. Second task" - " Start the task list with number {next_task_id}." 
- ) - prompt = PromptTemplate( - template=task_prioritization_template, - input_variables=["task_names", "next_task_id", "objective"], - ) - return cls(prompt=prompt, llm=llm, verbose=verbose) diff --git a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/__init__.py b/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/hugginggpt.py b/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/hugginggpt.py deleted file mode 100644 index 7a92cdd1adfb3..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/hugginggpt.py +++ /dev/null @@ -1,34 +0,0 @@ -from typing import List - -from langchain.base_language import BaseLanguageModel -from langchain_core.tools import BaseTool - -from langchain_experimental.autonomous_agents.hugginggpt.repsonse_generator import ( - load_response_generator, -) -from langchain_experimental.autonomous_agents.hugginggpt.task_executor import ( - TaskExecutor, -) -from langchain_experimental.autonomous_agents.hugginggpt.task_planner import ( - load_chat_planner, -) - - -class HuggingGPT: - """Agent for interacting with HuggingGPT.""" - - def __init__(self, llm: BaseLanguageModel, tools: List[BaseTool]): - self.llm = llm - self.tools = tools - self.chat_planner = load_chat_planner(llm) - self.response_generator = load_response_generator(llm) - self.task_executor: TaskExecutor - - def run(self, input: str) -> str: - plan = self.chat_planner.plan(inputs={"input": input, "hf_tools": self.tools}) - self.task_executor = TaskExecutor(plan) - self.task_executor.run() - response = self.response_generator.generate( - {"task_execution": self.task_executor} - ) - return response diff --git a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/repsonse_generator.py b/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/repsonse_generator.py deleted file mode 100644 index e12d7b31527d5..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/repsonse_generator.py +++ /dev/null @@ -1,46 +0,0 @@ -from typing import Any, List, Optional - -from langchain.base_language import BaseLanguageModel -from langchain.chains import LLMChain -from langchain_core.callbacks.manager import Callbacks -from langchain_core.prompts import PromptTemplate - - -class ResponseGenerationChain(LLMChain): - """Chain to execute tasks.""" - - @classmethod - def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain: - execution_template = ( - "The AI assistant has parsed the user input into several tasks" - "and executed them. The results are as follows:\n" - "{task_execution}" - "\nPlease summarize the results and generate a response." 
- ) - prompt = PromptTemplate( - template=execution_template, - input_variables=["task_execution"], - ) - return cls(prompt=prompt, llm=llm, verbose=verbose) - - -class ResponseGenerator: - """Generates a response based on the input.""" - - def __init__(self, llm_chain: LLMChain, stop: Optional[List] = None): - self.llm_chain = llm_chain - self.stop = stop - - def generate(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> str: - """Given input, decided what to do.""" - llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks) - return llm_response - - -def load_response_generator(llm: BaseLanguageModel) -> ResponseGenerator: - """Load the ResponseGenerator.""" - - llm_chain = ResponseGenerationChain.from_llm(llm) - return ResponseGenerator( - llm_chain=llm_chain, - ) diff --git a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/task_executor.py b/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/task_executor.py deleted file mode 100644 index 904ff7f7fb3b9..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/task_executor.py +++ /dev/null @@ -1,151 +0,0 @@ -import copy -import uuid -from typing import Dict, List - -import numpy as np -from langchain_core.tools import BaseTool - -from langchain_experimental.autonomous_agents.hugginggpt.task_planner import Plan - - -class Task: - """Task to be executed.""" - - def __init__(self, task: str, id: int, dep: List[int], args: Dict, tool: BaseTool): - self.task = task - self.id = id - self.dep = dep - self.args = args - self.tool = tool - self.status = "pending" - self.message = "" - self.result = "" - - def __str__(self) -> str: - return f"{self.task}({self.args})" - - def save_product(self) -> None: - import cv2 - - if self.task == "video_generator": - # ndarray to video - product = np.array(self.product) - nframe, height, width, _ = product.shape - video_filename = uuid.uuid4().hex[:6] + ".mp4" - fps = 30 # Frames per second - fourcc = cv2.VideoWriter_fourcc(*"mp4v") # type: ignore - video_out = cv2.VideoWriter(video_filename, fourcc, fps, (width, height)) - for frame in self.product: - video_out.write(frame) - video_out.release() - self.result = video_filename - elif self.task == "image_generator": - # PIL.Image to image - filename = uuid.uuid4().hex[:6] + ".png" - self.product.save(filename) # type: ignore - self.result = filename - - def completed(self) -> bool: - return self.status == "completed" - - def failed(self) -> bool: - return self.status == "failed" - - def pending(self) -> bool: - return self.status == "pending" - - def run(self) -> str: - from diffusers.utils import load_image - - try: - new_args = copy.deepcopy(self.args) - for k, v in new_args.items(): - if k == "image": - new_args["image"] = load_image(v) - if self.task in ["video_generator", "image_generator", "text_reader"]: - self.product = self.tool(**new_args) - else: - self.result = self.tool(**new_args) - except Exception as e: - self.status = "failed" - self.message = str(e) - return self.message - - self.status = "completed" - self.save_product() - - return self.result - - -class TaskExecutor: - """Load tools and execute tasks.""" - - def __init__(self, plan: Plan): - self.plan = plan - self.tasks = [] - self.id_task_map = {} - self.status = "pending" - for step in self.plan.steps: - task = Task(step.task, step.id, step.dep, step.args, step.tool) - self.tasks.append(task) - self.id_task_map[step.id] = task - - def completed(self) -> bool: - return 
all(task.completed() for task in self.tasks) - - def failed(self) -> bool: - return any(task.failed() for task in self.tasks) - - def pending(self) -> bool: - return any(task.pending() for task in self.tasks) - - def check_dependency(self, task: Task) -> bool: - for dep_id in task.dep: - if dep_id == -1: - continue - dep_task = self.id_task_map[dep_id] - if dep_task.failed() or dep_task.pending(): - return False - return True - - def update_args(self, task: Task) -> None: - for dep_id in task.dep: - if dep_id == -1: - continue - dep_task = self.id_task_map[dep_id] - for k, v in task.args.items(): - if f"<resource-{dep_id}>" in v: - task.args[k] = task.args[k].replace( - f"<resource-{dep_id}>", dep_task.result - ) - - def run(self) -> str: - for task in self.tasks: - print(f"running {task}") # noqa: T201 - if task.pending() and self.check_dependency(task): - self.update_args(task) - task.run() - if self.completed(): - self.status = "completed" - elif self.failed(): - self.status = "failed" - else: - self.status = "pending" - return self.status - - def __str__(self) -> str: - result = "" - for task in self.tasks: - result += f"{task}\n" - result += f"status: {task.status}\n" - if task.failed(): - result += f"message: {task.message}\n" - if task.completed(): - result += f"result: {task.result}\n" - return result - - def __repr__(self) -> str: - return self.__str__() - - def describe(self) -> str: - return self.__str__() diff --git a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/task_planner.py b/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/task_planner.py deleted file mode 100644 index c6519ed7de455..0000000000000 --- a/libs/experimental/langchain_experimental/autonomous_agents/hugginggpt/task_planner.py +++ /dev/null @@ -1,174 +0,0 @@ -import json -import re -from abc import abstractmethod -from typing import Any, Dict, List, Optional, Union - -from langchain.base_language import BaseLanguageModel -from langchain.chains import LLMChain -from langchain_core.callbacks.manager import Callbacks -from langchain_core.prompts.chat import ( - AIMessagePromptTemplate, - ChatPromptTemplate, - HumanMessagePromptTemplate, - SystemMessagePromptTemplate, -) -from langchain_core.tools import BaseTool -from pydantic import BaseModel - -DEMONSTRATIONS = [ - { - "role": "user", - "content": "please show me a video and an image of (based on the text) 'a boy is running' and dub it", # noqa: E501 - }, - { - "role": "assistant", - "content": '[{{"task": "video_generator", "id": 0, "dep": [-1], "args": {{"prompt": "a boy is running" }}}}, {{"task": "text_reader", "id": 1, "dep": [-1], "args": {{"text": "a boy is running" }}}}, {{"task": "image_generator", "id": 2, "dep": [-1], "args": {{"prompt": "a boy is running" }}}}]', # noqa: E501 - }, - { - "role": "user", - "content": "Give you some pictures e1.jpg, e2.png, e3.jpg, help me count the number of sheep?", # noqa: E501 - }, - { - "role": "assistant", - "content": '[ {{"task": "image_qa", "id": 0, "dep": [-1], "args": {{"image": "e1.jpg", "question": "How many sheep in the picture"}}}}, {{"task": "image_qa", "id": 1, "dep": [-1], "args": {{"image": "e2.jpg", "question": "How many sheep in the picture"}}}}, {{"task": "image_qa", "id": 2, "dep": [-1], "args": {{"image": "e3.jpg", "question": "How many sheep in the picture"}}}}]', # noqa: E501 - }, -] - - -class TaskPlaningChain(LLMChain): - """Chain to execute tasks.""" - - @classmethod - def from_llm( - cls, - llm: BaseLanguageModel, - demos: List[Dict] = DEMONSTRATIONS, - verbose: bool = True, - ) ->
LLMChain: - """Get the response parser.""" - system_template = """#1 Task Planning Stage: The AI assistant can parse user input to several tasks: [{{"task": task, "id": task_id, "dep": dependency_task_id, "args": {{"input name": text may contain }}}}]. The special tag "dep_id" refer to the one generated text/image/audio in the dependency task (Please consider whether the dependency task generates resources of this type.) and "dep_id" must be in "dep" list. The "dep" field denotes the ids of the previous prerequisite tasks which generate a new resource that the current task relies on. The task MUST be selected from the following tools (along with tool description, input name and output type): {tools}. There may be multiple tasks of the same type. Think step by step about all the tasks needed to resolve the user's request. Parse out as few tasks as possible while ensuring that the user request can be resolved. Pay attention to the dependencies and order among tasks. If the user input can't be parsed, you need to reply empty JSON [].""" # noqa: E501 - human_template = """Now I input: {input}.""" - system_message_prompt = SystemMessagePromptTemplate.from_template( - system_template - ) - human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) - - demo_messages: List[ - Union[HumanMessagePromptTemplate, AIMessagePromptTemplate] - ] = [] - for demo in demos: - if demo["role"] == "user": - demo_messages.append( - HumanMessagePromptTemplate.from_template(demo["content"]) - ) - else: - demo_messages.append( - AIMessagePromptTemplate.from_template(demo["content"]) - ) - # demo_messages.append(message) - - prompt = ChatPromptTemplate.from_messages( - [system_message_prompt, *demo_messages, human_message_prompt] - ) - - return cls(prompt=prompt, llm=llm, verbose=verbose) - - -class Step: - """A step in the plan.""" - - def __init__( - self, task: str, id: int, dep: List[int], args: Dict[str, str], tool: BaseTool - ): - self.task = task - self.id = id - self.dep = dep - self.args = args - self.tool = tool - - -class Plan: - """A plan to execute.""" - - def __init__(self, steps: List[Step]): - self.steps = steps - - def __str__(self) -> str: - return str([str(step) for step in self.steps]) - - def __repr__(self) -> str: - return str(self) - - -class BasePlanner(BaseModel): - """Base class for a planner.""" - - @abstractmethod - def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan: - """Given input, decide what to do.""" - - @abstractmethod - async def aplan( - self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any - ) -> Plan: - """Asynchronous Given input, decide what to do.""" - - -class PlanningOutputParser(BaseModel): - """Parses the output of the planning stage.""" - - def parse(self, text: str, hf_tools: List[BaseTool]) -> Plan: - """Parse the output of the planning stage. - - Args: - text: The output of the planning stage. - hf_tools: The tools available. - - Returns: - The plan. 
- """ - steps = [] - for v in json.loads(re.findall(r"\[.*\]", text)[0]): - choose_tool = None - for tool in hf_tools: - if tool.name == v["task"]: - choose_tool = tool - break - if choose_tool: - steps.append(Step(v["task"], v["id"], v["dep"], v["args"], tool)) - return Plan(steps=steps) - - -class TaskPlanner(BasePlanner): - """Planner for tasks.""" - - llm_chain: LLMChain - output_parser: PlanningOutputParser - stop: Optional[List] = None - - def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan: - """Given input, decided what to do.""" - inputs["tools"] = [ - f"{tool.name}: {tool.description}" for tool in inputs["hf_tools"] - ] - llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks) - return self.output_parser.parse(llm_response, inputs["hf_tools"]) - - async def aplan( - self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any - ) -> Plan: - """Asynchronous Given input, decided what to do.""" - inputs["hf_tools"] = [ - f"{tool.name}: {tool.description}" for tool in inputs["hf_tools"] - ] - llm_response = await self.llm_chain.arun( - **inputs, stop=self.stop, callbacks=callbacks - ) - return self.output_parser.parse(llm_response, inputs["hf_tools"]) - - -def load_chat_planner(llm: BaseLanguageModel) -> TaskPlanner: - """Load the chat planner.""" - - llm_chain = TaskPlaningChain.from_llm(llm) - return TaskPlanner(llm_chain=llm_chain, output_parser=PlanningOutputParser()) diff --git a/libs/experimental/langchain_experimental/chat_models/__init__.py b/libs/experimental/langchain_experimental/chat_models/__init__.py deleted file mode 100644 index 07be8ea7b738b..0000000000000 --- a/libs/experimental/langchain_experimental/chat_models/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -"""**Chat Models** are a variation on language models. - -While Chat Models use language models under the hood, the interface they expose -is a bit different. Rather than expose a "text in, text out" API, they expose -an interface where "chat messages" are the inputs and outputs. - -**Class hierarchy:** - -.. code-block:: - - BaseLanguageModel --> BaseChatModel --> # Examples: ChatOpenAI, ChatGooglePalm - -**Main helpers:** - -.. code-block:: - - AIMessage, BaseMessage, HumanMessage -""" # noqa: E501 - -from langchain_experimental.chat_models.llm_wrapper import ( - Llama2Chat, - Mixtral, - Orca, - Vicuna, -) - -__all__ = ["Llama2Chat", "Orca", "Vicuna", "Mixtral"] diff --git a/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py b/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py deleted file mode 100644 index 115bd8a6d292b..0000000000000 --- a/libs/experimental/langchain_experimental/chat_models/llm_wrapper.py +++ /dev/null @@ -1,196 +0,0 @@ -"""Generic Wrapper for chat LLMs, with sample implementations -for Llama-2-chat, Llama-2-instruct and Vicuna models. -""" - -from typing import Any, List, Optional, cast - -from langchain.schema import ( - AIMessage, - BaseMessage, - ChatGeneration, - ChatResult, - HumanMessage, - LLMResult, - SystemMessage, -) -from langchain_core.callbacks.manager import ( - AsyncCallbackManagerForLLMRun, - CallbackManagerForLLMRun, -) -from langchain_core.language_models import LLM, BaseChatModel - -DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. 
Please ensure that your responses are socially unbiased and positive in nature. - -If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""" # noqa: E501 - - -class ChatWrapper(BaseChatModel): - """Wrapper for chat LLMs.""" - - llm: LLM - sys_beg: str - sys_end: str - ai_n_beg: str - ai_n_end: str - usr_n_beg: str - usr_n_end: str - usr_0_beg: Optional[str] = None - usr_0_end: Optional[str] = None - - system_message: SystemMessage = SystemMessage(content=DEFAULT_SYSTEM_PROMPT) - - def _generate( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> ChatResult: - llm_input = self._to_chat_prompt(messages) - llm_result = self.llm._generate( - prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs - ) - return self._to_chat_result(llm_result) - - async def _agenerate( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> ChatResult: - llm_input = self._to_chat_prompt(messages) - llm_result = await self.llm._agenerate( - prompts=[llm_input], stop=stop, run_manager=run_manager, **kwargs - ) - return self._to_chat_result(llm_result) - - def _to_chat_prompt( - self, - messages: List[BaseMessage], - ) -> str: - """Convert a list of messages into a prompt format expected by wrapped LLM.""" - if not messages: - raise ValueError("at least one HumanMessage must be provided") - - if not isinstance(messages[0], SystemMessage): - messages = [self.system_message] + messages - - if not isinstance(messages[1], HumanMessage): - raise ValueError( - "messages list must start with a SystemMessage or UserMessage" - ) - - if not isinstance(messages[-1], HumanMessage): - raise ValueError("last message must be a HumanMessage") - - prompt_parts = [] - - if self.usr_0_beg is None: - self.usr_0_beg = self.usr_n_beg - - if self.usr_0_end is None: - self.usr_0_end = self.usr_n_end - - prompt_parts.append( - self.sys_beg + cast(str, messages[0].content) + self.sys_end - ) - prompt_parts.append( - self.usr_0_beg + cast(str, messages[1].content) + self.usr_0_end - ) - - for ai_message, human_message in zip(messages[2::2], messages[3::2]): - if not isinstance(ai_message, AIMessage) or not isinstance( - human_message, HumanMessage - ): - raise ValueError( - "messages must be alternating human- and ai-messages, " - "optionally prepended by a system message" - ) - - prompt_parts.append( - self.ai_n_beg + cast(str, ai_message.content) + self.ai_n_end - ) - prompt_parts.append( - self.usr_n_beg + cast(str, human_message.content) + self.usr_n_end - ) - - return "".join(prompt_parts) - - @staticmethod - def _to_chat_result(llm_result: LLMResult) -> ChatResult: - chat_generations = [] - - for g in llm_result.generations[0]: - chat_generation = ChatGeneration( - message=AIMessage(content=g.text), generation_info=g.generation_info - ) - chat_generations.append(chat_generation) - - return ChatResult( - generations=chat_generations, llm_output=llm_result.llm_output - ) - - -class Llama2Chat(ChatWrapper): - """Wrapper for Llama-2-chat model.""" - - @property - def _llm_type(self) -> str: - return "llama-2-chat" - - sys_beg: str = "<s>[INST] <<SYS>>\n" - sys_end: str = "\n<</SYS>>\n\n" - ai_n_beg: str = " " - ai_n_end: str = " </s>" - usr_n_beg: str = "<s>[INST] " - usr_n_end: str = "
[/INST]" - usr_0_beg: str = "" - usr_0_end: str = " [/INST]" - - -class Mixtral(ChatWrapper): - """See https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1#instruction-format""" - - @property - def _llm_type(self) -> str: - return "mixtral" - - sys_beg: str = "[INST] " - sys_end: str = "\n" - ai_n_beg: str = " " - ai_n_end: str = " " - usr_n_beg: str = " [INST] " - usr_n_end: str = " [/INST]" - usr_0_beg: str = "" - usr_0_end: str = " [/INST]" - - -class Orca(ChatWrapper): - """Wrapper for Orca-style models.""" - - @property - def _llm_type(self) -> str: - return "orca-style" - - sys_beg: str = "### System:\n" - sys_end: str = "\n\n" - ai_n_beg: str = "### Assistant:\n" - ai_n_end: str = "\n\n" - usr_n_beg: str = "### User:\n" - usr_n_end: str = "\n\n" - - -class Vicuna(ChatWrapper): - """Wrapper for Vicuna-style models.""" - - @property - def _llm_type(self) -> str: - return "vicuna-style" - - sys_beg: str = "" - sys_end: str = " " - ai_n_beg: str = "ASSISTANT: " - ai_n_end: str = " " - usr_n_beg: str = "USER: " - usr_n_end: str = " " diff --git a/libs/experimental/langchain_experimental/comprehend_moderation/__init__.py b/libs/experimental/langchain_experimental/comprehend_moderation/__init__.py deleted file mode 100644 index 17a07cc8ad12d..0000000000000 --- a/libs/experimental/langchain_experimental/comprehend_moderation/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -**Comprehend Moderation** is used to detect and handle `Personally Identifiable Information (PII)`, -`toxicity`, and `prompt safety` in text. - -The Langchain experimental package includes the **AmazonComprehendModerationChain** class -for the comprehend moderation tasks. It is based on `Amazon Comprehend` service. -This class can be configured with specific moderation settings like PII labels, redaction, -toxicity thresholds, and prompt safety thresholds. 
- -See more at https://aws.amazon.com/comprehend/ - -`Amazon Comprehend` service is used by several other classes: -- **ComprehendToxicity** class is used to check the toxicity of text prompts using - `AWS Comprehend service` and take actions based on the configuration -- **ComprehendPromptSafety** class is used to validate the safety of given prompt - text, raising an error if unsafe content is detected based on the specified threshold -- **ComprehendPII** class is designed to handle - `Personally Identifiable Information (PII)` moderation tasks, - detecting and managing PII entities in text inputs -""" # noqa: E501 - -from langchain_experimental.comprehend_moderation.amazon_comprehend_moderation import ( - AmazonComprehendModerationChain, -) -from langchain_experimental.comprehend_moderation.base_moderation import BaseModeration -from langchain_experimental.comprehend_moderation.base_moderation_callbacks import ( - BaseModerationCallbackHandler, -) -from langchain_experimental.comprehend_moderation.base_moderation_config import ( - BaseModerationConfig, - ModerationPiiConfig, - ModerationPromptSafetyConfig, - ModerationToxicityConfig, -) -from langchain_experimental.comprehend_moderation.pii import ComprehendPII -from langchain_experimental.comprehend_moderation.prompt_safety import ( - ComprehendPromptSafety, -) -from langchain_experimental.comprehend_moderation.toxicity import ComprehendToxicity - -__all__ = [ - "BaseModeration", - "ComprehendPII", - "ComprehendPromptSafety", - "ComprehendToxicity", - "BaseModerationConfig", - "ModerationPiiConfig", - "ModerationToxicityConfig", - "ModerationPromptSafetyConfig", - "BaseModerationCallbackHandler", - "AmazonComprehendModerationChain", -] diff --git a/libs/experimental/langchain_experimental/comprehend_moderation/amazon_comprehend_moderation.py b/libs/experimental/langchain_experimental/comprehend_moderation/amazon_comprehend_moderation.py deleted file mode 100644 index a30ea7f43911e..0000000000000 --- a/libs/experimental/langchain_experimental/comprehend_moderation/amazon_comprehend_moderation.py +++ /dev/null @@ -1,192 +0,0 @@ -from typing import Any, Dict, List, Optional - -from langchain.chains.base import Chain -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from pydantic import model_validator - -from langchain_experimental.comprehend_moderation.base_moderation import BaseModeration -from langchain_experimental.comprehend_moderation.base_moderation_callbacks import ( - BaseModerationCallbackHandler, -) -from langchain_experimental.comprehend_moderation.base_moderation_config import ( - BaseModerationConfig, -) - - -class AmazonComprehendModerationChain(Chain): - """Moderation Chain, based on `Amazon Comprehend` service. - - See more at https://aws.amazon.com/comprehend/ - """ - - output_key: str = "output" #: :meta private: - """Key used to fetch/store the output in data containers. Defaults to `output`""" - - input_key: str = "input" #: :meta private: - """Key used to fetch/store the input in data containers. Defaults to `input`""" - - moderation_config: BaseModerationConfig = BaseModerationConfig() - """ - Configuration settings for moderation, - defaults to BaseModerationConfig with default values - """ - - client: Optional[Any] = None - """boto3 client object for connection to Amazon Comprehend""" - - region_name: Optional[str] = None - """The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable - or region specified in ~/.aws/config in case it is not provided here. 
- """ - - credentials_profile_name: Optional[str] = None - """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which - has either access keys or role information specified. - If not specified, the default credential profile or, if on an EC2 instance, - credentials from IMDS will be used. - See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html - """ - - moderation_callback: Optional[BaseModerationCallbackHandler] = None - """Callback handler for moderation, this is different - from regular callbacks which can be used in addition to this.""" - - unique_id: Optional[str] = None - """A unique id that can be used to identify or group a user or session""" - - @model_validator(mode="before") - @classmethod - def create_client(cls, values: Dict[str, Any]) -> Any: - """ - Creates an Amazon Comprehend client. - - Args: - values (Dict[str, Any]): A dictionary containing configuration values. - - Returns: - Dict[str, Any]: A dictionary with the updated configuration values, - including the Amazon Comprehend client. - - Raises: - ModuleNotFoundError: If the 'boto3' package is not installed. - ValueError: If there is an issue importing 'boto3' or loading - AWS credentials. - - Example: - .. code-block:: python - - config = { - "credentials_profile_name": "my-profile", - "region_name": "us-west-2" - } - updated_config = create_client(config) - comprehend_client = updated_config["client"] - """ - - if values.get("client") is not None: - return values - try: - import boto3 - - if values.get("credentials_profile_name"): - session = boto3.Session(profile_name=values["credentials_profile_name"]) - else: - # use default credentials - session = boto3.Session() - - client_params = {} - if values.get("region_name"): - client_params["region_name"] = values["region_name"] - - values["client"] = session.client("comprehend", **client_params) - - return values - except ImportError: - raise ModuleNotFoundError( - "Could not import boto3 python package. " - "Please install it with `pip install boto3`." - ) - except Exception as e: - raise ValueError( - "Could not load credentials to authenticate with AWS client. " - "Please check that credentials in the specified " - f"profile name are valid. {e}" - ) from e - - @property - def output_keys(self) -> List[str]: - """ - Returns a list of output keys. - - This method defines the output keys that will be used to access the output - values produced by the chain or function. It ensures that the specified keys - are available to access the outputs. - - Returns: - List[str]: A list of output keys. - - Note: - This method is considered private and may not be intended for direct - external use. - - """ - return [self.output_key] - - @property - def input_keys(self) -> List[str]: - """ - Returns a list of input keys expected by the prompt. - - This method defines the input keys that the prompt expects in order to perform - its processing. It ensures that the specified keys are available for providing - input to the prompt. - - Returns: - List[str]: A list of input keys. - - Note: - This method is considered private and may not be intended for direct - external use. - """ - return [self.input_key] - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - """ - Executes the moderation process on the input text and returns the processed - output. - - This internal method performs the moderation process on the input text. 
It - converts the input prompt value to plain text, applies the specified filters, - and then converts the filtered output back to a suitable prompt value object. - Additionally, it provides the option to log information about the run using - the provided `run_manager`. - - Args: - inputs: A dictionary containing input values - run_manager: A run manager to handle run-related events. Default is None - - Returns: - Dict[str, str]: A dictionary containing the processed output of the - moderation process. - - Raises: - ValueError: If there is an error during the moderation process - """ - - if run_manager: - run_manager.on_text("Running AmazonComprehendModerationChain...\n") - - moderation = BaseModeration( - client=self.client, - config=self.moderation_config, - moderation_callback=self.moderation_callback, - unique_id=self.unique_id, - run_manager=run_manager, - ) - response = moderation.moderate(prompt=inputs[self.input_keys[0]]) - - return {self.output_key: response} diff --git a/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation.py b/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation.py deleted file mode 100644 index be236f5ecebaa..0000000000000 --- a/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation.py +++ /dev/null @@ -1,185 +0,0 @@ -import uuid -from typing import Any, Callable, Optional, cast - -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.messages import AIMessage, HumanMessage -from langchain_core.prompt_values import ChatPromptValue, StringPromptValue - -from langchain_experimental.comprehend_moderation.pii import ComprehendPII -from langchain_experimental.comprehend_moderation.prompt_safety import ( - ComprehendPromptSafety, -) -from langchain_experimental.comprehend_moderation.toxicity import ComprehendToxicity - - -class BaseModeration: - """Base class for moderation.""" - - def __init__( - self, - client: Any, - config: Optional[Any] = None, - moderation_callback: Optional[Any] = None, - unique_id: Optional[str] = None, - run_manager: Optional[CallbackManagerForChainRun] = None, - ): - self.client = client - self.config = config - self.moderation_callback = moderation_callback - self.unique_id = unique_id - self.chat_message_index = 0 - self.run_manager = run_manager - self.chain_id = str(uuid.uuid4()) - - def _convert_prompt_to_text(self, prompt: Any) -> str: - input_text = str() - - if isinstance(prompt, StringPromptValue): - input_text = prompt.text - elif isinstance(prompt, str): - input_text = prompt - elif isinstance(prompt, ChatPromptValue): - """ - We will just check the last message in the message Chain of a - ChatPromptTemplate. The typical chronology is - SystemMessage > HumanMessage > AIMessage and so on. However assuming - that with every chat the chain is invoked we will only check the last - message. This is assuming that all previous messages have been checked - already. Only HumanMessage and AIMessage will be checked. We can perhaps - loop through and take advantage of the additional_kwargs property in the - HumanMessage and AIMessage schema to mark messages that have been moderated. - However that means that this class could generate multiple text chunks - and moderate() logics would need to be updated. This also means some - complexity in re-constructing the prompt while keeping the messages in - sequence. 
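To make the behaviour described in this inline note concrete, a small standalone sketch of the "check only the last message" convention, using core message types (the messages are invented):

```python
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.prompt_values import ChatPromptValue

prompt = ChatPromptValue(
    messages=[
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="My SSN is 123-45-6789."),
    ]
)

# Only the most recent message would be sent to moderation; earlier ones
# are assumed to have been checked on previous invocations of the chain.
last = prompt.messages[-1]
print(type(last).__name__, last.content)
```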
- """ - message = prompt.messages[-1] - self.chat_message_index = len(prompt.messages) - 1 - if isinstance(message, HumanMessage): - input_text = cast(str, message.content) - - if isinstance(message, AIMessage): - input_text = cast(str, message.content) - else: - raise ValueError( - f"Invalid input type {type(input_text)}. " - "Must be a PromptValue, str, or list of BaseMessages." - ) - return input_text - - def _convert_text_to_prompt(self, prompt: Any, text: str) -> Any: - if isinstance(prompt, StringPromptValue): - return StringPromptValue(text=text) - elif isinstance(prompt, str): - return text - elif isinstance(prompt, ChatPromptValue): - # Copy the messages because we may need to mutate them. - # We don't want to mutate data we don't own. - messages = list(prompt.messages) - - message = messages[self.chat_message_index] - - if isinstance(message, HumanMessage): - messages[self.chat_message_index] = HumanMessage( - content=text, - example=message.example, - additional_kwargs=message.additional_kwargs, - ) - if isinstance(message, AIMessage): - messages[self.chat_message_index] = AIMessage( - content=text, - example=message.example, - additional_kwargs=message.additional_kwargs, - ) - return ChatPromptValue(messages=messages) - else: - raise ValueError( - f"Invalid input type {type(input)}. " - "Must be a PromptValue, str, or list of BaseMessages." - ) - - def _moderation_class(self, moderation_class: Any) -> Callable: - return moderation_class( - client=self.client, - callback=self.moderation_callback, - unique_id=self.unique_id, - chain_id=self.chain_id, - ).validate - - def _log_message_for_verbose(self, message: str) -> None: - if self.run_manager: - self.run_manager.on_text(message) - - def moderate(self, prompt: Any) -> str: - """Moderate the input prompt.""" - - from langchain_experimental.comprehend_moderation.base_moderation_config import ( # noqa: E501 - ModerationPiiConfig, - ModerationPromptSafetyConfig, - ModerationToxicityConfig, - ) - from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( # noqa: E501 - ModerationPiiError, - ModerationPromptSafetyError, - ModerationToxicityError, - ) - - try: - # convert prompt to text - input_text = self._convert_prompt_to_text(prompt=prompt) - output_text = str() - - # perform moderation - filter_functions = { - "pii": ComprehendPII, - "toxicity": ComprehendToxicity, - "prompt_safety": ComprehendPromptSafety, - } - - filters = self.config.filters # type: ignore - - for _filter in filters: - filter_name = ( - "pii" - if isinstance(_filter, ModerationPiiConfig) - else ( - "toxicity" - if isinstance(_filter, ModerationToxicityConfig) - else ( - "prompt_safety" - if isinstance(_filter, ModerationPromptSafetyConfig) - else None - ) - ) - ) - if filter_name in filter_functions: - self._log_message_for_verbose( - f"Running {filter_name} Validation...\n" - ) - validation_fn = self._moderation_class( - moderation_class=filter_functions[filter_name] - ) - input_text = input_text if not output_text else output_text - output_text = validation_fn( - prompt_value=input_text, - config=_filter.dict(), - ) - - # convert text to prompt and return - return self._convert_text_to_prompt(prompt=prompt, text=output_text) - - except ModerationPiiError as e: - self._log_message_for_verbose(f"Found PII content..stopping..\n{str(e)}\n") - raise e - except ModerationToxicityError as e: - self._log_message_for_verbose( - f"Found Toxic content..stopping..\n{str(e)}\n" - ) - raise e - except ModerationPromptSafetyError as e: - 
self._log_message_for_verbose( - f"Found Harmful intention..stopping..\n{str(e)}\n" - ) - raise e - except Exception as e: - raise e diff --git a/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation_callbacks.py b/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation_callbacks.py deleted file mode 100644 index dd39c14608d79..0000000000000 --- a/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation_callbacks.py +++ /dev/null @@ -1,67 +0,0 @@ -from typing import Any, Callable, Dict - - -class BaseModerationCallbackHandler: - """Base class for moderation callback handlers.""" - - def __init__(self) -> None: - if ( - self._is_method_unchanged( - BaseModerationCallbackHandler.on_after_pii, self.on_after_pii - ) - and self._is_method_unchanged( - BaseModerationCallbackHandler.on_after_toxicity, self.on_after_toxicity - ) - and self._is_method_unchanged( - BaseModerationCallbackHandler.on_after_prompt_safety, - self.on_after_prompt_safety, - ) - ): - raise NotImplementedError( - "Subclasses must override at least one of on_after_pii(), " - "on_after_toxicity(), or on_after_prompt_safety() functions." - ) - - def _is_method_unchanged( - self, base_method: Callable, derived_method: Callable - ) -> bool: - return base_method.__qualname__ == derived_method.__qualname__ - - async def on_after_pii( - self, moderation_beacon: Dict[str, Any], unique_id: str, **kwargs: Any - ) -> None: - """Run after PII validation is complete.""" - pass - - async def on_after_toxicity( - self, moderation_beacon: Dict[str, Any], unique_id: str, **kwargs: Any - ) -> None: - """Run after Toxicity validation is complete.""" - pass - - async def on_after_prompt_safety( - self, moderation_beacon: Dict[str, Any], unique_id: str, **kwargs: Any - ) -> None: - """Run after Prompt Safety validation is complete.""" - pass - - @property - def pii_callback(self) -> bool: - return ( - self.on_after_pii.__func__ # type: ignore - is not BaseModerationCallbackHandler.on_after_pii - ) - - @property - def toxicity_callback(self) -> bool: - return ( - self.on_after_toxicity.__func__ # type: ignore - is not BaseModerationCallbackHandler.on_after_toxicity - ) - - @property - def prompt_safety_callback(self) -> bool: - return ( - self.on_after_prompt_safety.__func__ # type: ignore - is not BaseModerationCallbackHandler.on_after_prompt_safety - ) diff --git a/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation_config.py b/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation_config.py deleted file mode 100644 index eaa371d99fdb4..0000000000000 --- a/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation_config.py +++ /dev/null @@ -1,61 +0,0 @@ -from typing import List, Union - -from pydantic import BaseModel - - -class ModerationPiiConfig(BaseModel): - """Configuration for PII moderation filter.""" - - threshold: float = 0.5 - """Threshold for PII confidence score, defaults to 0.5 i.e. 50%""" - - labels: List[str] = [] - """ - List of PII Universal Labels. - Defaults to `list[]` - """ - - redact: bool = False - """Whether to perform redaction of detected PII entities""" - - mask_character: str = "*" - """Redaction mask character in case redact=True, defaults to asterisk (*)""" - - -class ModerationToxicityConfig(BaseModel): - """Configuration for Toxicity moderation filter.""" - - threshold: float = 0.5 - """Threshold for Toxic label confidence score, defaults to 0.5 i.e. 
50%""" - - labels: List[str] = [] - """List of toxic labels, defaults to `list[]`""" - - -class ModerationPromptSafetyConfig(BaseModel): - """Configuration for Prompt Safety moderation filter.""" - - threshold: float = 0.5 - """ - Threshold for Prompt Safety classification - confidence score, defaults to 0.5 i.e. 50% - """ - - -class BaseModerationConfig(BaseModel): - """Base configuration settings for moderation.""" - - filters: List[ - Union[ - ModerationPiiConfig, ModerationToxicityConfig, ModerationPromptSafetyConfig - ] - ] = [ - ModerationPiiConfig(), - ModerationToxicityConfig(), - ModerationPromptSafetyConfig(), - ] - """ - Filters applied to the moderation chain, defaults to - `[ModerationPiiConfig(), ModerationToxicityConfig(), - ModerationPromptSafetyConfig()]` - """ diff --git a/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation_exceptions.py b/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation_exceptions.py deleted file mode 100644 index 52c08f6bd0fc4..0000000000000 --- a/libs/experimental/langchain_experimental/comprehend_moderation/base_moderation_exceptions.py +++ /dev/null @@ -1,41 +0,0 @@ -class ModerationPiiError(Exception): - """Exception raised if PII entities are detected. - - Attributes: - message -- explanation of the error - """ - - def __init__( - self, message: str = "The prompt contains PII entities and cannot be processed" - ): - self.message = message - super().__init__(self.message) - - -class ModerationToxicityError(Exception): - """Exception raised if Toxic entities are detected. - - Attributes: - message -- explanation of the error - """ - - def __init__( - self, message: str = "The prompt contains toxic content and cannot be processed" - ): - self.message = message - super().__init__(self.message) - - -class ModerationPromptSafetyError(Exception): - """Exception raised if Unsafe prompts are detected. - - Attributes: - message -- explanation of the error - """ - - def __init__( - self, - message: str = ("The prompt is unsafe and cannot be processed"), - ): - self.message = message - super().__init__(self.message) diff --git a/libs/experimental/langchain_experimental/comprehend_moderation/pii.py b/libs/experimental/langchain_experimental/comprehend_moderation/pii.py deleted file mode 100644 index 88e29ee115401..0000000000000 --- a/libs/experimental/langchain_experimental/comprehend_moderation/pii.py +++ /dev/null @@ -1,166 +0,0 @@ -import asyncio -from typing import Any, Dict, Optional - -from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( - ModerationPiiError, -) - - -class ComprehendPII: - """Class to handle Personally Identifiable Information (PII) moderation.""" - - def __init__( - self, - client: Any, - callback: Optional[Any] = None, - unique_id: Optional[str] = None, - chain_id: Optional[str] = None, - ) -> None: - self.client = client - self.moderation_beacon = { - "moderation_chain_id": chain_id, - "moderation_type": "PII", - "moderation_status": "LABELS_NOT_FOUND", - } - self.callback = callback - self.unique_id = unique_id - - def validate(self, prompt_value: str, config: Any = None) -> str: - redact = config.get("redact") - return ( - self._detect_pii(prompt_value=prompt_value, config=config) - if redact - else self._contains_pii(prompt_value=prompt_value, config=config) - ) - - def _contains_pii(self, prompt_value: str, config: Any = None) -> str: - """ - Checks for Personally Identifiable Information (PII) labels above a - specified threshold. 
Uses the Amazon Comprehend ContainsPiiEntities API. See
-        https://docs.aws.amazon.com/comprehend/latest/APIReference/API_ContainsPiiEntities.html
-
-        Args:
-            prompt_value (str): The input text to be checked for PII labels.
-            config (Dict[str, Any]): Configuration for the PII check and actions.
-
-        Returns:
-            str: the original prompt
-
-        Raises:
-            ModerationPiiError: If PII entities are found above the threshold.
-
-        Note:
-            - The provided client should be initialized with valid AWS credentials.
-        """
-        pii_identified = self.client.contains_pii_entities(
-            Text=prompt_value, LanguageCode="en"
-        )
-
-        if self.callback and self.callback.pii_callback:
-            self.moderation_beacon["moderation_input"] = prompt_value
-            self.moderation_beacon["moderation_output"] = pii_identified
-
-        threshold = config.get("threshold")
-        pii_labels = config.get("labels")
-        pii_found = False
-        for entity in pii_identified["Labels"]:
-            if entity["Score"] >= threshold and (
-                not pii_labels or entity["Name"] in pii_labels
-            ):
-                pii_found = True
-                break
-
-        if self.callback and self.callback.pii_callback:
-            if pii_found:
-                self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
-            asyncio.create_task(
-                self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
-            )
-        if pii_found:
-            raise ModerationPiiError
-        return prompt_value
-
-    def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
-        """
-        Detects and handles Personally Identifiable Information (PII) entities in the
-        given prompt text using Amazon Comprehend's detect_pii_entities API. The
-        function provides options to redact or stop processing based on the identified
-        PII entities and a provided configuration.
-
-        Args:
-            prompt_value (str): The input text to be checked for PII entities.
-            config (Dict[str, Any]): A configuration specifying how to handle
-            PII entities.
-
-        Returns:
-            str: The processed prompt text with redacted PII entities, or the
-            original text if no PII is found.
-
-        Raises:
-            ModerationPiiError: If the prompt contains PII entities that are
-            configured to stop processing.
-
-        Note:
-            - If PII is not found in the prompt, the original prompt is returned.
-            - The client should be initialized with valid AWS credentials.
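-
-        Example (an illustrative sketch; the boto3 client and config values
-        are assumptions, and ``validate`` dispatches here when redact=True):
-            import boto3
-
-            client = boto3.client("comprehend", region_name="us-east-1")
-            pii_check = ComprehendPII(client=client)
-            redacted = pii_check.validate(
-                prompt_value="My SSN is 123-45-6789.",
-                config={
-                    "threshold": 0.5,
-                    "labels": [],
-                    "redact": True,
-                    "mask_character": "*",
-                },
-            )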
- """ - pii_identified = self.client.detect_pii_entities( - Text=prompt_value, LanguageCode="en" - ) - - if self.callback and self.callback.pii_callback: - self.moderation_beacon["moderation_input"] = prompt_value - self.moderation_beacon["moderation_output"] = pii_identified - - if (pii_identified["Entities"]) == []: - if self.callback and self.callback.pii_callback: - asyncio.create_task( - self.callback.on_after_pii(self.moderation_beacon, self.unique_id) - ) - return prompt_value - - pii_found = False - if not config and pii_identified["Entities"]: - for entity in pii_identified["Entities"]: - if entity["Score"] >= 0.5: - pii_found = True - break - - if self.callback and self.callback.pii_callback: - if pii_found: - self.moderation_beacon["moderation_status"] = "LABELS_FOUND" - asyncio.create_task( - self.callback.on_after_pii(self.moderation_beacon, self.unique_id) - ) - if pii_found: - raise ModerationPiiError - else: - threshold = config.get("threshold") # type: ignore - pii_labels = config.get("labels") # type: ignore - mask_marker = config.get("mask_character") # type: ignore - pii_found = False - - for entity in pii_identified["Entities"]: - if ( - pii_labels - and entity["Type"] in pii_labels - and entity["Score"] >= threshold - ) or (not pii_labels and entity["Score"] >= threshold): - pii_found = True - char_offset_begin = entity["BeginOffset"] - char_offset_end = entity["EndOffset"] - - mask_length = char_offset_end - char_offset_begin + 1 - masked_part = mask_marker * mask_length - - prompt_value = ( - prompt_value[:char_offset_begin] - + masked_part - + prompt_value[char_offset_end + 1 :] - ) - - if self.callback and self.callback.pii_callback: - if pii_found: - self.moderation_beacon["moderation_status"] = "LABELS_FOUND" - asyncio.create_task( - self.callback.on_after_pii(self.moderation_beacon, self.unique_id) - ) - - return prompt_value diff --git a/libs/experimental/langchain_experimental/comprehend_moderation/prompt_safety.py b/libs/experimental/langchain_experimental/comprehend_moderation/prompt_safety.py deleted file mode 100644 index 3e1f764fbf4ef..0000000000000 --- a/libs/experimental/langchain_experimental/comprehend_moderation/prompt_safety.py +++ /dev/null @@ -1,89 +0,0 @@ -import asyncio -from typing import Any, Optional - -from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( - ModerationPromptSafetyError, -) - - -class ComprehendPromptSafety: - """Class to handle prompt safety moderation.""" - - def __init__( - self, - client: Any, - callback: Optional[Any] = None, - unique_id: Optional[str] = None, - chain_id: Optional[str] = None, - ) -> None: - self.client = client - self.moderation_beacon = { - "moderation_chain_id": chain_id, - "moderation_type": "PromptSafety", - "moderation_status": "LABELS_NOT_FOUND", - } - self.callback = callback - self.unique_id = unique_id - - def _get_arn(self) -> str: - region_name = self.client.meta.region_name - service = "comprehend" - prompt_safety_endpoint = "document-classifier-endpoint/prompt-safety" - return f"arn:aws:{service}:{region_name}:aws:{prompt_safety_endpoint}" - - def validate(self, prompt_value: str, config: Any = None) -> str: - """ - Check and validate the safety of the given prompt text. - - Args: - prompt_value (str): The input text to be checked for unsafe text. - config (Dict[str, Any]): Configuration settings for prompt safety checks. - - Raises: - ValueError: If unsafe prompt is found in the prompt text based - on the specified threshold. 
-
-        Returns:
-            str: The input prompt_value.
-
-        Note:
-            This function checks the safety of the provided prompt text using
-            Comprehend's classify_document API and raises an error if unsafe
-            text is detected with a score above the specified threshold.
-
-        Example:
-            comprehend_client = boto3.client('comprehend')
-            prompt_text = "Please tell me your credit card information."
-            config = {"threshold": 0.7}
-            safety = ComprehendPromptSafety(client=comprehend_client)
-            checked_prompt = safety.validate(prompt_text, config)
-        """
-
-        threshold = config.get("threshold")
-        unsafe_prompt = False
-
-        endpoint_arn = self._get_arn()
-        response = self.client.classify_document(
-            Text=prompt_value, EndpointArn=endpoint_arn
-        )
-
-        if self.callback and self.callback.prompt_safety_callback:
-            self.moderation_beacon["moderation_input"] = prompt_value
-            self.moderation_beacon["moderation_output"] = response
-
-        for class_result in response["Classes"]:
-            if (
-                class_result["Score"] >= threshold
-                and class_result["Name"] == "UNSAFE_PROMPT"
-            ):
-                unsafe_prompt = True
-                break
-
-        if self.callback and self.callback.prompt_safety_callback:
-            if unsafe_prompt:
-                self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
-            asyncio.create_task(
-                self.callback.on_after_prompt_safety(
-                    self.moderation_beacon, self.unique_id
-                )
-            )
-        if unsafe_prompt:
-            raise ModerationPromptSafetyError
-        return prompt_value
diff --git a/libs/experimental/langchain_experimental/comprehend_moderation/toxicity.py b/libs/experimental/langchain_experimental/comprehend_moderation/toxicity.py
deleted file mode 100644
index 2e7af07b347a1..0000000000000
--- a/libs/experimental/langchain_experimental/comprehend_moderation/toxicity.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import asyncio
-import importlib
-from typing import Any, List, Optional
-
-from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
-    ModerationToxicityError,
-)
-
-
-class ComprehendToxicity:
-    """Class to handle toxicity moderation."""
-
-    def __init__(
-        self,
-        client: Any,
-        callback: Optional[Any] = None,
-        unique_id: Optional[str] = None,
-        chain_id: Optional[str] = None,
-    ) -> None:
-        self.client = client
-        self.moderation_beacon = {
-            "moderation_chain_id": chain_id,
-            "moderation_type": "Toxicity",
-            "moderation_status": "LABELS_NOT_FOUND",
-        }
-        self.callback = callback
-        self.unique_id = unique_id
-
-    def _toxicity_init_validate(self, max_size: int) -> Any:
-        """
-        Validate the configured chunk size and return the NLTK module.
-
-        Args:
-            max_size (int): Maximum sentence size defined in the
-            configuration object.
-
-        Raises:
-            Exception: If the maximum sentence size exceeds the 5KB limit.
-
-        Note:
-            This function ensures that the NLTK punkt tokenizer is downloaded
-            if not already present.
-
-        Returns:
-            The imported ``nltk`` module.
-        """
-        if max_size > 1024 * 5:
-            raise Exception("The sentence length should not exceed 5KB.")
-        try:
-            nltk = importlib.import_module("nltk")
-        except ImportError:
-            raise ModuleNotFoundError(
-                "Could not import nltk python package. "
-                "Please install it with `pip install nltk`."
-            )
-        try:
-            nltk.data.find("tokenizers/punkt")
-        except LookupError:
-            nltk.download("punkt")
-        return nltk
-
-    def _split_paragraph(
-        self, prompt_value: str, max_size: int = 1024 * 4
-    ) -> List[List[str]]:
-        """
-        Split a paragraph into chunks of sentences, respecting the maximum size limit.
-
-        Args:
-            prompt_value (str): The input paragraph to be split into chunks.
-            max_size (int, optional): The maximum size limit in bytes for
-            each chunk. Defaults to 1024 * 4 (4KB).
-
-        Returns:
-            List[List[str]]: A list of chunks, where each chunk is a list
-            of sentences.
-
-        Note:
-            This function validates the maximum sentence size based on service
-            limits using the '_toxicity_init_validate' function. It uses the NLTK
-            sentence tokenizer to split the paragraph into sentences.
-
-        Example:
-            paragraph = "This is a sample paragraph. It
-            contains multiple sentences. ..."
-            chunks = self._split_paragraph(paragraph, max_size=2048)
-        """
-
-        # validate max. sentence size based on service limits
-        nltk = self._toxicity_init_validate(max_size)
-        sentences = nltk.sent_tokenize(prompt_value)
-        chunks: List[List[str]] = []
-        current_chunk: List[str] = []
-        current_size = 0
-
-        for sentence in sentences:
-            sentence_size = len(sentence.encode("utf-8"))
-            # If adding a new sentence exceeds max_size
-            # or current_chunk has 10 sentences, start a new chunk
-            if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
-                if current_chunk:  # Avoid appending empty chunks
-                    chunks.append(current_chunk)
-                current_chunk = []
-                current_size = 0
-
-            current_chunk.append(sentence)
-            current_size += sentence_size
-
-        # Add any remaining sentences
-        if current_chunk:
-            chunks.append(current_chunk)
-        return chunks
-
-    def validate(self, prompt_value: str, config: Any = None) -> str:
-        """
-        Check the toxicity of a given text prompt using the AWS Comprehend
-        service and apply actions based on configuration.
-
-        Args:
-            prompt_value (str): The text content to be checked for toxicity.
-            config (Dict[str, Any]): Configuration for toxicity checks and actions.
-
-        Returns:
-            str: The original prompt_value if allowed or no toxicity found.
-
-        Raises:
-            ModerationToxicityError: If the prompt contains toxic labels and
-            cannot be processed based on the configuration.
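-
-        Example (an illustrative sketch; the boto3 client and config values
-        are assumptions):
-            import boto3
-
-            client = boto3.client("comprehend", region_name="us-east-1")
-            toxicity_check = ComprehendToxicity(client=client)
-            # an empty labels list means any label above threshold counts
-            checked = toxicity_check.validate(
-                prompt_value="You are a wonderful person!",
-                config={"threshold": 0.5, "labels": []},
-            )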
- """ - - chunks = self._split_paragraph(prompt_value=prompt_value) - for sentence_list in chunks: - segments = [{"Text": sentence} for sentence in sentence_list] - response = self.client.detect_toxic_content( - TextSegments=segments, LanguageCode="en" - ) - if self.callback and self.callback.toxicity_callback: - self.moderation_beacon["moderation_input"] = segments # type: ignore - self.moderation_beacon["moderation_output"] = response - toxicity_found = False - threshold = config.get("threshold") - toxicity_labels = config.get("labels") - - if not toxicity_labels: - for item in response["ResultList"]: - for label in item["Labels"]: - if label["Score"] >= threshold: - toxicity_found = True - break - else: - for item in response["ResultList"]: - for label in item["Labels"]: - if ( - label["Name"] in toxicity_labels - and label["Score"] >= threshold - ): - toxicity_found = True - break - - if self.callback and self.callback.toxicity_callback: - if toxicity_found: - self.moderation_beacon["moderation_status"] = "LABELS_FOUND" - asyncio.create_task( - self.callback.on_after_toxicity( - self.moderation_beacon, self.unique_id - ) - ) - if toxicity_found: - raise ModerationToxicityError - return prompt_value diff --git a/libs/experimental/langchain_experimental/cpal/README.md b/libs/experimental/langchain_experimental/cpal/README.md deleted file mode 100644 index 0e9a18b896e78..0000000000000 --- a/libs/experimental/langchain_experimental/cpal/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Causal program-aided language (CPAL) chain - - -see https://github.com/langchain-ai/langchain/pull/6255 diff --git a/libs/experimental/langchain_experimental/cpal/__init__.py b/libs/experimental/langchain_experimental/cpal/__init__.py deleted file mode 100644 index 5bbca5495935d..0000000000000 --- a/libs/experimental/langchain_experimental/cpal/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -""" -**Causal program-aided language (CPAL)** is a concept implemented in LangChain as -a chain for causal modeling and narrative decomposition. - -CPAL improves upon the program-aided language (**PAL**) by incorporating -causal structure to prevent hallucination in language models, -particularly when dealing with complex narratives and math -problems with nested dependencies. - -CPAL involves translating causal narratives into a stack of operations, -setting hypothetical conditions for causal models, and decomposing -narratives into story elements. - -It allows for the creation of causal chains that define the relationships -between different elements in a narrative, enabling the modeling and analysis -of causal relationships within a given context. 
-""" diff --git a/libs/experimental/langchain_experimental/cpal/base.py b/libs/experimental/langchain_experimental/cpal/base.py deleted file mode 100644 index e475565469742..0000000000000 --- a/libs/experimental/langchain_experimental/cpal/base.py +++ /dev/null @@ -1,303 +0,0 @@ -""" -CPAL Chain and its subchains -""" - -from __future__ import annotations - -import json -from typing import Any, ClassVar, Dict, List, Optional, Type - -import pydantic -from langchain.base_language import BaseLanguageModel -from langchain.chains.base import Chain -from langchain.chains.llm import LLMChain -from langchain.output_parsers import PydanticOutputParser -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.prompts.prompt import PromptTemplate - -from langchain_experimental.cpal.constants import Constant -from langchain_experimental.cpal.models import ( - CausalModel, - InterventionModel, - NarrativeModel, - QueryModel, - StoryModel, -) -from langchain_experimental.cpal.templates.univariate.causal import ( - template as causal_template, -) -from langchain_experimental.cpal.templates.univariate.intervention import ( - template as intervention_template, -) -from langchain_experimental.cpal.templates.univariate.narrative import ( - template as narrative_template, -) -from langchain_experimental.cpal.templates.univariate.query import ( - template as query_template, -) - - -class _BaseStoryElementChain(Chain): - chain: LLMChain - input_key: str = Constant.narrative_input.value #: :meta private: - output_key: str = Constant.chain_answer.value #: :meta private: - pydantic_model: ClassVar[Optional[Type[pydantic.BaseModel]]] = ( - None #: :meta private: - ) - template: ClassVar[Optional[str]] = None #: :meta private: - - @classmethod - def parser(cls) -> PydanticOutputParser: - """Parse LLM output into a pydantic object.""" - if cls.pydantic_model is None: - raise NotImplementedError( - f"pydantic_model not implemented for {cls.__name__}" - ) - return PydanticOutputParser(pydantic_object=cls.pydantic_model) - - @property - def input_keys(self) -> List[str]: - """Return the input keys. - - :meta private: - """ - return [self.input_key] - - @property - def output_keys(self) -> List[str]: - """Return the output keys. - - :meta private: - """ - _output_keys = [self.output_key] - return _output_keys - - @classmethod - def from_univariate_prompt( - cls, - llm: BaseLanguageModel, - **kwargs: Any, - ) -> Any: - return cls( - chain=LLMChain( - llm=llm, - prompt=PromptTemplate( - input_variables=[Constant.narrative_input.value], - template=kwargs.get("template", cls.template), - partial_variables={ - "format_instructions": cls.parser().get_format_instructions() - }, - ), - ), - **kwargs, - ) - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - completion = self.chain.run(inputs[self.input_key]) - pydantic_data = self.__class__.parser().parse(completion) - return { - Constant.chain_data.value: pydantic_data, - Constant.chain_answer.value: None, - } - - -class NarrativeChain(_BaseStoryElementChain): - """Decompose the narrative into its story elements. 
- - - causal model - - query - - intervention - """ - - pydantic_model: ClassVar[Type[pydantic.BaseModel]] = NarrativeModel - template: ClassVar[str] = narrative_template - - -class CausalChain(_BaseStoryElementChain): - """Translate the causal narrative into a stack of operations.""" - - pydantic_model: ClassVar[Type[pydantic.BaseModel]] = CausalModel - template: ClassVar[str] = causal_template - - -class InterventionChain(_BaseStoryElementChain): - """Set the hypothetical conditions for the causal model.""" - - pydantic_model: ClassVar[Type[pydantic.BaseModel]] = InterventionModel - template: ClassVar[str] = intervention_template - - -class QueryChain(_BaseStoryElementChain): - """Query the outcome table using SQL. - - *Security note*: This class implements an AI technique that generates SQL code. - If those SQL commands are executed, it's critical to ensure they use credentials - that are narrowly-scoped to only include the permissions this chain needs. - Failure to do so may result in data corruption or loss, since this chain may - attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted. - The best way to guard against such negative outcomes is to (as appropriate) - limit the permissions granted to the credentials used with this chain. - """ - - pydantic_model: ClassVar[Type[pydantic.BaseModel]] = QueryModel - template: ClassVar[str] = query_template # TODO: incl. table schema - - -class CPALChain(_BaseStoryElementChain): - """Causal program-aided language (CPAL) chain implementation. - - *Security note*: The building blocks of this class include the implementation - of an AI technique that generates SQL code. If those SQL commands - are executed, it's critical to ensure they use credentials that - are narrowly-scoped to only include the permissions this chain needs. - Failure to do so may result in data corruption or loss, since this chain may - attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted. - The best way to guard against such negative outcomes is to (as appropriate) - limit the permissions granted to the credentials used with this chain. - """ - - llm: BaseLanguageModel - narrative_chain: Optional[NarrativeChain] = None - causal_chain: Optional[CausalChain] = None - intervention_chain: Optional[InterventionChain] = None - query_chain: Optional[QueryChain] = None - _story: StoryModel = pydantic.PrivateAttr(default=None) # TODO: change name ? - - @classmethod - def from_univariate_prompt( - cls, - llm: BaseLanguageModel, - **kwargs: Any, - ) -> CPALChain: - """instantiation depends on component chains - - *Security note*: The building blocks of this class include the implementation - of an AI technique that generates SQL code. If those SQL commands - are executed, it's critical to ensure they use credentials that - are narrowly-scoped to only include the permissions this chain needs. - Failure to do so may result in data corruption or loss, since this chain may - attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted. - The best way to guard against such negative outcomes is to (as appropriate) - limit the permissions granted to the credentials used with this chain. - """ - return cls( - llm=llm, - chain=LLMChain( - llm=llm, - prompt=PromptTemplate( - input_variables=["question", "query_result"], - template=( - "Summarize this answer '{query_result}' to this " - "question '{question}'? 
" - ), - ), - ), - narrative_chain=NarrativeChain.from_univariate_prompt(llm=llm), - causal_chain=CausalChain.from_univariate_prompt(llm=llm), - intervention_chain=InterventionChain.from_univariate_prompt(llm=llm), - query_chain=QueryChain.from_univariate_prompt(llm=llm), - **kwargs, - ) - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - **kwargs: Any, - ) -> Dict[str, Any]: - # instantiate component chains - if self.narrative_chain is None: - self.narrative_chain = NarrativeChain.from_univariate_prompt(llm=self.llm) - if self.causal_chain is None: - self.causal_chain = CausalChain.from_univariate_prompt(llm=self.llm) - if self.intervention_chain is None: - self.intervention_chain = InterventionChain.from_univariate_prompt( - llm=self.llm - ) - if self.query_chain is None: - self.query_chain = QueryChain.from_univariate_prompt(llm=self.llm) - - # decompose narrative into three causal story elements - narrative = self.narrative_chain(inputs[Constant.narrative_input.value])[ - Constant.chain_data.value - ] - - story = StoryModel( - causal_operations=self.causal_chain(narrative.story_plot)[ - Constant.chain_data.value - ], - intervention=self.intervention_chain(narrative.story_hypothetical)[ - Constant.chain_data.value - ], - query=self.query_chain(narrative.story_outcome_question)[ - Constant.chain_data.value - ], - ) - self._story = story - - def pretty_print_str(title: str, d: str) -> str: - return title + "\n" + d - - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - _run_manager.on_text( - pretty_print_str("story outcome data", story._outcome_table.to_string()), - color="green", - end="\n\n", - verbose=self.verbose, - ) - - def pretty_print_dict(title: str, d: dict) -> str: - return title + "\n" + json.dumps(d, indent=4) - - _run_manager.on_text( - pretty_print_dict("query data", story.query.dict()), - color="blue", - end="\n\n", - verbose=self.verbose, - ) - if story.query._result_table.empty: - # prevent piping bad data into subsequent chains - raise ValueError( - ( - "unanswerable, query and outcome are incoherent\n" - "\n" - "outcome:\n" - f"{story._outcome_table}\n" - "query:\n" - f"{story.query.dict()}" - ) - ) - else: - query_result = float(story.query._result_table.values[0][-1]) - if False: - """TODO: add this back in when demanded by composable chains""" - reporting_chain = self.chain - human_report = reporting_chain.run( - question=story.query.question, query_result=query_result - ) - query_result = { - "query_result": query_result, - "human_report": human_report, - } - output = { - Constant.chain_data.value: story, - self.output_key: query_result, - **kwargs, - } - return output - - def draw(self, **kwargs: Any) -> None: - """ - CPAL chain can draw its resulting DAG. 
- - Usage in a jupyter notebook: - - >>> from IPython.display import SVG - >>> cpal_chain.draw(path="graph.svg") - >>> SVG('graph.svg') - """ - self._story._networkx_wrapper.draw_graphviz(**kwargs) diff --git a/libs/experimental/langchain_experimental/cpal/constants.py b/libs/experimental/langchain_experimental/cpal/constants.py deleted file mode 100644 index 8d51af705b5a1..0000000000000 --- a/libs/experimental/langchain_experimental/cpal/constants.py +++ /dev/null @@ -1,9 +0,0 @@ -from enum import Enum - - -class Constant(Enum): - """Enum for constants used in the CPAL.""" - - narrative_input = "narrative_input" - chain_answer = "chain_answer" # natural language answer - chain_data = "chain_data" # pydantic instance diff --git a/libs/experimental/langchain_experimental/cpal/models.py b/libs/experimental/langchain_experimental/cpal/models.py deleted file mode 100644 index 485bd928a9302..0000000000000 --- a/libs/experimental/langchain_experimental/cpal/models.py +++ /dev/null @@ -1,279 +0,0 @@ -from __future__ import annotations # allows pydantic model to reference itself - -import re -from typing import Any, List, Optional, Union - -from langchain_community.graphs.networkx_graph import NetworkxEntityGraph -from pydantic import ( - BaseModel, - ConfigDict, - Field, - PrivateAttr, - field_validator, - model_validator, -) - -from langchain_experimental.cpal.constants import Constant - - -class NarrativeModel(BaseModel): - """ - Narrative input as three story elements. - """ - - story_outcome_question: str - story_hypothetical: str - story_plot: str # causal stack of operations - - @field_validator("*", mode="before") - def empty_str_to_none(cls, v: str) -> Union[str, None]: - """Empty strings are not allowed""" - if v == "": - return None - return v - - -class EntityModel(BaseModel): - """Entity in the story.""" - - name: str = Field(description="entity name") - code: str = Field(description="entity actions") - value: float = Field(description="entity initial value") - depends_on: List[str] = Field(default=[], description="ancestor entities") - - # TODO: generalize to multivariate math - # TODO: acyclic graph - - model_config = ConfigDict( - validate_assignment=True, - ) - - @field_validator("name") - def lower_case_name(cls, v: str) -> str: - v = v.lower() - return v - - -class CausalModel(BaseModel): - """Casual data.""" - - attribute: str = Field(description="name of the attribute to be calculated") - entities: List[EntityModel] = Field(description="entities in the story") - - # TODO: root validate each `entity.depends_on` using system's entity names - - -class EntitySettingModel(BaseModel): - """Entity initial conditions. - - Initial conditions for an entity - - {"name": "bud", "attribute": "pet_count", "value": 12} - """ - - name: str = Field(description="name of the entity") - attribute: str = Field(description="name of the attribute to be calculated") - value: float = Field(description="entity's attribute value (calculated)") - - @field_validator("name") - def lower_case_transform(cls, v: str) -> str: - v = v.lower() - return v - - -class SystemSettingModel(BaseModel): - """System initial conditions. - - Initial global conditions for the system. - - {"parameter": "interest_rate", "value": .05} - """ - - parameter: str - value: float - - -class InterventionModel(BaseModel): - """Intervention data of the story aka initial conditions. 
- - >>> intervention.dict() - { - entity_settings: [ - {"name": "bud", "attribute": "pet_count", "value": 12}, - {"name": "pat", "attribute": "pet_count", "value": 0}, - ], - system_settings: None, - } - """ - - entity_settings: List[EntitySettingModel] - system_settings: Optional[List[SystemSettingModel]] = None - - @field_validator("system_settings") - def lower_case_name(cls, v: str) -> Union[str, None]: - if v is not None: - raise NotImplementedError("system_setting is not implemented yet") - return v - - -class QueryModel(BaseModel): - """Query data of the story. - - translate a question about the story outcome into a programmatic expression""" - - question: str = Field( # type: ignore[literal-required] - alias=Constant.narrative_input.value - ) # input # type: ignore[literal-required] - expression: str # output, part of llm completion - llm_error_msg: str # output, part of llm completion - _result_table: str = PrivateAttr() # result of the executed query - - -class ResultModel(BaseModel): - """Result of the story query.""" - - question: str = Field( # type: ignore[literal-required] - alias=Constant.narrative_input.value - ) # input # type: ignore[literal-required] - _result_table: str = PrivateAttr() # result of the executed query - - -class StoryModel(BaseModel): - """Story data.""" - - causal_operations: Any = Field() - intervention: Any = Field() - query: Any = Field() - _outcome_table: Any = PrivateAttr(default=None) - _networkx_wrapper: Any = PrivateAttr(default=None) - - def __init__(self, **kwargs: Any): - super().__init__(**kwargs) - self._compute() - - # TODO: when langchain adopts pydantic.v2 replace w/ `__post_init__` - # misses hints github.com/pydantic/pydantic/issues/1729#issuecomment-1300576214 - - @model_validator(mode="before") - @classmethod - def check_intervention_is_valid(cls, values: dict) -> Any: - valid_names = [e.name for e in values["causal_operations"].entities] - for setting in values["intervention"].entity_settings: - if setting.name not in valid_names: - error_msg = f""" - Hypothetical question has an invalid entity name. - `{setting.name}` not in `{valid_names}` - """ - raise ValueError(error_msg) - return values - - def _block_back_door_paths(self) -> None: - # stop intervention entities from depending on others - intervention_entities = [ - entity_setting.name for entity_setting in self.intervention.entity_settings - ] - for entity in self.causal_operations.entities: - if entity.name in intervention_entities: - entity.depends_on = [] - entity.code = "pass" - - def _set_initial_conditions(self) -> None: - for entity_setting in self.intervention.entity_settings: - for entity in self.causal_operations.entities: - if entity.name == entity_setting.name: - entity.value = entity_setting.value - - def _make_graph(self) -> None: - self._networkx_wrapper = NetworkxEntityGraph() - for entity in self.causal_operations.entities: - for parent_name in entity.depends_on: - self._networkx_wrapper._graph.add_edge( - parent_name, entity.name, relation=entity.code - ) - - # TODO: is it correct to drop entities with no impact on the outcome (?) 
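-        # Hypothetical illustration: with edges cindy -> marcia -> jan, an
-        # isolated entity "orphan" never appears in the topological sort, so
-        # the filter below drops it from causal_operations.entities.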
- self.causal_operations.entities = [ - entity - for entity in self.causal_operations.entities - if entity.name in self._networkx_wrapper.get_topological_sort() - ] - - def _sort_entities(self) -> None: - # order the sequence of causal actions - sorted_nodes = self._networkx_wrapper.get_topological_sort() - self.causal_operations.entities.sort(key=lambda x: sorted_nodes.index(x.name)) - - def _forward_propagate(self) -> None: - try: - import pandas as pd - except ImportError as e: - raise ImportError( - "Unable to import pandas, please install with `pip install pandas`." - ) from e - entity_scope = { - entity.name: entity for entity in self.causal_operations.entities - } - for entity in self.causal_operations.entities: - if entity.code == "pass": - continue - else: - # gist.github.com/dean0x7d/df5ce97e4a1a05be4d56d1378726ff92 - exec(entity.code, globals(), entity_scope) - row_values = [entity.dict() for entity in entity_scope.values()] - self._outcome_table = pd.DataFrame(row_values) - - def _run_query(self) -> None: - def humanize_sql_error_msg(error: str) -> str: - pattern = r"column\s+(.*?)\s+not found" - col_match = re.search(pattern, error) - if col_match: - return ( - "SQL error: " - + col_match.group(1) - + " is not an attribute in your story!" - ) - else: - return str(error) - - if self.query.llm_error_msg == "": - try: - import duckdb - - df = self._outcome_table # noqa - query_result = duckdb.sql(self.query.expression).df() - self.query._result_table = query_result - except duckdb.BinderException as e: - self.query._result_table = humanize_sql_error_msg(str(e)) - except ImportError as e: - raise ImportError( - "Unable to import duckdb, please install with `pip install duckdb`." - ) from e - except Exception as e: - self.query._result_table = str(e) - else: - msg = "LLM maybe failed to translate question to SQL query." - raise ValueError( - { - "question": self.query.question, - "llm_error_msg": self.query.llm_error_msg, - "msg": msg, - } - ) - - def _compute(self) -> Any: - self._block_back_door_paths() - self._set_initial_conditions() - self._make_graph() - self._sort_entities() - self._forward_propagate() - self._run_query() - - def print_debug_report(self) -> None: - report = { - "outcome": self._outcome_table, - "query": self.query.dict(), - "result": self.query._result_table, - } - from pprint import pprint - - pprint(report) diff --git a/libs/experimental/langchain_experimental/cpal/templates/__init__.py b/libs/experimental/langchain_experimental/cpal/templates/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/langchain_experimental/cpal/templates/univariate/__init__.py b/libs/experimental/langchain_experimental/cpal/templates/univariate/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/langchain_experimental/cpal/templates/univariate/causal.py b/libs/experimental/langchain_experimental/cpal/templates/univariate/causal.py deleted file mode 100644 index 074249a051c80..0000000000000 --- a/libs/experimental/langchain_experimental/cpal/templates/univariate/causal.py +++ /dev/null @@ -1,113 +0,0 @@ -# ruff: noqa: E501 - -# fmt: off -template = ( - """ -Transform the math story plot into a JSON object. Don't guess at any of the parts. - -{format_instructions} - - - -Story: Boris has seven times the number of pets as Marcia. Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. 
- - - -# JSON: - - - -{{ - "attribute": "pet_count", - "entities": [ - {{ - "name": "cindy", - "value": 0, - "depends_on": [], - "code": "pass" - }}, - {{ - "name": "marcia", - "value": 0, - "depends_on": ["cindy"], - "code": "marcia.value = cindy.value + 2" - }}, - {{ - "name": "boris", - "value": 0, - "depends_on": ["marcia"], - "code": "boris.value = marcia.value * 7" - }}, - {{ - "name": "jan", - "value": 0, - "depends_on": ["marcia"], - "code": "jan.value = marcia.value * 3" - }} - ] -}} - - - - -Story: Boris gives 20 percent of his money to Marcia. Marcia gives 10 -percent of her money to Cindy. Cindy gives 5 percent of her money to Jan. - - - - -# JSON: - - - -{{ - "attribute": "money", - "entities": [ - {{ - "name": "boris", - "value": 0, - "depends_on": [], - "code": "pass" - }}, - {{ - "name": "marcia", - "value": 0, - "depends_on": ["boris"], - "code": " - marcia.value = boris.value * 0.2 - boris.value = boris.value * 0.8 - " - }}, - {{ - "name": "cindy", - "value": 0, - "depends_on": ["marcia"], - "code": " - cindy.value = marcia.value * 0.1 - marcia.value = marcia.value * 0.9 - " - }}, - {{ - "name": "jan", - "value": 0, - "depends_on": ["cindy"], - "code": " - jan.value = cindy.value * 0.05 - cindy.value = cindy.value * 0.9 - " - }} - ] -}} - - - - -Story: {narrative_input} - - - -# JSON: -""".strip() - + "\n" -) -# fmt: on diff --git a/libs/experimental/langchain_experimental/cpal/templates/univariate/intervention.py b/libs/experimental/langchain_experimental/cpal/templates/univariate/intervention.py deleted file mode 100644 index 6eceadd9fe19a..0000000000000 --- a/libs/experimental/langchain_experimental/cpal/templates/univariate/intervention.py +++ /dev/null @@ -1,59 +0,0 @@ -# ruff: noqa: E501 - -# fmt: off -template = ( - """ -Transform the hypothetical whatif statement into JSON. Don't guess at any of the parts. Write NONE if you are unsure. - -{format_instructions} - - - -statement: if cindy's pet count was 4 - - - - -# JSON: - - - -{{ - "entity_settings" : [ - {{ "name": "cindy", "attribute": "pet_count", "value": "4" }} - ] -}} - - - - - -statement: Let's say boris has ten dollars and Bill has 20 dollars. - - - - -# JSON: - - -{{ - "entity_settings" : [ - {{ "name": "boris", "attribute": "dollars", "value": "10" }}, - {{ "name": "bill", "attribute": "dollars", "value": "20" }} - ] -}} - - - - - -Statement: {narrative_input} - - - - -# JSON: -""".strip() - + "\n\n\n" -) -# fmt: on diff --git a/libs/experimental/langchain_experimental/cpal/templates/univariate/narrative.py b/libs/experimental/langchain_experimental/cpal/templates/univariate/narrative.py deleted file mode 100644 index 1f9ebe9bea2f3..0000000000000 --- a/libs/experimental/langchain_experimental/cpal/templates/univariate/narrative.py +++ /dev/null @@ -1,79 +0,0 @@ -# ruff: noqa: E501 - - -# fmt: off -template = ( - """ -Split the given text into three parts: the question, the story_hypothetical, and the logic. Don't guess at any of the parts. Write NONE if you are unsure. - -{format_instructions} - - - -Q: Boris has seven times the number of pets as Marcia. Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy. If Cindy has four pets, how many total pets do the three have? - - - -# JSON - - - -{{ - "story_outcome_question": "how many total pets do the three have?", - "story_hypothetical": "If Cindy has four pets", - "story_plot": "Boris has seven times the number of pets as Marcia. Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy." 
-}}
-
-
-
-Q: boris gives ten percent of his money to marcia. marcia gives ten
-percent of her money to andy. If boris has 100 dollars, how much money
-will andy have?
-
-
-
-# JSON
-
-
-
-{{
-    "story_outcome_question": "how much money will andy have?",
-    "story_hypothetical": "If boris has 100 dollars",
-    "story_plot": "boris gives ten percent of his money to marcia. marcia gives ten percent of her money to andy."
-}}
-
-
-
-
-Q: boris gives ten percent of his candy to marcia. marcia gives ten
-percent of her candy to andy. If boris has 100 pounds of candy and marcia has
-200 pounds of candy, then how many pounds of candy will andy have?
-
-
-
-
-
-# JSON
-
-
-
-
-{{
-    "story_outcome_question": "how many pounds of candy will andy have?",
-    "story_hypothetical": "If boris has 100 pounds of candy and marcia has 200 pounds of candy",
-    "story_plot": "boris gives ten percent of his candy to marcia. marcia gives ten percent of her candy to andy."
-}}
-
-
-
-
-
-Q: {narrative_input}
-
-
-
-# JSON
-""".strip()
-    + "\n\n\n"
-)
-# fmt: on
diff --git a/libs/experimental/langchain_experimental/cpal/templates/univariate/query.py b/libs/experimental/langchain_experimental/cpal/templates/univariate/query.py
deleted file mode 100644
index 87cc86f34a197..0000000000000
--- a/libs/experimental/langchain_experimental/cpal/templates/univariate/query.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# ruff: noqa: E501
-
-
-# fmt: off
-template = (
-    """
-Transform the narrative_input into an SQL expression. If you are
-unsure, do not guess; instead add an llm_error_msg that explains why you are unsure.
-
-
-{format_instructions}
-
-
-narrative_input: how much money will boris have?
-
-
-# JSON:
-
-    {{
-        "narrative_input": "how much money will boris have?",
-        "llm_error_msg": "",
-        "expression": "SELECT name, value FROM df WHERE name = 'boris'"
-    }}
-
-
-
-narrative_input: How much money does ted have?
-
-
-
-# JSON:
-
-    {{
-        "narrative_input": "How much money does ted have?",
-        "llm_error_msg": "",
-        "expression": "SELECT name, value FROM df WHERE name = 'ted'"
-    }}
-
-
-
-narrative_input: what is the sum of pet count for all the people?
-
-
-
-# JSON:
-
-    {{
-        "narrative_input": "what is the sum of pet count for all the people?",
-        "llm_error_msg": "",
-        "expression": "SELECT SUM(value) FROM df"
-    }}
-
-
-
-
-narrative_input: what's the average of the pet counts for all the people?
-
-
-
-# JSON:
-
-    {{
-        "narrative_input": "what's the average of the pet counts for all the people?",
-        "llm_error_msg": "",
-        "expression": "SELECT AVG(value) FROM df"
-    }}
-
-
-
-
-narrative_input: what's the maximum of the pet counts for all the people?
-
-
-
-# JSON:
-
-    {{
-        "narrative_input": "what's the maximum of the pet counts for all the people?",
-        "llm_error_msg": "",
-        "expression": "SELECT MAX(value) FROM df"
-    }}
-
-
-
-
-narrative_input: what's the minimum of the pet counts for all the people?
-
-
-
-# JSON:
-
-    {{
-        "narrative_input": "what's the minimum of the pet counts for all the people?",
-        "llm_error_msg": "",
-        "expression": "SELECT MIN(value) FROM df"
-    }}
-
-
-
-
-narrative_input: what's the number of people with pet counts greater than 10?
-
-
-
-# JSON:
-
-    {{
-        "narrative_input": "what's the number of people with pet counts greater than 10?",
-        "llm_error_msg": "",
-        "expression": "SELECT COUNT(*) FROM df WHERE value > 10"
-    }}
-
-
-
-
-narrative_input: what's the pet count for boris?
- - - -# JSON: - - {{ - "narrative_input": "what's the pet count for boris?", - "llm_error_msg": "", - "expression": "SELECT name, value FROM df WHERE name = 'boris'" - }} - - - - -narrative_input: what's the pet count for cindy and marcia? - - - -# JSON: - - {{ - "narrative_input": "what's the pet count for cindy and marcia?", - "llm_error_msg": "", - "expression": "SELECT name, value FROM df WHERE name IN ('cindy', 'marcia')" - }} - - - - -narrative_input: what's the total pet count for cindy and marcia? - - - -# JSON: - - {{ - "narrative_input": "what's the total pet count for cindy and marcia?", - "llm_error_msg": "", - "expression": "SELECT SUM(value) FROM df WHERE name IN ('cindy', 'marcia')" - }} - - - - -narrative_input: what's the total pet count for TED? - - - -# JSON: - - {{ - "narrative_input": "what's the total pet count for TED?", - "llm_error_msg": "", - "expression": "SELECT SUM(value) FROM df WHERE name = 'TED'" - }} - - - - - -narrative_input: what's the total dollar count for TED and cindy? - - - -# JSON: - - {{ - "narrative_input": "what's the total dollar count for TED and cindy?", - "llm_error_msg": "", - "expression": "SELECT SUM(value) FROM df WHERE name IN ('TED', 'cindy')" - }} - - - - -narrative_input: what's the total pet count for TED and cindy? - - - - -# JSON: - - {{ - "narrative_input": "what's the total pet count for TED and cindy?", - "llm_error_msg": "", - "expression": "SELECT SUM(value) FROM df WHERE name IN ('TED', 'cindy')" - }} - - - - -narrative_input: what's the best for TED and cindy? - - - - -# JSON: - - {{ - "narrative_input": "what's the best for TED and cindy?", - "llm_error_msg": "ambiguous narrative_input, not sure what 'best' means", - "expression": "" - }} - - - - -narrative_input: what's the value? - - - - -# JSON: - - {{ - "narrative_input": "what's the value?", - "llm_error_msg": "ambiguous narrative_input, not sure what entity is being asked about", - "expression": "" - }} - - - - - - -narrative_input: how many total pets do the three have? - - - - - -# JSON: - - {{ - "narrative_input": "how many total pets do the three have?", - "llm_error_msg": "", - "expression": "SELECT SUM(value) FROM df" - }} - - - - - - -narrative_input: {narrative_input} - - - - -# JSON: -""".strip() - + "\n\n\n" -) -# fmt: on diff --git a/libs/experimental/langchain_experimental/data_anonymizer/__init__.py b/libs/experimental/langchain_experimental/data_anonymizer/__init__.py deleted file mode 100644 index 18878098fccb1..0000000000000 --- a/libs/experimental/langchain_experimental/data_anonymizer/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -"""**Data anonymizer** contains both Anonymizers and Deanonymizers. -It uses the [Microsoft Presidio](https://microsoft.github.io/presidio/) library. - -**Anonymizers** are used to replace a `Personally Identifiable Information (PII)` -entity text with some other -value by applying a certain operator (e.g. replace, mask, redact, encrypt). - -**Deanonymizers** are used to revert the anonymization operation -(e.g. to decrypt an encrypted text). 
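-
-Example (an illustrative sketch; it assumes the optional presidio-analyzer,
-presidio-anonymizer, and Faker dependencies are installed, and the sample
-text is made up):
-
-    from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
-
-    anonymizer = PresidioReversibleAnonymizer()
-    masked = anonymizer.anonymize("Call John Doe at 313-666-7440.")
-    restored = anonymizer.deanonymize(masked)  # reverses via the stored mapping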
-""" - -from langchain_experimental.data_anonymizer.presidio import ( - PresidioAnonymizer, - PresidioReversibleAnonymizer, -) - -__all__ = ["PresidioAnonymizer", "PresidioReversibleAnonymizer"] diff --git a/libs/experimental/langchain_experimental/data_anonymizer/base.py b/libs/experimental/langchain_experimental/data_anonymizer/base.py deleted file mode 100644 index 85282dd3a7f73..0000000000000 --- a/libs/experimental/langchain_experimental/data_anonymizer/base.py +++ /dev/null @@ -1,61 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Callable, List, Optional - -from langchain_experimental.data_anonymizer.deanonymizer_mapping import MappingDataType -from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import ( - exact_matching_strategy, -) - -DEFAULT_DEANONYMIZER_MATCHING_STRATEGY = exact_matching_strategy - - -class AnonymizerBase(ABC): - """Base abstract class for anonymizers. - - It is public and non-virtual because it allows - wrapping the behavior for all methods in a base class. - """ - - def anonymize( - self, - text: str, - language: Optional[str] = None, - allow_list: Optional[List[str]] = None, - ) -> str: - """Anonymize text.""" - - return self._anonymize(text, language, allow_list) - - @abstractmethod - def _anonymize( - self, text: str, language: Optional[str], allow_list: Optional[List[str]] = None - ) -> str: - """Abstract method to anonymize text""" - - -class ReversibleAnonymizerBase(AnonymizerBase): - """ - Base abstract class for reversible anonymizers. - """ - - def deanonymize( - self, - text_to_deanonymize: str, - deanonymizer_matching_strategy: Callable[ - [str, MappingDataType], str - ] = DEFAULT_DEANONYMIZER_MATCHING_STRATEGY, - ) -> str: - """Deanonymize text""" - return self._deanonymize(text_to_deanonymize, deanonymizer_matching_strategy) - - @abstractmethod - def _deanonymize( - self, - text_to_deanonymize: str, - deanonymizer_matching_strategy: Callable[[str, MappingDataType], str], - ) -> str: - """Abstract method to deanonymize text""" - - @abstractmethod - def reset_deanonymizer_mapping(self) -> None: - """Abstract method to reset deanonymizer mapping""" diff --git a/libs/experimental/langchain_experimental/data_anonymizer/deanonymizer_mapping.py b/libs/experimental/langchain_experimental/data_anonymizer/deanonymizer_mapping.py deleted file mode 100644 index b19d654654a7c..0000000000000 --- a/libs/experimental/langchain_experimental/data_anonymizer/deanonymizer_mapping.py +++ /dev/null @@ -1,144 +0,0 @@ -import re -from collections import defaultdict -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Dict, List - -if TYPE_CHECKING: - from presidio_analyzer import RecognizerResult - from presidio_anonymizer.entities import EngineResult - -MappingDataType = Dict[str, Dict[str, str]] - - -def format_duplicated_operator(operator_name: str, count: int) -> str: - """Format the operator name with the count.""" - - clean_operator_name = re.sub(r"[<>]", "", operator_name) - clean_operator_name = re.sub(r"_\d+$", "", clean_operator_name) - - if operator_name.startswith("<") and operator_name.endswith(">"): - return f"<{clean_operator_name}_{count}>" - else: - return f"{clean_operator_name}_{count}" - - -@dataclass -class DeanonymizerMapping: - """Deanonymizer mapping.""" - - mapping: MappingDataType = field( - default_factory=lambda: defaultdict(lambda: defaultdict(str)) - ) - - @property - def data(self) -> MappingDataType: - """Return the deanonymizer mapping.""" - return {k: dict(v) for k, v in 
self.mapping.items()} - - def update(self, new_mapping: MappingDataType) -> None: - """Update the deanonymizer mapping with new values. - - Duplicated values will not be added - If there are multiple entities of the same type, the mapping will - include a count to differentiate them. For example, if there are - two names in the input text, the mapping will include NAME_1 and NAME_2. - """ - seen_values = set() - - for entity_type, values in new_mapping.items(): - count = len(self.mapping[entity_type]) + 1 - - for key, value in values.items(): - if ( - value not in seen_values - and value not in self.mapping[entity_type].values() - ): - new_key = ( - format_duplicated_operator(key, count) - if key in self.mapping[entity_type] - else key - ) - - self.mapping[entity_type][new_key] = value - seen_values.add(value) - count += 1 - - -def create_anonymizer_mapping( - original_text: str, - analyzer_results: List["RecognizerResult"], - anonymizer_results: "EngineResult", - is_reversed: bool = False, -) -> MappingDataType: - """Create or update the mapping used to anonymize and/or - deanonymize a text. - - This method exploits the results returned by the - analysis and anonymization processes. - - If is_reversed is True, it constructs a mapping from each original - entity to its anonymized value. - - If is_reversed is False, it constructs a mapping from each - anonymized entity back to its original text value. - - If there are multiple entities of the same type, the mapping will - include a count to differentiate them. For example, if there are - two names in the input text, the mapping will include NAME_1 and NAME_2. - - Example of mapping: - { - "PERSON": { - "": "", - "John Doe": "Slim Shady" - }, - "PHONE_NUMBER": { - "111-111-1111": "555-555-5555" - } - ... - } - """ - # We are able to zip and loop through both lists because we expect - # them to return corresponding entities for each identified piece - # of analyzable data from our input. - - # We sort them by their 'start' attribute because it allows us to - # match corresponding entities by their position in the input text. 
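-    # Hypothetical illustration: if the analyzer found PERSON "John Doe" at
-    # start=5 and the anonymizer emitted PERSON "Slim Shady" at start=5, the
-    # zip below pairs them, producing mapping["PERSON"]["John Doe"] ==
-    # "Slim Shady" (or the inverse pair when is_reversed=True).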
- analyzer_results.sort(key=lambda d: d.start) - anonymizer_results.items.sort(key=lambda d: d.start) - - mapping: MappingDataType = defaultdict(dict) - count: dict = defaultdict(int) - - for analyzed, anonymized in zip(analyzer_results, anonymizer_results.items): - original_value = original_text[analyzed.start : analyzed.end] - entity_type = anonymized.entity_type - - if is_reversed: - cond = original_value in mapping[entity_type].values() - else: - cond = original_value in mapping[entity_type] - - if cond: - continue - - if ( - anonymized.text in mapping[entity_type].values() - or anonymized.text in mapping[entity_type] - ): - anonymized_value = format_duplicated_operator( - anonymized.text, count[entity_type] + 2 - ) - count[entity_type] += 1 - else: - anonymized_value = anonymized.text - - mapping_key, mapping_value = ( - (anonymized_value, original_value) - if is_reversed - else (original_value, anonymized_value) - ) - - mapping[entity_type][mapping_key] = mapping_value - - return mapping diff --git a/libs/experimental/langchain_experimental/data_anonymizer/deanonymizer_matching_strategies.py b/libs/experimental/langchain_experimental/data_anonymizer/deanonymizer_matching_strategies.py deleted file mode 100644 index 11bb9aca4aaa6..0000000000000 --- a/libs/experimental/langchain_experimental/data_anonymizer/deanonymizer_matching_strategies.py +++ /dev/null @@ -1,185 +0,0 @@ -import re -from typing import List - -from langchain_experimental.data_anonymizer.deanonymizer_mapping import MappingDataType - - -def exact_matching_strategy(text: str, deanonymizer_mapping: MappingDataType) -> str: - """Exact matching strategy for deanonymization. - - It replaces all the anonymized entities with the original ones. - - Args: - text: text to deanonymize - deanonymizer_mapping: mapping between anonymized entities and original ones""" - - # Iterate over all the entities (PERSON, EMAIL_ADDRESS, etc.) - for entity_type in deanonymizer_mapping: - for anonymized, original in deanonymizer_mapping[entity_type].items(): - text = text.replace(anonymized, original) - return text - - -def case_insensitive_matching_strategy( - text: str, deanonymizer_mapping: MappingDataType -) -> str: - """Case insensitive matching strategy for deanonymization. - - It replaces all the anonymized entities with the original ones - irrespective of their letter case. - - Args: - text: text to deanonymize - deanonymizer_mapping: mapping between anonymized entities and original ones - - Examples of matching: - keanu reeves -> Keanu Reeves - JOHN F. KENNEDY -> John F. Kennedy - """ - - # Iterate over all the entities (PERSON, EMAIL_ADDRESS, etc.) - for entity_type in deanonymizer_mapping: - for anonymized, original in deanonymizer_mapping[entity_type].items(): - # Use regular expressions for case-insensitive matching and replacing - text = re.sub(anonymized, original, text, flags=re.IGNORECASE) - return text - - -def fuzzy_matching_strategy( - text: str, deanonymizer_mapping: MappingDataType, max_l_dist: int = 3 -) -> str: - """Fuzzy matching strategy for deanonymization. - - It uses fuzzy matching to find the position of the anonymized entity in the text. - It replaces all the anonymized entities with the original ones. - - Args: - text: text to deanonymize - deanonymizer_mapping: mapping between anonymized entities and original ones - max_l_dist: maximum Levenshtein distance between the anonymized entity and the - text segment to consider it a match - - Examples of matching: - Kaenu Reves -> Keanu Reeves - John F. 
Kennedy -> John Kennedy - """ - - try: - from fuzzysearch import find_near_matches - except ImportError as e: - raise ImportError( - "Could not import fuzzysearch, please install with " - "`pip install fuzzysearch`." - ) from e - - for entity_type in deanonymizer_mapping: - for anonymized, original in deanonymizer_mapping[entity_type].items(): - matches = find_near_matches(anonymized, text, max_l_dist=max_l_dist) - new_text = "" - last_end = 0 - for m in matches: - # add the text that isn't part of a match - new_text += text[last_end : m.start] - # add the replacement text - new_text += original - last_end = m.end - # add the remaining text that wasn't part of a match - new_text += text[last_end:] - text = new_text - - return text - - -def combined_exact_fuzzy_matching_strategy( - text: str, deanonymizer_mapping: MappingDataType, max_l_dist: int = 3 -) -> str: - """Combined exact and fuzzy matching strategy for deanonymization. - - It is a RECOMMENDED STRATEGY. - - Args: - text: text to deanonymize - deanonymizer_mapping: mapping between anonymized entities and original ones - max_l_dist: maximum Levenshtein distance between the anonymized entity and the - text segment to consider it a match - - Examples of matching: - Kaenu Reves -> Keanu Reeves - John F. Kennedy -> John Kennedy - """ - text = exact_matching_strategy(text, deanonymizer_mapping) - text = fuzzy_matching_strategy(text, deanonymizer_mapping, max_l_dist) - return text - - -def ngram_fuzzy_matching_strategy( - text: str, - deanonymizer_mapping: MappingDataType, - fuzzy_threshold: int = 85, - use_variable_length: bool = True, -) -> str: - """N-gram fuzzy matching strategy for deanonymization. - - It replaces all the anonymized entities with the original ones. - It uses fuzzy matching to find the position of the anonymized entity in the text. - It generates n-grams of the same length as the anonymized entity from the text and - uses fuzzy matching to find the position of the anonymized entity in the text. - - Args: - text: text to deanonymize - deanonymizer_mapping: mapping between anonymized entities and original ones - fuzzy_threshold: fuzzy matching threshold - use_variable_length: whether to use (n-1, n, n+1)-grams or just n-grams - """ - - def generate_ngrams(words_list: List[str], n: int) -> list: - """Generate n-grams from a list of words""" - return [ - " ".join(words_list[i : i + n]) for i in range(len(words_list) - (n - 1)) - ] - - try: - from fuzzywuzzy import fuzz - except ImportError as e: - raise ImportError( - "Could not import fuzzywuzzy, please install with " - "`pip install fuzzywuzzy`." 
- ) from e - - text_words = text.split() - replacements = [] - matched_indices: List[int] = [] - - for entity_type in deanonymizer_mapping: - for anonymized, original in deanonymizer_mapping[entity_type].items(): - anonymized_words = anonymized.split() - - if use_variable_length: - gram_lengths = [ - len(anonymized_words) - 1, - len(anonymized_words), - len(anonymized_words) + 1, - ] - else: - gram_lengths = [len(anonymized_words)] - for n in gram_lengths: - if n > 0: # Take only positive values - segments = generate_ngrams(text_words, n) - for i, segment in enumerate(segments): - if ( - fuzz.ratio(anonymized.lower(), segment.lower()) - > fuzzy_threshold - and i not in matched_indices - ): - replacements.append((i, n, original)) - # Add the matched segment indices to the list - matched_indices.extend(range(i, i + n)) - - # Sort replacements by index in reverse order - replacements.sort(key=lambda x: x[0], reverse=True) - - # Apply replacements in reverse order to not affect subsequent indices - for start, length, replacement in replacements: - text_words[start : start + length] = replacement.split() - - return " ".join(text_words) diff --git a/libs/experimental/langchain_experimental/data_anonymizer/faker_presidio_mapping.py b/libs/experimental/langchain_experimental/data_anonymizer/faker_presidio_mapping.py deleted file mode 100644 index e06cccc55ae68..0000000000000 --- a/libs/experimental/langchain_experimental/data_anonymizer/faker_presidio_mapping.py +++ /dev/null @@ -1,62 +0,0 @@ -import string -from typing import Callable, Dict, Optional - - -def get_pseudoanonymizer_mapping(seed: Optional[int] = None) -> Dict[str, Callable]: - """Get a mapping of entities to pseudo anonymize them.""" - - try: - from faker import Faker - except ImportError as e: - raise ImportError( - "Could not import faker, please install it with `pip install Faker`." 
- ) from e - - fake = Faker() - fake.seed_instance(seed) - - # Listed entities supported by Microsoft Presidio (global and country-specific) - # Source: https://microsoft.github.io/presidio/supported_entities/ - return { - # Global entities - "PERSON": lambda _: fake.name(), - "EMAIL_ADDRESS": lambda _: fake.email(), - "PHONE_NUMBER": lambda _: fake.phone_number(), - "IBAN_CODE": lambda _: fake.iban(), - "CREDIT_CARD": lambda _: fake.credit_card_number(), - "CRYPTO": lambda _: "bc1" - + "".join( - fake.random_choices(string.ascii_lowercase + string.digits, length=26) - ), - "IP_ADDRESS": lambda _: fake.ipv4_public(), - "LOCATION": lambda _: fake.city(), - "DATE_TIME": lambda _: fake.date(), - "NRP": lambda _: str(fake.random_number(digits=8, fix_len=True)), - "MEDICAL_LICENSE": lambda _: fake.bothify(text="??######").upper(), - "URL": lambda _: fake.url(), - # US-specific entities - "US_BANK_NUMBER": lambda _: fake.bban(), - "US_DRIVER_LICENSE": lambda _: str(fake.random_number(digits=9, fix_len=True)), - "US_ITIN": lambda _: fake.bothify(text="9##-7#-####"), - "US_PASSPORT": lambda _: fake.bothify(text="#####??").upper(), - "US_SSN": lambda _: fake.ssn(), - # UK-specific entities - "UK_NHS": lambda _: str(fake.random_number(digits=10, fix_len=True)), - # Spain-specific entities - "ES_NIF": lambda _: fake.bothify(text="########?").upper(), - # Italy-specific entities - "IT_FISCAL_CODE": lambda _: fake.bothify(text="??????##?##?###?").upper(), - "IT_DRIVER_LICENSE": lambda _: fake.bothify(text="?A#######?").upper(), - "IT_VAT_CODE": lambda _: fake.bothify(text="IT???????????"), - "IT_PASSPORT": lambda _: str(fake.random_number(digits=9, fix_len=True)), - "IT_IDENTITY_CARD": lambda _: str( - fake.random_number(digits=7, fix_len=True) - ), - # Singapore-specific entities - "SG_NRIC_FIN": lambda _: fake.bothify(text="????####?").upper(), - # Australia-specific entities - "AU_ABN": lambda _: str(fake.random_number(digits=11, fix_len=True)), - "AU_ACN": lambda _: str(fake.random_number(digits=9, fix_len=True)), - "AU_TFN": lambda _: str(fake.random_number(digits=9, fix_len=True)), - "AU_MEDICARE": lambda _: str(fake.random_number(digits=10, fix_len=True)), - } diff --git a/libs/experimental/langchain_experimental/data_anonymizer/presidio.py b/libs/experimental/langchain_experimental/data_anonymizer/presidio.py deleted file mode 100644 index 032c2a65f1b6b..0000000000000 --- a/libs/experimental/langchain_experimental/data_anonymizer/presidio.py +++ /dev/null @@ -1,468 +0,0 @@ -from __future__ import annotations - -import json -from pathlib import Path -from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union - -import yaml - -from langchain_experimental.data_anonymizer.base import ( - DEFAULT_DEANONYMIZER_MATCHING_STRATEGY, - AnonymizerBase, - ReversibleAnonymizerBase, -) -from langchain_experimental.data_anonymizer.deanonymizer_mapping import ( - DeanonymizerMapping, - MappingDataType, - create_anonymizer_mapping, -) -from langchain_experimental.data_anonymizer.deanonymizer_matching_strategies import ( - exact_matching_strategy, -) -from langchain_experimental.data_anonymizer.faker_presidio_mapping import ( - get_pseudoanonymizer_mapping, -) - -if TYPE_CHECKING: - from presidio_analyzer import AnalyzerEngine, EntityRecognizer - from presidio_analyzer.nlp_engine import NlpEngineProvider - from presidio_anonymizer import AnonymizerEngine - from presidio_anonymizer.entities import ConflictResolutionStrategy, OperatorConfig - - -def _import_analyzer_engine() -> 
"AnalyzerEngine": - try: - from presidio_analyzer import AnalyzerEngine - - except ImportError as e: - raise ImportError( - "Could not import presidio_analyzer, please install with " - "`pip install presidio-analyzer`. You will also need to download a " - "spaCy model to use the analyzer, e.g. " - "`python -m spacy download en_core_web_lg`." - ) from e - return AnalyzerEngine - - -def _import_nlp_engine_provider() -> "NlpEngineProvider": - try: - from presidio_analyzer.nlp_engine import NlpEngineProvider - - except ImportError as e: - raise ImportError( - "Could not import presidio_analyzer, please install with " - "`pip install presidio-analyzer`. You will also need to download a " - "spaCy model to use the analyzer, e.g. " - "`python -m spacy download en_core_web_lg`." - ) from e - return NlpEngineProvider - - -def _import_anonymizer_engine() -> "AnonymizerEngine": - try: - from presidio_anonymizer import AnonymizerEngine - except ImportError as e: - raise ImportError( - "Could not import presidio_anonymizer, please install with " - "`pip install presidio-anonymizer`." - ) from e - return AnonymizerEngine - - -def _import_operator_config() -> "OperatorConfig": - try: - from presidio_anonymizer.entities import OperatorConfig - except ImportError as e: - raise ImportError( - "Could not import presidio_anonymizer, please install with " - "`pip install presidio-anonymizer`." - ) from e - return OperatorConfig - - -# Configuring Anonymizer for multiple languages -# Detailed description and examples can be found here: -# langchain/docs/extras/guides/privacy/multi_language_anonymization.ipynb -DEFAULT_LANGUAGES_CONFIG = { - # You can also use Stanza or transformers library. - # See https://microsoft.github.io/presidio/analyzer/customizing_nlp_models/ - "nlp_engine_name": "spacy", - "models": [ - {"lang_code": "en", "model_name": "en_core_web_lg"}, - # {"lang_code": "de", "model_name": "de_core_news_md"}, - # {"lang_code": "es", "model_name": "es_core_news_md"}, - # ... - # List of available models: https://spacy.io/usage/models - ], -} - - -class PresidioAnonymizerBase(AnonymizerBase): - """Base Anonymizer using Microsoft Presidio. - - See more: https://microsoft.github.io/presidio/ - """ - - def __init__( - self, - analyzed_fields: Optional[List[str]] = None, - operators: Optional[Dict[str, OperatorConfig]] = None, - languages_config: Optional[Dict] = None, - add_default_faker_operators: bool = True, - faker_seed: Optional[int] = None, - ): - """ - Args: - analyzed_fields: List of fields to detect and then anonymize. - Defaults to all entities supported by Microsoft Presidio. - operators: Operators to use for anonymization. - Operators allow for custom anonymization of detected PII. - Learn more: - https://microsoft.github.io/presidio/tutorial/10_simple_anonymization/ - languages_config: Configuration for the NLP engine. - First language in the list will be used as the main language - in self.anonymize(...) when no language is specified. - Learn more: - https://microsoft.github.io/presidio/analyzer/customizing_nlp_models/ - faker_seed: Seed used to initialize faker. - Defaults to None, in which case faker will be seeded randomly - and provide random values. 
- """ - if languages_config is None: - languages_config = DEFAULT_LANGUAGES_CONFIG - OperatorConfig = _import_operator_config() - AnalyzerEngine = _import_analyzer_engine() - NlpEngineProvider = _import_nlp_engine_provider() - AnonymizerEngine = _import_anonymizer_engine() - - self.analyzed_fields = ( - analyzed_fields - if analyzed_fields is not None - else list(get_pseudoanonymizer_mapping().keys()) - ) - - if add_default_faker_operators: - self.operators = { - field: OperatorConfig( - operator_name="custom", params={"lambda": faker_function} - ) - for field, faker_function in get_pseudoanonymizer_mapping( - faker_seed - ).items() - } - else: - self.operators = {} - - if operators: - self.add_operators(operators) - - provider = NlpEngineProvider(nlp_configuration=languages_config) - nlp_engine = provider.create_engine() - - self.supported_languages = list(nlp_engine.nlp.keys()) - - self._analyzer = AnalyzerEngine( - supported_languages=self.supported_languages, nlp_engine=nlp_engine - ) - self._anonymizer = AnonymizerEngine() - - def add_recognizer(self, recognizer: EntityRecognizer) -> None: - """Add a recognizer to the analyzer - - Args: - recognizer: Recognizer to add to the analyzer. - """ - self._analyzer.registry.add_recognizer(recognizer) - self.analyzed_fields.extend(recognizer.supported_entities) - - def add_operators(self, operators: Dict[str, OperatorConfig]) -> None: - """Add operators to the anonymizer - - Args: - operators: Operators to add to the anonymizer. - """ - self.operators.update(operators) - - -class PresidioAnonymizer(PresidioAnonymizerBase): - """Anonymizer using Microsoft Presidio.""" - - def _anonymize( - self, - text: str, - language: Optional[str] = None, - allow_list: Optional[List[str]] = None, - conflict_resolution: Optional[ConflictResolutionStrategy] = None, - ) -> str: - """Anonymize text. - Each PII entity is replaced with a fake value. - Each time fake values will be different, as they are generated randomly. - - PresidioAnonymizer has no built-in memory - - so it will not remember the effects of anonymizing previous texts. - >>> anonymizer = PresidioAnonymizer() - >>> anonymizer.anonymize("My name is John Doe. Hi John Doe!") - 'My name is Noah Rhodes. Hi Noah Rhodes!' - >>> anonymizer.anonymize("My name is John Doe. Hi John Doe!") - 'My name is Brett Russell. Hi Brett Russell!' - - Args: - text: text to anonymize - language: language to use for analysis of PII - If None, the first (main) language in the list - of languages specified in the configuration will be used. - """ - if language is None: - language = self.supported_languages[0] - elif language not in self.supported_languages: - raise ValueError( - f"Language '{language}' is not supported. " - f"Supported languages are: {self.supported_languages}. " - "Change your language configuration file to add more languages." - ) - - # Check supported entities for given language - # e.g. 
IT_FISCAL_CODE is not supported for English in Presidio by default - # If you want to use it, you need to add a recognizer manually - supported_entities = [] - for recognizer in self._analyzer.get_recognizers(language): - recognizer_dict = recognizer.to_dict() - supported_entities.extend( - [recognizer_dict["supported_entity"]] - if "supported_entity" in recognizer_dict - else recognizer_dict["supported_entities"] - ) - - entities_to_analyze = list( - set(supported_entities).intersection(set(self.analyzed_fields)) - ) - - analyzer_results = self._analyzer.analyze( - text, - entities=entities_to_analyze, - language=language, - allow_list=allow_list, - ) - - filtered_analyzer_results = ( - self._anonymizer._remove_conflicts_and_get_text_manipulation_data( - analyzer_results, conflict_resolution - ) - ) - - anonymizer_results = self._anonymizer.anonymize( - text, - analyzer_results=analyzer_results, - operators=self.operators, - ) - - anonymizer_mapping = create_anonymizer_mapping( - text, - filtered_analyzer_results, - anonymizer_results, - ) - return exact_matching_strategy(text, anonymizer_mapping) - - -class PresidioReversibleAnonymizer(PresidioAnonymizerBase, ReversibleAnonymizerBase): - """Reversible Anonymizer using Microsoft Presidio.""" - - def __init__( - self, - analyzed_fields: Optional[List[str]] = None, - operators: Optional[Dict[str, OperatorConfig]] = None, - languages_config: Optional[Dict] = None, - add_default_faker_operators: bool = True, - faker_seed: Optional[int] = None, - ): - if languages_config is None: - languages_config = DEFAULT_LANGUAGES_CONFIG - super().__init__( - analyzed_fields, - operators, - languages_config, - add_default_faker_operators, - faker_seed, - ) - self._deanonymizer_mapping = DeanonymizerMapping() - - @property - def deanonymizer_mapping(self) -> MappingDataType: - """Return the deanonymizer mapping""" - return self._deanonymizer_mapping.data - - @property - def anonymizer_mapping(self) -> MappingDataType: - """Return the anonymizer mapping - This is just the reverse version of the deanonymizer mapping.""" - return { - key: {v: k for k, v in inner_dict.items()} - for key, inner_dict in self.deanonymizer_mapping.items() - } - - def _anonymize( - self, - text: str, - language: Optional[str] = None, - allow_list: Optional[List[str]] = None, - conflict_resolution: Optional[ConflictResolutionStrategy] = None, - ) -> str: - """Anonymize text. - Each PII entity is replaced with a fake value. - Each time fake values will be different, as they are generated randomly. - At the same time, we will create a mapping from each anonymized entity - back to its original text value. - - Thanks to the built-in memory, all previously anonymised entities - will be remembered and replaced by the same fake values: - >>> anonymizer = PresidioReversibleAnonymizer() - >>> anonymizer.anonymize("My name is John Doe. Hi John Doe!") - 'My name is Noah Rhodes. Hi Noah Rhodes!' - >>> anonymizer.anonymize("My name is John Doe. Hi John Doe!") - 'My name is Noah Rhodes. Hi Noah Rhodes!' - - Args: - text: text to anonymize - language: language to use for analysis of PII - If None, the first (main) language in the list - of languages specified in the configuration will be used. - """ - if language is None: - language = self.supported_languages[0] - - if language not in self.supported_languages: - raise ValueError( - f"Language '{language}' is not supported. " - f"Supported languages are: {self.supported_languages}. " - "Change your language configuration file to add more languages." 
- ) - - # Check supported entities for given language - # e.g. IT_FISCAL_CODE is not supported for English in Presidio by default - # If you want to use it, you need to add a recognizer manually - supported_entities = [] - for recognizer in self._analyzer.get_recognizers(language): - recognizer_dict = recognizer.to_dict() - supported_entities.extend( - [recognizer_dict["supported_entity"]] - if "supported_entity" in recognizer_dict - else recognizer_dict["supported_entities"] - ) - - entities_to_analyze = list( - set(supported_entities).intersection(set(self.analyzed_fields)) - ) - - analyzer_results = self._analyzer.analyze( - text, - entities=entities_to_analyze, - language=language, - allow_list=allow_list, - ) - - filtered_analyzer_results = ( - self._anonymizer._remove_conflicts_and_get_text_manipulation_data( - analyzer_results, conflict_resolution - ) - ) - - anonymizer_results = self._anonymizer.anonymize( - text, - analyzer_results=analyzer_results, - operators=self.operators, - ) - - new_deanonymizer_mapping = create_anonymizer_mapping( - text, - filtered_analyzer_results, - anonymizer_results, - is_reversed=True, - ) - self._deanonymizer_mapping.update(new_deanonymizer_mapping) - - return exact_matching_strategy(text, self.anonymizer_mapping) - - def _deanonymize( - self, - text_to_deanonymize: str, - deanonymizer_matching_strategy: Callable[ - [str, MappingDataType], str - ] = DEFAULT_DEANONYMIZER_MATCHING_STRATEGY, - ) -> str: - """Deanonymize text. - Each anonymized entity is replaced with its original value. - This method exploits the mapping created during the anonymization process. - - Args: - text_to_deanonymize: text to deanonymize - deanonymizer_matching_strategy: function to use to match - anonymized entities with their original values and replace them. - """ - if not self._deanonymizer_mapping: - raise ValueError( - "Deanonymizer mapping is empty. " - "Please call anonymize() and anonymize some text first." - ) - - text_to_deanonymize = deanonymizer_matching_strategy( - text_to_deanonymize, self.deanonymizer_mapping - ) - - return text_to_deanonymize - - def reset_deanonymizer_mapping(self) -> None: - """Reset the deanonymizer mapping""" - self._deanonymizer_mapping = DeanonymizerMapping() - - def save_deanonymizer_mapping(self, file_path: Union[Path, str]) -> None: - """Save the deanonymizer mapping to a JSON or YAML file. - - Args: - file_path: Path to file to save the mapping to. - - Example: - .. code-block:: python - - anonymizer.save_deanonymizer_mapping(file_path="path/mapping.json") - """ - - save_path = Path(file_path) - - if save_path.suffix not in [".json", ".yaml", ".yml"]: - raise ValueError(f"{save_path} must have an extension of .json, .yaml or .yml") - - # Make sure parent directories exist - save_path.parent.mkdir(parents=True, exist_ok=True) - - if save_path.suffix == ".json": - with open(save_path, "w") as f: - json.dump(self.deanonymizer_mapping, f, indent=2) - elif save_path.suffix in (".yaml", ".yml"): - with open(save_path, "w") as f: - yaml.dump(self.deanonymizer_mapping, f, default_flow_style=False) - - def load_deanonymizer_mapping(self, file_path: Union[Path, str]) -> None: - """Load the deanonymizer mapping from a JSON or YAML file. - - Args: - file_path: Path to file to load the mapping from. - - Example: - .. 
code-block:: python - - anonymizer.load_deanonymizer_mapping(file_path="path/mapping.json") - """ - - load_path = Path(file_path) - - if load_path.suffix not in [".json", ".yaml", ".yml"]: - raise ValueError(f"{load_path} must have an extension of .json, .yaml or .yml") - - if load_path.suffix == ".json": - with open(load_path, "r") as f: - loaded_mapping = json.load(f) - elif load_path.suffix in (".yaml", ".yml"): - with open(load_path, "r") as f: - loaded_mapping = yaml.load(f, Loader=yaml.FullLoader) - - self._deanonymizer_mapping.update(loaded_mapping) diff --git a/libs/experimental/langchain_experimental/fallacy_removal/__init__.py b/libs/experimental/langchain_experimental/fallacy_removal/__init__.py deleted file mode 100644 index 2ce9701be6bd2..0000000000000 --- a/libs/experimental/langchain_experimental/fallacy_removal/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -"""**Fallacy Removal** Chain runs a self-review of logical fallacies -as determined by the paper -[Robust and Explainable Identification of Logical Fallacies in Natural -Language Arguments](https://arxiv.org/pdf/2212.07425.pdf). -It is modeled after `Constitutional AI` and in the same format, but applies logical -fallacies as generalized rules to remove them from the output. -""" diff --git a/libs/experimental/langchain_experimental/fallacy_removal/base.py b/libs/experimental/langchain_experimental/fallacy_removal/base.py deleted file mode 100644 index c6283a3c32c50..0000000000000 --- a/libs/experimental/langchain_experimental/fallacy_removal/base.py +++ /dev/null @@ -1,183 +0,0 @@ -"""Chain for applying removals of logical fallacies.""" - -from __future__ import annotations - -from typing import Any, Dict, List, Optional - -from langchain.chains.base import Chain -from langchain.chains.llm import LLMChain -from langchain.schema import BasePromptTemplate -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.language_models import BaseLanguageModel - -from langchain_experimental.fallacy_removal.fallacies import FALLACIES -from langchain_experimental.fallacy_removal.models import LogicalFallacy -from langchain_experimental.fallacy_removal.prompts import ( - FALLACY_CRITIQUE_PROMPT, - FALLACY_REVISION_PROMPT, -) - - -class FallacyChain(Chain): - """Chain for applying logical fallacy evaluations. - - It is modeled after Constitutional AI and in the same format, but - applies logical fallacies as generalized rules to remove them from the output. - - Example: - .. 
code-block:: python - - from langchain_community.llms import OpenAI - from langchain.chains import LLMChain - from langchain_core.prompts import PromptTemplate - from langchain_experimental.fallacy_removal.base import FallacyChain - from langchain_experimental.fallacy_removal.models import LogicalFallacy - - llm = OpenAI() - - qa_prompt = PromptTemplate( - template="Q: {question} A:", - input_variables=["question"], - ) - qa_chain = LLMChain(llm=llm, prompt=qa_prompt) - - fallacy_chain = FallacyChain.from_llm( - llm=llm, - chain=qa_chain, - logical_fallacies=[ - LogicalFallacy( - fallacy_critique_request="Tell if this answer meets criteria.", - fallacy_revision_request=\ - "Give an answer that meets better criteria.", - ) - ], - ) - - fallacy_chain.run(question="How do I know if the earth is round?") - """ - - chain: LLMChain - logical_fallacies: List[LogicalFallacy] - fallacy_critique_chain: LLMChain - fallacy_revision_chain: LLMChain - return_intermediate_steps: bool = False - - @classmethod - def get_fallacies(cls, names: Optional[List[str]] = None) -> List[LogicalFallacy]: - if names is None: - return list(FALLACIES.values()) - else: - return [FALLACIES[name] for name in names] - - @classmethod - def from_llm( - cls, - llm: BaseLanguageModel, - chain: LLMChain, - fallacy_critique_prompt: BasePromptTemplate = FALLACY_CRITIQUE_PROMPT, - fallacy_revision_prompt: BasePromptTemplate = FALLACY_REVISION_PROMPT, - **kwargs: Any, - ) -> "FallacyChain": - """Create a chain from an LLM.""" - fallacy_critique_chain = LLMChain(llm=llm, prompt=fallacy_critique_prompt) - fallacy_revision_chain = LLMChain(llm=llm, prompt=fallacy_revision_prompt) - return cls( - chain=chain, - fallacy_critique_chain=fallacy_critique_chain, - fallacy_revision_chain=fallacy_revision_chain, - **kwargs, - ) - - @property - def input_keys(self) -> List[str]: - """Input keys.""" - return self.chain.input_keys - - @property - def output_keys(self) -> List[str]: - """Output keys.""" - if self.return_intermediate_steps: - return ["output", "fallacy_critiques_and_revisions", "initial_output"] - return ["output"] - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - response = self.chain.run( - **inputs, - callbacks=_run_manager.get_child("original"), - ) - initial_response = response - input_prompt = self.chain.prompt.format(**inputs) - - _run_manager.on_text( - text="Initial response: " + response + "\n\n", - verbose=self.verbose, - color="yellow", - ) - fallacy_critiques_and_revisions = [] - for logical_fallacy in self.logical_fallacies: - # Fallacy critique below - - fallacy_raw_critique = self.fallacy_critique_chain.run( - input_prompt=input_prompt, - output_from_model=response, - fallacy_critique_request=logical_fallacy.fallacy_critique_request, - callbacks=_run_manager.get_child("fallacy_critique"), - ) - fallacy_critique = self._parse_critique( - output_string=fallacy_raw_critique, - ).strip() - - # if fallacy critique contains "No fallacy critique needed" then done - if "no fallacy critique needed" in fallacy_critique.lower(): - fallacy_critiques_and_revisions.append((fallacy_critique, "")) - continue - - fallacy_revision = self.fallacy_revision_chain.run( - input_prompt=input_prompt, - output_from_model=response, - fallacy_critique_request=logical_fallacy.fallacy_critique_request, - fallacy_critique=fallacy_critique, - revision_request=logical_fallacy.fallacy_revision_request, - 
callbacks=_run_manager.get_child("fallacy_revision"), - ).strip() - response = fallacy_revision - fallacy_critiques_and_revisions.append((fallacy_critique, fallacy_revision)) - - _run_manager.on_text( - text=f"Applying {logical_fallacy.name}..." + "\n\n", - verbose=self.verbose, - color="green", - ) - - _run_manager.on_text( - text="Logical Fallacy: " + fallacy_critique + "\n\n", - verbose=self.verbose, - color="blue", - ) - - _run_manager.on_text( - text="Updated response: " + fallacy_revision + "\n\n", - verbose=self.verbose, - color="yellow", - ) - - final_output: Dict[str, Any] = {"output": response} - if self.return_intermediate_steps: - final_output["initial_output"] = initial_response - final_output["fallacy_critiques_and_revisions"] = ( - fallacy_critiques_and_revisions - ) - return final_output - - @staticmethod - def _parse_critique(output_string: str) -> str: - if "Fallacy Revision request:" not in output_string: - return output_string - output_string = output_string.split("Fallacy Revision request:")[0] - if "\n\n" in output_string: - output_string = output_string.split("\n\n")[0] - return output_string diff --git a/libs/experimental/langchain_experimental/fallacy_removal/fallacies.py b/libs/experimental/langchain_experimental/fallacy_removal/fallacies.py deleted file mode 100644 index efe4838fda762..0000000000000 --- a/libs/experimental/langchain_experimental/fallacy_removal/fallacies.py +++ /dev/null @@ -1,209 +0,0 @@ -"""Classification of logical fallacies in Natural Language Arguments \ -from https://arxiv.org/pdf/2212.07425.pdf -""" - -from typing import Dict - -from langchain_experimental.fallacy_removal.models import LogicalFallacy - -FALLACIES: Dict[str, LogicalFallacy] = { - "adhominem": LogicalFallacy( - name="adhominem", - fallacy_critique_request="Identify any feasible ways in which \ - the assistant’s last response is attacking the character or \ - personal traits of the person making an argument rather than \ - addressing the actual argument and evidence.", - fallacy_revision_request="Please rewrite the assistant response\ - to remove any attacking the character or personal traits of the\ - person making an argument rather than addressing the actual\ - argument and evidence.", - ), - "adpopulum": LogicalFallacy( - name="adpopulum", - fallacy_critique_request="Identify ways in which the assistant’s\ - last response may be asserting that something must be true or \ - correct simply because many people believe it or do it, without \ - actual facts or evidence to support the conclusion.", - fallacy_revision_request="Please rewrite the assistant response \ - to remove any assertion that something must be true or correct \ - simply because many people believe it or do it, without actual \ - facts or evidence to support the conclusion.", - ), - "appealtoemotion": LogicalFallacy( - name="appealtoemotion", - fallacy_critique_request="Identify all ways in which the \ - assistant’s last response is an attempt to win support for an \ - argument by exploiting or manipulating people's emotions rather \ - than using facts and reason.", - fallacy_revision_request="Please rewrite the assistant response \ - to remove any attempt to win support for an argument by \ - exploiting or manipulating people's emotions rather than using \ - facts and reason.", - ), - "fallacyofextension": LogicalFallacy( - name="fallacyofextension", - fallacy_critique_request="Identify any ways in which the \ - assitant's last response is making broad, sweeping generalizations\ - and extending the 
implications of an argument far beyond what the \ - initial premises support.", - fallacy_revision_request="Rewrite the assistant response to remove\ - all broad, sweeping generalizations and extending the implications\ - of an argument far beyond what the initial premises support.", - ), - "intentionalfallacy": LogicalFallacy( - name="intentionalfallacy", - fallacy_critique_request="Identify any way in which the assistant’s\ - last response may be falsely supporting a conclusion by claiming to\ - understand an author or creator's subconscious intentions without \ - clear evidence.", - fallacy_revision_request="Revise the assistant’s last response to \ - remove any false support of a conclusion by claiming to understand\ - an author or creator's subconscious intentions without clear \ - evidence.", - ), - "falsecausality": LogicalFallacy( - name="falsecausality", - fallacy_critique_request="Think carefully about whether the \ - assistant's last response is jumping to conclusions about causation\ - between events or circumstances without adequate evidence to infer \ - a causal relationship.", - fallacy_revision_request="Please write a new version of the \ - assistant’s response that removes jumping to conclusions about\ - causation between events or circumstances without adequate \ - evidence to infer a causal relationship.", - ), - "falsedilemma": LogicalFallacy( - name="falsedilemma", - fallacy_critique_request="Identify any way in which the \ - assistant's last response may be presenting only two possible options\ - or sides to a situation when there are clearly other alternatives \ - that have not been considered or addressed.", - fallacy_revision_request="Amend the assistant’s last response to \ - remove any presentation of only two possible options or sides to a \ - situation when there are clearly other alternatives that have not \ - been considered or addressed.", - ), - "hastygeneralization": LogicalFallacy( - name="hastygeneralization", - fallacy_critique_request="Identify any way in which the assistant’s\ - last response is making a broad inference or generalization to \ - situations, people, or circumstances that are not sufficiently \ - similar based on a specific example or limited evidence.", - fallacy_revision_request="Please rewrite the assistant response to\ - remove a broad inference or generalization to situations, people, \ - or circumstances that are not sufficiently similar based on a \ - specific example or limited evidence.", - ), - "illogicalarrangement": LogicalFallacy( - name="illogicalarrangement", - fallacy_critique_request="Think carefully about any ways in which \ - the assistant's last response is constructing an argument in a \ - flawed, illogical way, so the premises do not connect to or lead\ - to the conclusion properly.", - fallacy_revision_request="Please rewrite the assistant’s response\ - so as to remove any construction of an argument that is flawed and\ - illogical or if the premises do not connect to or lead to the \ - conclusion properly.", - ), - "fallacyofcredibility": LogicalFallacy( - name="fallacyofcredibility", - fallacy_critique_request="Discuss whether the assistant's last \ - response was dismissing or attacking the credibility of the person\ - making an argument rather than directly addressing the argument \ - itself.", - fallacy_revision_request="Revise the assistant’s response so \ - that it refrains from dismissing or attacking the credibility of\ - the person making an argument rather than directly addressing \ - the argument 
itself.", - ), - "circularreasoning": LogicalFallacy( - name="circularreasoning", - fallacy_critique_request="Discuss ways in which the assistant’s\ - last response may be supporting a premise by simply repeating \ - the premise as the conclusion without giving actual proof or \ - evidence.", - fallacy_revision_request="Revise the assistant’s response if \ - possible so that it’s not supporting a premise by simply \ - repeating the premise as the conclusion without giving actual\ - proof or evidence.", - ), - "beggingthequestion": LogicalFallacy( - name="beggingthequestion", - fallacy_critique_request="Discuss ways in which the assistant's\ - last response is restating the conclusion of an argument as a \ - premise without providing actual support for the conclusion in \ - the first place.", - fallacy_revision_request="Write a revision of the assistant’s \ - response that refrains from restating the conclusion of an \ - argument as a premise without providing actual support for the \ - conclusion in the first place.", - ), - "trickquestion": LogicalFallacy( - name="trickquestion", - fallacy_critique_request="Identify ways in which the \ - assistant’s last response is asking a question that \ - contains or assumes information that has not been proven or \ - substantiated.", - fallacy_revision_request="Please write a new assistant \ - response so that it does not ask a question that contains \ - or assumes information that has not been proven or \ - substantiated.", - ), - "overapplier": LogicalFallacy( - name="overapplier", - fallacy_critique_request="Identify ways in which the assistant’s\ - last response is applying a general rule or generalization to a \ - specific case it was not meant to apply to.", - fallacy_revision_request="Please write a new response that does\ - not apply a general rule or generalization to a specific case \ - it was not meant to apply to.", - ), - "equivocation": LogicalFallacy( - name="equivocation", - fallacy_critique_request="Read the assistant’s last response \ - carefully and identify if it is using the same word or phrase \ - in two different senses or contexts within an argument.", - fallacy_revision_request="Rewrite the assistant response so \ - that it does not use the same word or phrase in two different \ - senses or contexts within an argument.", - ), - "amphiboly": LogicalFallacy( - name="amphiboly", - fallacy_critique_request="Critique the assistant’s last response\ - to see if it is constructing sentences such that the grammar \ - or structure is ambiguous, leading to multiple interpretations.", - fallacy_revision_request="Please rewrite the assistant response\ - to remove any construction of sentences where the grammar or \ - structure is ambiguous or leading to multiple interpretations.", - ), - "accent": LogicalFallacy( - name="accent", - fallacy_critique_request="Discuss whether the assitant's response\ - is misrepresenting an argument by shifting the emphasis of a word\ - or phrase to give it a different meaning than intended.", - fallacy_revision_request="Please rewrite the AI model's response\ - so that it is not misrepresenting an argument by shifting the \ - emphasis of a word or phrase to give it a different meaning than\ - intended.", - ), - "composition": LogicalFallacy( - name="composition", - fallacy_critique_request="Discuss whether the assistant's \ - response is erroneously inferring that something is true of \ - the whole based on the fact that it is true of some part or \ - parts.", - fallacy_revision_request="Please rewrite the 
assistant's response\ - so that it is not erroneously inferring that something is true \ - of the whole based on the fact that it is true of some part or \ - parts.", - ), - "division": LogicalFallacy( - name="division", - fallacy_critique_request="Discuss whether the assistant's last \ - response is erroneously inferring that something is true of the \ - parts based on the fact that it is true of the whole.", - fallacy_revision_request="Please rewrite the assistant's response\ - so that it is not erroneously inferring that something is true \ - of the parts based on the fact that it is true of the whole.", - ), -} diff --git a/libs/experimental/langchain_experimental/fallacy_removal/models.py b/libs/experimental/langchain_experimental/fallacy_removal/models.py deleted file mode 100644 index 042de0897b63c..0000000000000 --- a/libs/experimental/langchain_experimental/fallacy_removal/models.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Models for the Logical Fallacy Chain.""" - -from pydantic import BaseModel - - -class LogicalFallacy(BaseModel): - """Logical fallacy.""" - - fallacy_critique_request: str - fallacy_revision_request: str - name: str = "Logical Fallacy" diff --git a/libs/experimental/langchain_experimental/fallacy_removal/prompts.py b/libs/experimental/langchain_experimental/fallacy_removal/prompts.py deleted file mode 100644 index 5c44da3bb1f6d..0000000000000 --- a/libs/experimental/langchain_experimental/fallacy_removal/prompts.py +++ /dev/null @@ -1,135 +0,0 @@ -from langchain_core.prompts.few_shot import FewShotPromptTemplate -from langchain_core.prompts.prompt import PromptTemplate - -fallacy_critique_example = PromptTemplate( - template="""Human: {input_prompt} - -Model: {output_from_model} - -Fallacy Critique Request: {fallacy_critique_request} - -Fallacy Critique: {fallacy_critique}""", - input_variables=[ - "input_prompt", - "output_from_model", - "fallacy_critique_request", - "fallacy_critique", - ], -) - -examples = [ - { - "input_prompt": "If everyone says the Earth is round, \ - how do I know that's correct?", - "output_from_model": "The earth is round because your \ - teacher says it is", - "fallacy_critique_request": "Identify specific ways in\ - which the model’s previous response had a logical fallacy. \ - Also point out potential logical fallacies in the human’s \ - questions and responses. Examples of logical fallacies \ - include but are not limited to ad hominem, ad populum, \ - appeal to emotion and false causality.", - "fallacy_critique": "This statement contains the logical \ - fallacy of Ad Verecundiam or Appeal to Authority. It is \ - a fallacy because it asserts something to be true purely \ - based on the authority of the source making the claim, \ - without any actual evidence to support it. Fallacy \ - Critique Needed", - "fallacy_revision_request": "Please rewrite the model \ - response to remove all logical fallacies, and to \ - politely point out any logical fallacies from the \ - human.", - "fallacy_revision": "The earth is round based on \ - evidence from observations of its curvature from high \ - altitudes, photos from space showing its spherical shape, \ - circumnavigation, and the fact that we see its rounded \ - shadow on the moon during lunar eclipses.", - }, - { - "input_prompt": "Should we invest more in our school \ - music program? After all, studies show students \ - involved in music perform better academically.", - "output_from_model": "I don't think we should invest \ - more in the music program. 
Playing the piccolo won't \ - teach someone better math skills.", - "fallacy_critique_request": "Identify specific ways \ - in which the model’s previous response had a logical \ - fallacy. Also point out potential logical fallacies \ - in the human’s questions and responses. Examples of \ - logical fallacies include but are not limited to ad \ - hominem, ad populum, appeal to emotion and false \ - causality.", - "fallacy_critique": "This answer commits the division \ - fallacy by rejecting the argument based on assuming \ - capabilities true of the parts (playing an instrument \ - like piccolo) also apply to the whole \ - (the full music program). The answer focuses only on \ - part of the music program rather than considering it \ - as a whole. Fallacy Critique Needed.", - "fallacy_revision_request": "Please rewrite the model \ - response to remove all logical fallacies, and to \ - politely point out any logical fallacies from the human.", - "fallacy_revision": "While playing an instrument may \ - teach discipline, more evidence is needed on whether \ - music education courses improve critical thinking \ - skills across subjects before determining if increased \ - investment in the whole music program is warranted.", - }, -] - -FALLACY_CRITIQUE_PROMPT = FewShotPromptTemplate( - example_prompt=fallacy_critique_example, - examples=[ - {k: v for k, v in e.items() if k != "fallacy_revision_request"} - for e in examples - ], - prefix="Below is a conversation between a human and an \ - AI assistant. If there is no material critique of the \ - model output, append to the end of the Fallacy Critique: \ - 'No fallacy critique needed.' If there is material \ - critique \ - of the model output, append to the end of the Fallacy \ - Critique: 'Fallacy Critique needed.'", - suffix="""Human: {input_prompt} -Model: {output_from_model} - -Fallacy Critique Request: {fallacy_critique_request} - -Fallacy Critique:""", - example_separator="\n === \n", - input_variables=["input_prompt", "output_from_model", "fallacy_critique_request"], -) - -FALLACY_REVISION_PROMPT = FewShotPromptTemplate( - example_prompt=fallacy_critique_example, - examples=examples, - prefix="Below is a conversation between a human and \ - an AI assistant.", - suffix="""Human: {input_prompt} - -Model: {output_from_model} - -Fallacy Critique Request: {fallacy_critique_request} - -Fallacy Critique: {fallacy_critique} - -If the fallacy critique does not identify anything worth \ -changing, ignore the Fallacy Revision Request and do not \ -make any revisions. Instead, return "No revisions needed". - -If the fallacy critique does identify something worth \ -changing, please revise the model response based on the \ -Fallacy Revision Request. 
- -Fallacy Revision Request: {fallacy_revision_request} - -Fallacy Revision:""", - example_separator="\n === \n", - input_variables=[ - "input_prompt", - "output_from_model", - "fallacy_critique_request", - "fallacy_critique", - "fallacy_revision_request", - ], -) diff --git a/libs/experimental/langchain_experimental/generative_agents/__init__.py b/libs/experimental/langchain_experimental/generative_agents/__init__.py deleted file mode 100644 index 3d1f1c9592e3d..0000000000000 --- a/libs/experimental/langchain_experimental/generative_agents/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""**Generative Agent** primitives.""" - -from langchain_experimental.generative_agents.generative_agent import GenerativeAgent -from langchain_experimental.generative_agents.memory import GenerativeAgentMemory - -__all__ = ["GenerativeAgent", "GenerativeAgentMemory"] diff --git a/libs/experimental/langchain_experimental/generative_agents/generative_agent.py b/libs/experimental/langchain_experimental/generative_agents/generative_agent.py deleted file mode 100644 index bfa9f52e697e7..0000000000000 --- a/libs/experimental/langchain_experimental/generative_agents/generative_agent.py +++ /dev/null @@ -1,249 +0,0 @@ -import re -from datetime import datetime -from typing import Any, Dict, List, Optional, Tuple - -from langchain.chains import LLMChain -from langchain_core.language_models import BaseLanguageModel -from langchain_core.prompts import PromptTemplate -from pydantic import BaseModel, ConfigDict, Field - -from langchain_experimental.generative_agents.memory import GenerativeAgentMemory - - -class GenerativeAgent(BaseModel): - """Agent as a character with memory and innate characteristics.""" - - name: str - """The character's name.""" - age: Optional[int] = None - """The optional age of the character.""" - traits: str = "N/A" - """Permanent traits to ascribe to the character.""" - status: str - """The character's current status.""" - memory: GenerativeAgentMemory - """The memory object that combines relevance, recency, and 'importance'.""" - llm: BaseLanguageModel - """The underlying language model.""" - verbose: bool = False - summary: str = "" #: :meta private: - """Stateful self-summary generated via reflection on the character's memory.""" - summary_refresh_seconds: int = 3600 #: :meta private: - """How frequently to re-generate the summary.""" - last_refreshed: datetime = Field(default_factory=datetime.now) # : :meta private: - """The last time the character's summary was regenerated.""" - daily_summaries: List[str] = Field(default_factory=list) # : :meta private: - """Summary of the events in the plan that the agent took.""" - - model_config = ConfigDict( - arbitrary_types_allowed=True, - ) - - # LLM-related methods - @staticmethod - def _parse_list(text: str) -> List[str]: - """Parse a newline-separated string into a list of strings.""" - lines = re.split(r"\n", text.strip()) - return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines] - - def chain(self, prompt: PromptTemplate) -> LLMChain: - """Create a chain with the same settings as the agent.""" - - return LLMChain( - llm=self.llm, prompt=prompt, verbose=self.verbose, memory=self.memory - ) - - def _get_entity_from_observation(self, observation: str) -> str: - prompt = PromptTemplate.from_template( - "What is the observed entity in the following observation? 
{observation}" - + "\nEntity=" - ) - return self.chain(prompt).run(observation=observation).strip() - - def _get_entity_action(self, observation: str, entity_name: str) -> str: - prompt = PromptTemplate.from_template( - "What is the {entity} doing in the following observation? {observation}" - + "\nThe {entity} is" - ) - return ( - self.chain(prompt).run(entity=entity_name, observation=observation).strip() - ) - - def summarize_related_memories(self, observation: str) -> str: - """Summarize memories that are most relevant to an observation.""" - prompt = PromptTemplate.from_template( - """ -{q1}? -Context from memory: -{relevant_memories} -Relevant context: -""" - ) - entity_name = self._get_entity_from_observation(observation) - entity_action = self._get_entity_action(observation, entity_name) - q1 = f"What is the relationship between {self.name} and {entity_name}" - q2 = f"{entity_name} is {entity_action}" - return self.chain(prompt=prompt).run(q1=q1, queries=[q1, q2]).strip() - - def _generate_reaction( - self, observation: str, suffix: str, now: Optional[datetime] = None - ) -> str: - """React to a given observation or dialogue act.""" - prompt = PromptTemplate.from_template( - "{agent_summary_description}" - + "\nIt is {current_time}." - + "\n{agent_name}'s status: {agent_status}" - + "\nSummary of relevant context from {agent_name}'s memory:" - + "\n{relevant_memories}" - + "\nMost recent observations: {most_recent_memories}" - + "\nObservation: {observation}" - + "\n\n" - + suffix - ) - agent_summary_description = self.get_summary(now=now) - relevant_memories_str = self.summarize_related_memories(observation) - current_time_str = ( - datetime.now().strftime("%B %d, %Y, %I:%M %p") - if now is None - else now.strftime("%B %d, %Y, %I:%M %p") - ) - kwargs: Dict[str, Any] = dict( - agent_summary_description=agent_summary_description, - current_time=current_time_str, - relevant_memories=relevant_memories_str, - agent_name=self.name, - observation=observation, - agent_status=self.status, - ) - consumed_tokens = self.llm.get_num_tokens( - prompt.format(most_recent_memories="", **kwargs) - ) - kwargs[self.memory.most_recent_memories_token_key] = consumed_tokens - return self.chain(prompt=prompt).run(**kwargs).strip() - - def _clean_response(self, text: str) -> str: - return re.sub(f"^{self.name} ", "", text.strip()).strip() - - def generate_reaction( - self, observation: str, now: Optional[datetime] = None - ) -> Tuple[bool, str]: - """React to a given observation.""" - call_to_action_template = ( - "Should {agent_name} react to the observation, and if so," - + " what would be an appropriate reaction? Respond in one line." - + ' If the action is to engage in dialogue, write:\nSAY: "what to say"' - + "\notherwise, write:\nREACT: {agent_name}'s reaction (if anything)." 
- + "\nEither do nothing, react, or say something but not both.\n\n" - ) - full_result = self._generate_reaction( - observation, call_to_action_template, now=now - ) - result = full_result.strip().split("\n")[0] - # AAA - self.memory.save_context( - {}, - { - self.memory.add_memory_key: f"{self.name} observed " - f"{observation} and reacted by {result}", - self.memory.now_key: now, - }, - ) - if "REACT:" in result: - reaction = self._clean_response(result.split("REACT:")[-1]) - return False, f"{self.name} {reaction}" - if "SAY:" in result: - said_value = self._clean_response(result.split("SAY:")[-1]) - return True, f"{self.name} said {said_value}" - else: - return False, result - - def generate_dialogue_response( - self, observation: str, now: Optional[datetime] = None - ) -> Tuple[bool, str]: - """React to a given observation.""" - call_to_action_template = ( - "What would {agent_name} say? To end the conversation, write:" - ' GOODBYE: "what to say". Otherwise to continue the conversation,' - ' write: SAY: "what to say next"\n\n' - ) - full_result = self._generate_reaction( - observation, call_to_action_template, now=now - ) - result = full_result.strip().split("\n")[0] - if "GOODBYE:" in result: - farewell = self._clean_response(result.split("GOODBYE:")[-1]) - self.memory.save_context( - {}, - { - self.memory.add_memory_key: f"{self.name} observed " - f"{observation} and said {farewell}", - self.memory.now_key: now, - }, - ) - return False, f"{self.name} said {farewell}" - if "SAY:" in result: - response_text = self._clean_response(result.split("SAY:")[-1]) - self.memory.save_context( - {}, - { - self.memory.add_memory_key: f"{self.name} observed " - f"{observation} and said {response_text}", - self.memory.now_key: now, - }, - ) - return True, f"{self.name} said {response_text}" - else: - return False, result - - ###################################################### - # Agent stateful' summary methods. # - # Each dialog or response prompt includes a header # - # summarizing the agent's self-description. This is # - # updated periodically through probing its memories # - ###################################################### - def _compute_agent_summary(self) -> str: - """""" - prompt = PromptTemplate.from_template( - "How would you summarize {name}'s core characteristics given the" - + " following statements:\n" - + "{relevant_memories}" - + "Do not embellish." - + "\n\nSummary: " - ) - # The agent seeks to think about their core characteristics. 
- return ( - self.chain(prompt) - .run(name=self.name, queries=[f"{self.name}'s core characteristics"]) - .strip() - ) - - def get_summary( - self, force_refresh: bool = False, now: Optional[datetime] = None - ) -> str: - """Return a descriptive summary of the agent.""" - current_time = datetime.now() if now is None else now - since_refresh = (current_time - self.last_refreshed).total_seconds() - if ( - not self.summary - or since_refresh >= self.summary_refresh_seconds - or force_refresh - ): - self.summary = self._compute_agent_summary() - self.last_refreshed = current_time - age = self.age if self.age is not None else "N/A" - return ( - f"Name: {self.name} (age: {age})" - + f"\nInnate traits: {self.traits}" - + f"\n{self.summary}" - ) - - def get_full_header( - self, force_refresh: bool = False, now: Optional[datetime] = None - ) -> str: - """Return a full header of the agent's status, summary, and current time.""" - now = datetime.now() if now is None else now - summary = self.get_summary(force_refresh=force_refresh, now=now) - current_time_str = now.strftime("%B %d, %Y, %I:%M %p") - return ( - f"{summary}\nIt is {current_time_str}.\n{self.name}'s status: {self.status}" - ) diff --git a/libs/experimental/langchain_experimental/generative_agents/memory.py b/libs/experimental/langchain_experimental/generative_agents/memory.py deleted file mode 100644 index ac06acacf509a..0000000000000 --- a/libs/experimental/langchain_experimental/generative_agents/memory.py +++ /dev/null @@ -1,296 +0,0 @@ -import logging -import re -from datetime import datetime -from typing import Any, Dict, List, Optional - -from langchain.chains import LLMChain -from langchain.retrievers import TimeWeightedVectorStoreRetriever -from langchain.schema import BaseMemory, Document -from langchain.utils import mock_now -from langchain_core.language_models import BaseLanguageModel -from langchain_core.prompts import PromptTemplate - -logger = logging.getLogger(__name__) - - -class GenerativeAgentMemory(BaseMemory): - """Memory for the generative agent.""" - - llm: BaseLanguageModel - """The core language model.""" - memory_retriever: TimeWeightedVectorStoreRetriever - """The retriever to fetch related memories.""" - verbose: bool = False - reflection_threshold: Optional[float] = None - """When aggregate_importance exceeds reflection_threshold, stop to reflect.""" - current_plan: List[str] = [] - """The current plan of the agent.""" - # A weight of 0.15 makes this less important than it - # would be otherwise, relative to salience and time - importance_weight: float = 0.15 - """How much weight to assign the memory importance.""" - aggregate_importance: float = 0.0 # : :meta private: - """Track the sum of the 'importance' of recent memories. 
- - Triggers reflection when it reaches reflection_threshold.""" - - max_tokens_limit: int = 1200 # : :meta private: - # input keys - queries_key: str = "queries" - most_recent_memories_token_key: str = "recent_memories_token" - add_memory_key: str = "add_memory" - # output keys - relevant_memories_key: str = "relevant_memories" - relevant_memories_simple_key: str = "relevant_memories_simple" - most_recent_memories_key: str = "most_recent_memories" - now_key: str = "now" - reflecting: bool = False - - def chain(self, prompt: PromptTemplate) -> LLMChain: - return LLMChain(llm=self.llm, prompt=prompt, verbose=self.verbose) - - @staticmethod - def _parse_list(text: str) -> List[str]: - """Parse a newline-separated string into a list of strings.""" - lines = re.split(r"\n", text.strip()) - lines = [line for line in lines if line.strip()] # remove empty lines - return [re.sub(r"^\s*\d+\.\s*", "", line).strip() for line in lines] - - def _get_topics_of_reflection(self, last_k: int = 50) -> List[str]: - """Return the 3 most salient high-level questions about recent observations.""" - prompt = PromptTemplate.from_template( - "{observations}\n\n" - "Given only the information above, what are the 3 most salient " - "high-level questions we can answer about the subjects in the statements?\n" - "Provide each question on a new line." - ) - observations = self.memory_retriever.memory_stream[-last_k:] - observation_str = "\n".join( - [self._format_memory_detail(o) for o in observations] - ) - result = self.chain(prompt).run(observations=observation_str) - return self._parse_list(result) - - def _get_insights_on_topic( - self, topic: str, now: Optional[datetime] = None - ) -> List[str]: - """Generate 'insights' on a topic of reflection, based on pertinent memories.""" - prompt = PromptTemplate.from_template( - "Statements relevant to: '{topic}'\n" - "---\n" - "{related_statements}\n" - "---\n" - "What 5 high-level novel insights can you infer from the above statements " - "that are relevant for answering the following question?\n" - "Do not include any insights that are not relevant to the question.\n" - "Do not repeat any insights that have already been made.\n\n" - "Question: {topic}\n\n" - "(example format: insight (because of 1, 5, 3))\n" - ) - - related_memories = self.fetch_memories(topic, now=now) - related_statements = "\n".join( - [ - self._format_memory_detail(memory, prefix=f"{i+1}. ") - for i, memory in enumerate(related_memories) - ] - ) - result = self.chain(prompt).run( - topic=topic, related_statements=related_statements - ) - # TODO: Parse the connections between memories and insights - return self._parse_list(result) - - def pause_to_reflect(self, now: Optional[datetime] = None) -> List[str]: - """Reflect on recent observations and generate 'insights'.""" - if self.verbose: - logger.info("Character is reflecting") - new_insights = [] - topics = self._get_topics_of_reflection() - for topic in topics: - insights = self._get_insights_on_topic(topic, now=now) - for insight in insights: - self.add_memory(insight, now=now) - new_insights.extend(insights) - return new_insights - - def _score_memory_importance(self, memory_content: str) -> float: - """Score the absolute importance of the given memory.""" - prompt = PromptTemplate.from_template( - "On the scale of 1 to 10, where 1 is purely mundane" - + " (e.g., brushing teeth, making bed) and 10 is" - + " extremely poignant (e.g., a break up, college" - + " acceptance), rate the likely poignancy of the" - + " following piece of memory. 
Respond with a single integer." - + "\nMemory: {memory_content}" - + "\nRating: " - ) - score = self.chain(prompt).run(memory_content=memory_content).strip() - if self.verbose: - logger.info(f"Importance score: {score}") - match = re.search(r"^\D*(\d+)", score) - if match: - return (float(match.group(1)) / 10) * self.importance_weight - else: - return 0.0 - - def _score_memories_importance(self, memory_content: str) -> List[float]: - """Score the absolute importance of the given memory.""" - prompt = PromptTemplate.from_template( - "On the scale of 1 to 10, where 1 is purely mundane" - + " (e.g., brushing teeth, making bed) and 10 is" - + " extremely poignant (e.g., a break up, college" - + " acceptance), rate the likely poignancy of the" - + " following piece of memory. Always answer with only a list of numbers." - + " If just given one memory still respond in a list." - + " Memories are separated by semi colans (;)" - + "\nMemories: {memory_content}" - + "\nRating: " - ) - scores = self.chain(prompt).run(memory_content=memory_content).strip() - - if self.verbose: - logger.info(f"Importance scores: {scores}") - - # Split into list of strings and convert to floats - scores_list = [float(x) for x in scores.split(";")] - - return scores_list - - def add_memories( - self, memory_content: str, now: Optional[datetime] = None - ) -> List[str]: - """Add an observations or memories to the agent's memory.""" - importance_scores = self._score_memories_importance(memory_content) - - self.aggregate_importance += max(importance_scores) - memory_list = memory_content.split(";") - documents = [] - - for i in range(len(memory_list)): - documents.append( - Document( - page_content=memory_list[i], - metadata={"importance": importance_scores[i]}, - ) - ) - - result = self.memory_retriever.add_documents(documents, current_time=now) - - # After an agent has processed a certain amount of memories (as measured by - # aggregate importance), it is time to reflect on recent events to add - # more synthesized memories to the agent's memory stream. - if ( - self.reflection_threshold is not None - and self.aggregate_importance > self.reflection_threshold - and not self.reflecting - ): - self.reflecting = True - self.pause_to_reflect(now=now) - # Hack to clear the importance from reflection - self.aggregate_importance = 0.0 - self.reflecting = False - return result - - def add_memory( - self, memory_content: str, now: Optional[datetime] = None - ) -> List[str]: - """Add an observation or memory to the agent's memory.""" - importance_score = self._score_memory_importance(memory_content) - self.aggregate_importance += importance_score - document = Document( - page_content=memory_content, metadata={"importance": importance_score} - ) - result = self.memory_retriever.add_documents([document], current_time=now) - - # After an agent has processed a certain amount of memories (as measured by - # aggregate importance), it is time to reflect on recent events to add - # more synthesized memories to the agent's memory stream. 
- if ( - self.reflection_threshold is not None - and self.aggregate_importance > self.reflection_threshold - and not self.reflecting - ): - self.reflecting = True - self.pause_to_reflect(now=now) - # Hack to clear the importance from reflection - self.aggregate_importance = 0.0 - self.reflecting = False - return result - - def fetch_memories( - self, observation: str, now: Optional[datetime] = None - ) -> List[Document]: - """Fetch related memories.""" - if now is not None: - with mock_now(now): - return self.memory_retriever.invoke(observation) - else: - return self.memory_retriever.invoke(observation) - - def format_memories_detail(self, relevant_memories: List[Document]) -> str: - content = [] - for mem in relevant_memories: - content.append(self._format_memory_detail(mem, prefix="- ")) - return "\n".join([f"{mem}" for mem in content]) - - def _format_memory_detail(self, memory: Document, prefix: str = "") -> str: - created_time = memory.metadata["created_at"].strftime("%B %d, %Y, %I:%M %p") - return f"{prefix}[{created_time}] {memory.page_content.strip()}" - - def format_memories_simple(self, relevant_memories: List[Document]) -> str: - return "; ".join([f"{mem.page_content}" for mem in relevant_memories]) - - def _get_memories_until_limit(self, consumed_tokens: int) -> str: - """Reduce the number of tokens in the documents.""" - result = [] - for doc in self.memory_retriever.memory_stream[::-1]: - if consumed_tokens >= self.max_tokens_limit: - break - consumed_tokens += self.llm.get_num_tokens(doc.page_content) - if consumed_tokens < self.max_tokens_limit: - result.append(doc) - return self.format_memories_simple(result) - - @property - def memory_variables(self) -> List[str]: - """Input keys this memory class will load dynamically.""" - return [] - - def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: - """Return key-value pairs given the text input to the chain.""" - queries = inputs.get(self.queries_key) - now = inputs.get(self.now_key) - if queries is not None: - relevant_memories = [ - mem for query in queries for mem in self.fetch_memories(query, now=now) - ] - return { - self.relevant_memories_key: self.format_memories_detail( - relevant_memories - ), - self.relevant_memories_simple_key: self.format_memories_simple( - relevant_memories - ), - } - - most_recent_memories_token = inputs.get(self.most_recent_memories_token_key) - if most_recent_memories_token is not None: - return { - self.most_recent_memories_key: self._get_memories_until_limit( - most_recent_memories_token - ) - } - return {} - - def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None: - """Save the context of this model run to memory.""" - # TODO: fix the save memory key - mem = outputs.get(self.add_memory_key) - now = outputs.get(self.now_key) - if mem: - self.add_memory(mem, now=now) - - def clear(self) -> None: - """Clear memory contents.""" - # TODO diff --git a/libs/experimental/langchain_experimental/graph_transformers/__init__.py b/libs/experimental/langchain_experimental/graph_transformers/__init__.py deleted file mode 100644 index fd01190dc85c2..0000000000000 --- a/libs/experimental/langchain_experimental/graph_transformers/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -"""**Graph Transformers** transform Documents into Graph Documents.""" - -from langchain_experimental.graph_transformers.diffbot import DiffbotGraphTransformer -from langchain_experimental.graph_transformers.gliner import GlinerGraphTransformer -from 
langchain_experimental.graph_transformers.llm import LLMGraphTransformer -from langchain_experimental.graph_transformers.relik import RelikGraphTransformer - -__all__ = [ - "DiffbotGraphTransformer", - "LLMGraphTransformer", - "RelikGraphTransformer", - "GlinerGraphTransformer", -] diff --git a/libs/experimental/langchain_experimental/graph_transformers/diffbot.py b/libs/experimental/langchain_experimental/graph_transformers/diffbot.py deleted file mode 100644 index 8adb0b9079099..0000000000000 --- a/libs/experimental/langchain_experimental/graph_transformers/diffbot.py +++ /dev/null @@ -1,365 +0,0 @@ -from enum import Enum -from typing import Any, Dict, List, Optional, Sequence, Tuple, Union - -import requests -from langchain.utils import get_from_env -from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship -from langchain_core.documents import Document - - -class TypeOption(str, Enum): - FACTS = "facts" - ENTITIES = "entities" - SENTIMENT = "sentiment" - - -def format_property_key(s: str) -> str: - """Formats a string to be used as a property key.""" - - words = s.split() - if not words: - return s - first_word = words[0].lower() - capitalized_words = [word.capitalize() for word in words[1:]] - return "".join([first_word] + capitalized_words) - - -class NodesList: - """List of nodes with associated properties. - - Attributes: - nodes (Dict[Tuple, Any]): Stores nodes as keys and their properties as values. - Each key is a tuple where the first element is the - node ID and the second is the node type. - """ - - def __init__(self) -> None: - self.nodes: Dict[Tuple[Union[str, int], str], Any] = dict() - - def add_node_property( - self, node: Tuple[Union[str, int], str], properties: Dict[str, Any] - ) -> None: - """ - Adds or updates node properties. - - If the node does not exist in the list, it's added along with its properties. - If the node already exists, its properties are updated with the new values. - - Args: - node (Tuple): A tuple containing the node ID and node type. - properties (Dict): A dictionary of properties to add or update for the node. - """ - if node not in self.nodes: - self.nodes[node] = properties - else: - self.nodes[node].update(properties) - - def return_node_list(self) -> List[Node]: - """ - Returns the nodes as a list of Node objects. - - Each Node object will have its ID, type, and properties populated. - - Returns: - List[Node]: A list of Node objects. - """ - nodes = [ - Node(id=key[0], type=key[1], properties=self.nodes[key]) - for key in self.nodes - ] - return nodes - - -# Properties that should be treated as node properties instead of relationships -FACT_TO_PROPERTY_TYPE = [ - "Date", - "Number", - "Job title", - "Cause of death", - "Organization type", - "Academic title", -] - - -schema_mapping = [ - ("HEADQUARTERS", "ORGANIZATION_LOCATIONS"), - ("RESIDENCE", "PERSON_LOCATION"), - ("ALL_PERSON_LOCATIONS", "PERSON_LOCATION"), - ("CHILD", "HAS_CHILD"), - ("PARENT", "HAS_PARENT"), - ("CUSTOMERS", "HAS_CUSTOMER"), - ("SKILLED_AT", "INTERESTED_IN"), -] - - -class SimplifiedSchema: - """Simplified schema mapping. - - Attributes: - schema (Dict): A dictionary containing the mapping to simplified schema types. - """ - - def __init__(self) -> None: - """Initializes the schema dictionary based on the predefined list.""" - self.schema = dict() - for row in schema_mapping: - self.schema[row[0]] = row[1] - - def get_type(self, type: str) -> str: - """ - Retrieves the simplified schema type for a given original type. 
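The two small helpers above carry the naming conventions for the whole Diffbot transformer: `format_property_key` camelCases raw property names, and the `schema_mapping` table behind `SimplifiedSchema` collapses Diffbot's fine-grained relation types into generic ones, with unmapped types passing through unchanged. A standalone restatement for illustration (toy subset of the mapping, not part of the removed file):

```python
def format_property_key(s: str) -> str:
    # "Job title" -> "jobTitle": lowercase the first word, capitalize the rest.
    words = s.split()
    if not words:
        return s
    return "".join([words[0].lower()] + [w.capitalize() for w in words[1:]])


# Mirrors SimplifiedSchema.get_type: unmapped types fall through unchanged.
schema_mapping = {"HEADQUARTERS": "ORGANIZATION_LOCATIONS", "CHILD": "HAS_CHILD"}

print(format_property_key("Job title"))          # jobTitle
print(schema_mapping.get("CHILD", "CHILD"))      # HAS_CHILD
print(schema_mapping.get("FOUNDER", "FOUNDER"))  # FOUNDER (passes through)
```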
- - Args: - type (str): The original schema type to find the simplified type for. - - Returns: - str: The simplified schema type if it exists; - otherwise, returns the original type. - """ - try: - return self.schema[type] - except KeyError: - return type - - -class DiffbotGraphTransformer: - """Transform documents into graph documents using Diffbot NLP API. - - A graph document transformation system takes a sequence of Documents and returns a - sequence of Graph Documents. - - Example: - .. code-block:: python - from langchain_experimental.graph_transformers import DiffbotGraphTransformer - from langchain_core.documents import Document - - diffbot_api_key = "DIFFBOT_API_KEY" - diffbot_nlp = DiffbotGraphTransformer(diffbot_api_key=diffbot_api_key) - - document = Document(page_content="Mike Tunge is the CEO of Diffbot.") - graph_documents = diffbot_nlp.convert_to_graph_documents([document]) - - """ - - def __init__( - self, - diffbot_api_key: Optional[str] = None, - fact_confidence_threshold: float = 0.7, - include_qualifiers: bool = True, - include_evidence: bool = True, - simplified_schema: bool = True, - extract_types: List[TypeOption] = [TypeOption.FACTS], - *, - include_confidence: bool = False, - ) -> None: - """ - Initialize the graph transformer with various options. - - Args: - diffbot_api_key (str): - The API key for Diffbot's NLP services. - - fact_confidence_threshold (float): - Minimum confidence level for facts to be included. - include_qualifiers (bool): - Whether to include qualifiers in the relationships. - include_evidence (bool): - Whether to include evidence for the relationships. - simplified_schema (bool): - Whether to use a simplified schema for relationships. - extract_types (List[TypeOption]): - A list of data types to extract. Facts, entities, and - sentiment are supported. By default, the option is - set to facts. A fact represents a combination of - source and target nodes with a relationship type. - include_confidence (bool): - Whether to include confidence scores on nodes and rels - """ - self.diffbot_api_key = diffbot_api_key or get_from_env( - "diffbot_api_key", "DIFFBOT_API_KEY" - ) - self.fact_threshold_confidence = fact_confidence_threshold - self.include_qualifiers = include_qualifiers - self.include_evidence = include_evidence - self.include_confidence = include_confidence - self.simplified_schema = None - if simplified_schema: - self.simplified_schema = SimplifiedSchema() - if not extract_types: - raise ValueError( - "`extract_types` cannot be an empty array. " - "Allowed values are 'facts', 'entities', or both." - ) - - self.extract_types = extract_types - - def nlp_request(self, text: str) -> Dict[str, Any]: - """ - Make an API request to the Diffbot NLP endpoint. - - Args: - text (str): The text to be processed. - - Returns: - Dict[str, Any]: The JSON response from the API. - """ - - # Relationship extraction only works for English - payload = { - "content": text, - "lang": "en", - } - - FIELDS = ",".join(self.extract_types) - HOST = "nl.diffbot.com" - url = ( - f"https://{HOST}/v1/?fields={FIELDS}&" - f"token={self.diffbot_api_key}&language=en" - ) - result = requests.post(url, data=payload) - return result.json() - - def process_response( - self, payload: Dict[str, Any], document: Document - ) -> GraphDocument: - """ - Transform the Diffbot NLP response into a GraphDocument. - - Args: - payload (Dict[str, Any]): The JSON response from Diffbot's NLP API. - document (Document): The original document. 
- - Returns: - GraphDocument: The transformed document as a graph. - """ - - # Return empty result if there are no facts - if ("facts" not in payload or not payload["facts"]) and ( - "entities" not in payload or not payload["entities"] - ): - return GraphDocument(nodes=[], relationships=[], source=document) - - # Nodes are a custom class because we need to deduplicate - nodes_list = NodesList() - if "entities" in payload and payload["entities"]: - for record in payload["entities"]: - # Ignore if it doesn't have a type - if not record["allTypes"]: - continue - - # Define source node - source_id = ( - record["allUris"][0] if record["allUris"] else record["name"] - ) - source_label = record["allTypes"][0]["name"].capitalize() - source_name = record["name"] - nodes_list.add_node_property( - (source_id, source_label), {"name": source_name} - ) - if record.get("sentiment") is not None: - nodes_list.add_node_property( - (source_id, source_label), - {"sentiment": record.get("sentiment")}, - ) - if self.include_confidence: - nodes_list.add_node_property( - (source_id, source_label), - {"confidence": record.get("confidence")}, - ) - - relationships = list() - # Relationships are a list because we don't deduplicate nor anything else - if "facts" in payload and payload["facts"]: - for record in payload["facts"]: - # Skip if the fact is below the threshold confidence - if record["confidence"] < self.fact_threshold_confidence: - continue - - # TODO: It should probably be treated as a node property - if not record["value"]["allTypes"]: - continue - - # Define source node - source_id = ( - record["entity"]["allUris"][0] - if record["entity"]["allUris"] - else record["entity"]["name"] - ) - source_label = record["entity"]["allTypes"][0]["name"].capitalize() - source_name = record["entity"]["name"] - source_node = Node(id=source_id, type=source_label) - nodes_list.add_node_property( - (source_id, source_label), {"name": source_name} - ) - - # Define target node - target_id = ( - record["value"]["allUris"][0] - if record["value"]["allUris"] - else record["value"]["name"] - ) - target_label = record["value"]["allTypes"][0]["name"].capitalize() - target_name = record["value"]["name"] - # Some facts are better suited as node properties - if target_label in FACT_TO_PROPERTY_TYPE: - nodes_list.add_node_property( - (source_id, source_label), - {format_property_key(record["property"]["name"]): target_name}, - ) - else: # Define relationship - # Define target node object - target_node = Node(id=target_id, type=target_label) - nodes_list.add_node_property( - (target_id, target_label), {"name": target_name} - ) - # Define relationship type - rel_type = record["property"]["name"].replace(" ", "_").upper() - if self.simplified_schema: - rel_type = self.simplified_schema.get_type(rel_type) - - # Relationship qualifiers/properties - rel_properties = dict() - relationship_evidence = [ - el["passage"] for el in record["evidence"] - ][0] - if self.include_evidence: - rel_properties.update({"evidence": relationship_evidence}) - if self.include_confidence: - rel_properties.update({"confidence": record["confidence"]}) - if self.include_qualifiers and record.get("qualifiers"): - for property in record["qualifiers"]: - prop_key = format_property_key(property["property"]["name"]) - rel_properties[prop_key] = property["value"]["name"] - - relationship = Relationship( - source=source_node, - target=target_node, - type=rel_type, - properties=rel_properties, - ) - relationships.append(relationship) - - return GraphDocument( - 
nodes=nodes_list.return_node_list(),
-            relationships=relationships,
-            source=document,
-        )
-
-    def convert_to_graph_documents(
-        self, documents: Sequence[Document]
-    ) -> List[GraphDocument]:
-        """Convert a sequence of documents into graph documents.
-
-        Args:
-            documents (Sequence[Document]): The original documents.
-            kwargs: Additional keyword arguments.
-
-        Returns:
-            Sequence[GraphDocument]: The transformed documents as graphs.
-        """
-        results = []
-        for document in documents:
-            raw_results = self.nlp_request(document.page_content)
-            graph_document = self.process_response(raw_results, document)
-            results.append(graph_document)
-        return results
diff --git a/libs/experimental/langchain_experimental/graph_transformers/gliner.py b/libs/experimental/langchain_experimental/graph_transformers/gliner.py
deleted file mode 100644
index 85566e2f7af45..0000000000000
--- a/libs/experimental/langchain_experimental/graph_transformers/gliner.py
+++ /dev/null
@@ -1,175 +0,0 @@
-from typing import Any, Dict, List, Sequence, Union
-
-from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship
-from langchain_core.documents import Document
-
-DEFAULT_NODE_TYPE = "Node"
-
-
-class GlinerGraphTransformer:
-    """
-    A transformer class for converting documents into graph structures
-    using the GLiNER and GLiREL models.
-
-    This class leverages GLiNER for named entity recognition and GLiREL for
-    relationship extraction from text documents, converting them into a graph format.
-    The extracted entities and relationships are filtered based on specified
-    confidence thresholds and allowed types.
-
-    For more details on GLiNER and GLiREL, visit their respective repositories:
-    GLiNER: https://github.com/urchade/GLiNER
-    GLiREL: https://github.com/jackboyla/GLiREL/tree/main
-
-    Args:
-        allowed_nodes (List[str]): A list of allowed node types for entity extraction.
-        allowed_relationships (Union[List[str], Dict[str, Any]]): A list of allowed
-            relationship types or a dictionary with additional configuration for
-            relationship extraction.
-        gliner_model (str): The name of the pretrained GLiNER model to use.
-            Default is "urchade/gliner_mediumv2.1".
-        glirel_model (str): The name of the pretrained GLiREL model to use.
-            Default is "jackboyla/glirel_beta".
-        entity_confidence_threshold (float): The confidence threshold for
-            filtering extracted entities. Default is 0.1.
-        relationship_confidence_threshold (float): The confidence threshold for
-            filtering extracted relationships. Default is 0.1.
-        device (str): The device to use for model inference ('cpu' or 'cuda').
-            Default is "cpu".
-        ignore_self_loops (bool): Whether to ignore relationships where the
-            source and target nodes are the same. Default is True.
-    """
-
-    def __init__(
-        self,
-        allowed_nodes: List[str],
-        allowed_relationships: Union[List[str], Dict[str, Any]],
-        gliner_model: str = "urchade/gliner_mediumv2.1",
-        glirel_model: str = "jackboyla/glirel_beta",
-        entity_confidence_threshold: float = 0.1,
-        relationship_confidence_threshold: float = 0.1,
-        device: str = "cpu",
-        ignore_self_loops: bool = True,
-    ) -> None:
-        try:
-            import gliner_spacy  # type: ignore # noqa: F401
-        except ImportError:
-            raise ImportError(
-                "Could not import gliner-spacy python package. "
-                "Please install it with `pip install gliner-spacy`."
-            )
-        try:
-            import spacy  # type: ignore
-        except ImportError:
-            raise ImportError(
-                "Could not import spacy python package. "
-                "Please install it with `pip install spacy`."
-            )
-        try:
-            import glirel  # type: ignore # noqa: F401
-        except ImportError:
-            raise ImportError(
-                "Could not import glirel python package. "
-                "Please install it with `pip install glirel`."
-            )
-
-        gliner_config = {
-            "gliner_model": gliner_model,
-            "chunk_size": 250,
-            "labels": allowed_nodes,
-            "style": "ent",
-            "threshold": entity_confidence_threshold,
-            "map_location": device,
-        }
-        glirel_config = {"model": glirel_model, "device": device}
-        self.nlp = spacy.blank("en")
-        # Add the GLiNER component to the pipeline
-        self.nlp.add_pipe("gliner_spacy", config=gliner_config)
-        # Add the GLiREL component to the pipeline
-        self.nlp.add_pipe("glirel", after="gliner_spacy", config=glirel_config)
-        self.allowed_relationships = (
-            {"glirel_labels": allowed_relationships}
-            if isinstance(allowed_relationships, list)
-            else allowed_relationships
-        )
-        self.relationship_confidence_threshold = relationship_confidence_threshold
-        self.ignore_self_loops = ignore_self_loops
-
-    def process_document(self, document: Document) -> GraphDocument:
-        # Extraction as a spaCy pipeline
-        docs = list(
-            self.nlp.pipe(
-                [(document.page_content, self.allowed_relationships)], as_tuples=True
-            )
-        )
-        # Convert nodes
-        nodes = []
-        for node in docs[0][0].ents:
-            nodes.append(
-                Node(
-                    id=node.text,
-                    type=node.label_,
-                )
-            )
-        # Convert relationships
-        relationships = []
-        relations = docs[0][0]._.relations
-        # Deduplicate based on label, head text, and tail text,
-        # keeping the highest-scoring candidate for each key
-        deduplicated_rels = []
-        seen = set()
-
-        for item in relations:
-            key = (tuple(item["head_text"]), tuple(item["tail_text"]), item["label"])
-
-            if key not in seen:
-                seen.add(key)
-
-                # Find all items matching the current key
-                matching_items = [
-                    rel
-                    for rel in relations
-                    if (tuple(rel["head_text"]), tuple(rel["tail_text"]), rel["label"])
-                    == key
-                ]
-
-                # Find the item with the maximum score
-                max_item = max(matching_items, key=lambda x: x["score"])
-                deduplicated_rels.append(max_item)
-        for rel in deduplicated_rels:
-            # Relationship confidence threshold
-            if rel["score"] < self.relationship_confidence_threshold:
-                continue
-            source_id = docs[0][0][rel["head_pos"][0] : rel["head_pos"][1]].text
-            target_id = docs[0][0][rel["tail_pos"][0] : rel["tail_pos"][1]].text
-            # Ignore self loops
-            if self.ignore_self_loops and source_id == target_id:
-                continue
-            source_node = [n for n in nodes if n.id == source_id][0]
-            target_node = [n for n in nodes if n.id == target_id][0]
-            relationships.append(
-                Relationship(
-                    source=source_node,
-                    target=target_node,
-                    type=rel["label"].replace(" ", "_").upper(),
-                )
-            )
-
-        return GraphDocument(nodes=nodes, relationships=relationships, source=document)
-
-    def convert_to_graph_documents(
-        self, documents: Sequence[Document]
-    ) -> List[GraphDocument]:
-        """Convert a sequence of documents into graph documents.
-
-        Args:
-            documents (Sequence[Document]): The original documents.
-            kwargs: Additional keyword arguments.
-
-        Returns:
-            Sequence[GraphDocument]: The transformed documents as graphs.
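One step in `process_document` above deserves a closer look: relations are deduplicated on the (head, tail, label) key, keeping the highest-scoring candidate. The original does this with a rescan of the full list per unseen key; a self-contained single-pass equivalent of the same invariant (toy data, illustrative only):

```python
relations = [
    {"head_text": "Ada", "tail_text": "Babbage", "label": "KNOWS", "score": 0.4},
    {"head_text": "Ada", "tail_text": "Babbage", "label": "KNOWS", "score": 0.9},
    {"head_text": "Ada", "tail_text": "London", "label": "LIVED_IN", "score": 0.7},
]

best: dict = {}
for rel in relations:
    key = (rel["head_text"], rel["tail_text"], rel["label"])
    if key not in best or rel["score"] > best[key]["score"]:
        best[key] = rel  # keep only the highest-scoring duplicate

print([r["score"] for r in best.values()])  # [0.9, 0.7]
```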
- """ - results = [] - for document in documents: - graph_document = self.process_document(document) - results.append(graph_document) - return results diff --git a/libs/experimental/langchain_experimental/graph_transformers/llm.py b/libs/experimental/langchain_experimental/graph_transformers/llm.py deleted file mode 100644 index 79ebe8b841f5f..0000000000000 --- a/libs/experimental/langchain_experimental/graph_transformers/llm.py +++ /dev/null @@ -1,887 +0,0 @@ -import asyncio -import json -from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union, cast - -from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship -from langchain_core.documents import Document -from langchain_core.language_models import BaseLanguageModel -from langchain_core.messages import SystemMessage -from langchain_core.output_parsers import JsonOutputParser -from langchain_core.prompts import ( - ChatPromptTemplate, - HumanMessagePromptTemplate, - PromptTemplate, -) -from langchain_core.runnables import RunnableConfig -from pydantic import BaseModel, Field, create_model - -examples = [ - { - "text": ( - "Adam is a software engineer in Microsoft since 2009, " - "and last year he got an award as the Best Talent" - ), - "head": "Adam", - "head_type": "Person", - "relation": "WORKS_FOR", - "tail": "Microsoft", - "tail_type": "Company", - }, - { - "text": ( - "Adam is a software engineer in Microsoft since 2009, " - "and last year he got an award as the Best Talent" - ), - "head": "Adam", - "head_type": "Person", - "relation": "HAS_AWARD", - "tail": "Best Talent", - "tail_type": "Award", - }, - { - "text": ( - "Microsoft is a tech company that provide " - "several products such as Microsoft Word" - ), - "head": "Microsoft Word", - "head_type": "Product", - "relation": "PRODUCED_BY", - "tail": "Microsoft", - "tail_type": "Company", - }, - { - "text": "Microsoft Word is a lightweight app that accessible offline", - "head": "Microsoft Word", - "head_type": "Product", - "relation": "HAS_CHARACTERISTIC", - "tail": "lightweight app", - "tail_type": "Characteristic", - }, - { - "text": "Microsoft Word is a lightweight app that accessible offline", - "head": "Microsoft Word", - "head_type": "Product", - "relation": "HAS_CHARACTERISTIC", - "tail": "accessible offline", - "tail_type": "Characteristic", - }, -] - -system_prompt = ( - "# Knowledge Graph Instructions for GPT-4\n" - "## 1. Overview\n" - "You are a top-tier algorithm designed for extracting information in structured " - "formats to build a knowledge graph.\n" - "Try to capture as much information from the text as possible without " - "sacrificing accuracy. Do not add any information that is not explicitly " - "mentioned in the text.\n" - "- **Nodes** represent entities and concepts.\n" - "- The aim is to achieve simplicity and clarity in the knowledge graph, making it\n" - "accessible for a vast audience.\n" - "## 2. Labeling Nodes\n" - "- **Consistency**: Ensure you use available types for node labels.\n" - "Ensure you use basic or elementary types for node labels.\n" - "- For example, when you identify an entity representing a person, " - "always label it as **'person'**. Avoid using more specific terms " - "like 'mathematician' or 'scientist'." - "- **Node IDs**: Never utilize integers as node IDs. 
Node IDs should be " - "names or human-readable identifiers found in the text.\n" - "- **Relationships** represent connections between entities or concepts.\n" - "Ensure consistency and generality in relationship types when constructing " - "knowledge graphs. Instead of using specific and momentary types " - "such as 'BECAME_PROFESSOR', use more general and timeless relationship types " - "like 'PROFESSOR'. Make sure to use general and timeless relationship types!\n" - "## 3. Coreference Resolution\n" - "- **Maintain Entity Consistency**: When extracting entities, it's vital to " - "ensure consistency.\n" - 'If an entity, such as "John Doe", is mentioned multiple times in the text ' - 'but is referred to by different names or pronouns (e.g., "Joe", "he"),' - "always use the most complete identifier for that entity throughout the " - 'knowledge graph. In this example, use "John Doe" as the entity ID.\n' - "Remember, the knowledge graph should be coherent and easily understandable, " - "so maintaining consistency in entity references is crucial.\n" - "## 4. Strict Compliance\n" - "Adhere to the rules strictly. Non-compliance will result in termination." -) - -default_prompt = ChatPromptTemplate.from_messages( - [ - ( - "system", - system_prompt, - ), - ( - "human", - ( - "Tip: Make sure to answer in the correct format and do " - "not include any explanations. " - "Use the given format to extract information from the " - "following input: {input}" - ), - ), - ] -) - - -def _get_additional_info(input_type: str) -> str: - # Check if the input_type is one of the allowed values - if input_type not in ["node", "relationship", "property"]: - raise ValueError("input_type must be 'node', 'relationship', or 'property'") - - # Perform actions based on the input_type - if input_type == "node": - return ( - "Ensure you use basic or elementary types for node labels.\n" - "For example, when you identify an entity representing a person, " - "always label it as **'Person'**. Avoid using more specific terms " - "like 'Mathematician' or 'Scientist'" - ) - elif input_type == "relationship": - return ( - "Instead of using specific and momentary types such as " - "'BECAME_PROFESSOR', use more general and timeless relationship types " - "like 'PROFESSOR'. However, do not sacrifice any accuracy for generality" - ) - elif input_type == "property": - return "" - return "" - - -def optional_enum_field( - enum_values: Optional[List[str]] = None, - description: str = "", - input_type: str = "node", - llm_type: Optional[str] = None, - **field_kwargs: Any, -) -> Any: - """Utility function to conditionally create a field with an enum constraint.""" - # Only openai supports enum param - if enum_values and llm_type == "openai-chat": - return Field( - ..., - enum=enum_values, # type: ignore[call-arg] - description=f"{description}. Available options are {enum_values}", - **field_kwargs, - ) - elif enum_values: - return Field( - ..., - description=f"{description}. Available options are {enum_values}", - **field_kwargs, - ) - else: - additional_info = _get_additional_info(input_type) - return Field(..., description=description + additional_info, **field_kwargs) - - -class _Graph(BaseModel): - nodes: Optional[List] - relationships: Optional[List] - - -class UnstructuredRelation(BaseModel): - head: str = Field( - description=( - "extracted head entity like Microsoft, Apple, John. " - "Must use human-readable unique identifier." 
- ) - ) - head_type: str = Field( - description="type of the extracted head entity like Person, Company, etc" - ) - relation: str = Field(description="relation between the head and the tail entities") - tail: str = Field( - description=( - "extracted tail entity like Microsoft, Apple, John. " - "Must use human-readable unique identifier." - ) - ) - tail_type: str = Field( - description="type of the extracted tail entity like Person, Company, etc" - ) - - -def create_unstructured_prompt( - node_labels: Optional[List[str]] = None, rel_types: Optional[List[str]] = None -) -> ChatPromptTemplate: - node_labels_str = str(node_labels) if node_labels else "" - rel_types_str = str(rel_types) if rel_types else "" - base_string_parts = [ - "You are a top-tier algorithm designed for extracting information in " - "structured formats to build a knowledge graph. Your task is to identify " - "the entities and relations requested with the user prompt from a given " - "text. You must generate the output in a JSON format containing a list " - 'with JSON objects. Each object should have the keys: "head", ' - '"head_type", "relation", "tail", and "tail_type". The "head" ' - "key must contain the text of the extracted entity with one of the types " - "from the provided list in the user prompt.", - f'The "head_type" key must contain the type of the extracted head entity, ' - f"which must be one of the types from {node_labels_str}." - if node_labels - else "", - f'The "relation" key must contain the type of relation between the "head" ' - f'and the "tail", which must be one of the relations from {rel_types_str}.' - if rel_types - else "", - f'The "tail" key must represent the text of an extracted entity which is ' - f'the tail of the relation, and the "tail_type" key must contain the type ' - f"of the tail entity from {node_labels_str}." - if node_labels - else "", - "Attempt to extract as many entities and relations as you can. Maintain " - "Entity Consistency: When extracting entities, it's vital to ensure " - 'consistency. If an entity, such as "John Doe", is mentioned multiple ' - "times in the text but is referred to by different names or pronouns " - '(e.g., "Joe", "he"), always use the most complete identifier for ' - "that entity. The knowledge graph should be coherent and easily " - "understandable, so maintaining consistency in entity references is " - "crucial.", - "IMPORTANT NOTES:\n- Don't add any explanation and text.", - ] - system_prompt = "\n".join(filter(None, base_string_parts)) - - system_message = SystemMessage(content=system_prompt) - parser = JsonOutputParser(pydantic_object=UnstructuredRelation) - - human_string_parts = [ - "Based on the following example, extract entities and " - "relations from the provided text.\n\n", - "Use the following entity types, don't use other entity " - "that is not defined below:" - "# ENTITY TYPES:" - "{node_labels}" - if node_labels - else "", - "Use the following relation types, don't use other relation " - "that is not defined below:" - "# RELATION TYPES:" - "{rel_types}" - if rel_types - else "", - "Below are a number of examples of text and their extracted " - "entities and relationships." - "{examples}\n" - "For the following text, extract entities and relations as " - "in the provided example." 
- "{format_instructions}\nText: {input}", - ] - human_prompt_string = "\n".join(filter(None, human_string_parts)) - human_prompt = PromptTemplate( - template=human_prompt_string, - input_variables=["input"], - partial_variables={ - "format_instructions": parser.get_format_instructions(), - "node_labels": node_labels, - "rel_types": rel_types, - "examples": examples, - }, - ) - - human_message_prompt = HumanMessagePromptTemplate(prompt=human_prompt) - - chat_prompt = ChatPromptTemplate.from_messages( - [system_message, human_message_prompt] - ) - return chat_prompt - - -def create_simple_model( - node_labels: Optional[List[str]] = None, - rel_types: Optional[List[str]] = None, - node_properties: Union[bool, List[str]] = False, - llm_type: Optional[str] = None, - relationship_properties: Union[bool, List[str]] = False, -) -> Type[_Graph]: - """ - Create a simple graph model with optional constraints on node - and relationship types. - - Args: - node_labels (Optional[List[str]]): Specifies the allowed node types. - Defaults to None, allowing all node types. - rel_types (Optional[List[str]]): Specifies the allowed relationship types. - Defaults to None, allowing all relationship types. - node_properties (Union[bool, List[str]]): Specifies if node properties should - be included. If a list is provided, only properties with keys in the list - will be included. If True, all properties are included. Defaults to False. - relationship_properties (Union[bool, List[str]]): Specifies if relationship - properties should be included. If a list is provided, only properties with - keys in the list will be included. If True, all properties are included. - Defaults to False. - llm_type (Optional[str]): The type of the language model. Defaults to None. - Only openai supports enum param: openai-chat. - - Returns: - Type[_Graph]: A graph model with the specified constraints. - - Raises: - ValueError: If 'id' is included in the node or relationship properties list. 
- """ - - node_fields: Dict[str, Tuple[Any, Any]] = { - "id": ( - str, - Field(..., description="Name or human-readable unique identifier."), - ), - "type": ( - str, - optional_enum_field( - node_labels, - description="The type or label of the node.", - input_type="node", - llm_type=llm_type, - ), - ), - } - - if node_properties: - if isinstance(node_properties, list) and "id" in node_properties: - raise ValueError("The node property 'id' is reserved and cannot be used.") - # Map True to empty array - node_properties_mapped: List[str] = ( - [] if node_properties is True else node_properties - ) - - class Property(BaseModel): - """A single property consisting of key and value""" - - key: str = optional_enum_field( - node_properties_mapped, - description="Property key.", - input_type="property", - llm_type=llm_type, - ) - value: str = Field(..., description="value") - - node_fields["properties"] = ( - Optional[List[Property]], - Field(None, description="List of node properties"), - ) - SimpleNode = create_model("SimpleNode", **node_fields) # type: ignore - - relationship_fields: Dict[str, Tuple[Any, Any]] = { - "source_node_id": ( - str, - Field( - ..., - description="Name or human-readable unique identifier of source node", - ), - ), - "source_node_type": ( - str, - optional_enum_field( - node_labels, - description="The type or label of the source node.", - input_type="node", - llm_type=llm_type, - ), - ), - "target_node_id": ( - str, - Field( - ..., - description="Name or human-readable unique identifier of target node", - ), - ), - "target_node_type": ( - str, - optional_enum_field( - node_labels, - description="The type or label of the target node.", - input_type="node", - llm_type=llm_type, - ), - ), - "type": ( - str, - optional_enum_field( - rel_types, - description="The type of the relationship.", - input_type="relationship", - llm_type=llm_type, - ), - ), - } - if relationship_properties: - if ( - isinstance(relationship_properties, list) - and "id" in relationship_properties - ): - raise ValueError( - "The relationship property 'id' is reserved and cannot be used." 
- ) - # Map True to empty array - relationship_properties_mapped: List[str] = ( - [] if relationship_properties is True else relationship_properties - ) - - class RelationshipProperty(BaseModel): - """A single property consisting of key and value""" - - key: str = optional_enum_field( - relationship_properties_mapped, - description="Property key.", - input_type="property", - llm_type=llm_type, - ) - value: str = Field(..., description="value") - - relationship_fields["properties"] = ( - Optional[List[RelationshipProperty]], - Field(None, description="List of relationship properties"), - ) - SimpleRelationship = create_model("SimpleRelationship", **relationship_fields) # type: ignore - - class DynamicGraph(_Graph): - """Represents a graph document consisting of nodes and relationships.""" - - nodes: Optional[List[SimpleNode]] = Field(description="List of nodes") # type: ignore - relationships: Optional[List[SimpleRelationship]] = Field( # type: ignore - description="List of relationships" - ) - - return DynamicGraph - - -def map_to_base_node(node: Any) -> Node: - """Map the SimpleNode to the base Node.""" - properties = {} - if hasattr(node, "properties") and node.properties: - for p in node.properties: - properties[format_property_key(p.key)] = p.value - return Node(id=node.id, type=node.type, properties=properties) - - -def map_to_base_relationship(rel: Any) -> Relationship: - """Map the SimpleRelationship to the base Relationship.""" - source = Node(id=rel.source_node_id, type=rel.source_node_type) - target = Node(id=rel.target_node_id, type=rel.target_node_type) - properties = {} - if hasattr(rel, "properties") and rel.properties: - for p in rel.properties: - properties[format_property_key(p.key)] = p.value - return Relationship( - source=source, target=target, type=rel.type, properties=properties - ) - - -def _parse_and_clean_json( - argument_json: Dict[str, Any], -) -> Tuple[List[Node], List[Relationship]]: - nodes = [] - for node in argument_json["nodes"]: - if not node.get("id"): # Id is mandatory, skip this node - continue - node_properties = {} - if "properties" in node and node["properties"]: - for p in node["properties"]: - node_properties[format_property_key(p["key"])] = p["value"] - nodes.append( - Node( - id=node["id"], - type=node.get("type", "Node"), - properties=node_properties, - ) - ) - relationships = [] - for rel in argument_json["relationships"]: - # Mandatory props - if ( - not rel.get("source_node_id") - or not rel.get("target_node_id") - or not rel.get("type") - ): - continue - - # Node type copying if needed from node list - if not rel.get("source_node_type"): - try: - rel["source_node_type"] = [ - el.get("type") - for el in argument_json["nodes"] - if el["id"] == rel["source_node_id"] - ][0] - except IndexError: - rel["source_node_type"] = None - if not rel.get("target_node_type"): - try: - rel["target_node_type"] = [ - el.get("type") - for el in argument_json["nodes"] - if el["id"] == rel["target_node_id"] - ][0] - except IndexError: - rel["target_node_type"] = None - - rel_properties = {} - if "properties" in rel and rel["properties"]: - for p in rel["properties"]: - rel_properties[format_property_key(p["key"])] = p["value"] - - source_node = Node( - id=rel["source_node_id"], - type=rel["source_node_type"], - ) - target_node = Node( - id=rel["target_node_id"], - type=rel["target_node_type"], - ) - relationships.append( - Relationship( - source=source_node, - target=target_node, - type=rel["type"], - properties=rel_properties, - ) - ) - return nodes, 
relationships - - -def _format_nodes(nodes: List[Node]) -> List[Node]: - return [ - Node( - id=el.id.title() if isinstance(el.id, str) else el.id, - type=el.type.capitalize() # type: ignore[arg-type] - if el.type - else None, # handle empty strings # type: ignore[arg-type] - properties=el.properties, - ) - for el in nodes - ] - - -def _format_relationships(rels: List[Relationship]) -> List[Relationship]: - return [ - Relationship( - source=_format_nodes([el.source])[0], - target=_format_nodes([el.target])[0], - type=el.type.replace(" ", "_").upper(), - properties=el.properties, - ) - for el in rels - ] - - -def format_property_key(s: str) -> str: - words = s.split() - if not words: - return s - first_word = words[0].lower() - capitalized_words = [word.capitalize() for word in words[1:]] - return "".join([first_word] + capitalized_words) - - -def _convert_to_graph_document( - raw_schema: Dict[Any, Any], -) -> Tuple[List[Node], List[Relationship]]: - # If there are validation errors - if not raw_schema["parsed"]: - try: - try: # OpenAI type response - argument_json = json.loads( - raw_schema["raw"].additional_kwargs["tool_calls"][0]["function"][ - "arguments" - ] - ) - except Exception: # Google type response - try: - argument_json = json.loads( - raw_schema["raw"].additional_kwargs["function_call"][ - "arguments" - ] - ) - except Exception: # Ollama type response - argument_json = raw_schema["raw"].tool_calls[0]["args"] - if isinstance(argument_json["nodes"], str): - argument_json["nodes"] = json.loads(argument_json["nodes"]) - if isinstance(argument_json["relationships"], str): - argument_json["relationships"] = json.loads( - argument_json["relationships"] - ) - - nodes, relationships = _parse_and_clean_json(argument_json) - except Exception: # If we can't parse JSON - return ([], []) - else: # If there are no validation errors use parsed pydantic object - parsed_schema: _Graph = raw_schema["parsed"] - nodes = ( - [map_to_base_node(node) for node in parsed_schema.nodes if node.id] - if parsed_schema.nodes - else [] - ) - - relationships = ( - [ - map_to_base_relationship(rel) - for rel in parsed_schema.relationships - if rel.type and rel.source_node_id and rel.target_node_id - ] - if parsed_schema.relationships - else [] - ) - # Title / Capitalize - return _format_nodes(nodes), _format_relationships(relationships) - - -class LLMGraphTransformer: - """Transform documents into graph-based documents using a LLM. - - It allows specifying constraints on the types of nodes and relationships to include - in the output graph. The class supports extracting properties for both nodes and - relationships. - - Args: - llm (BaseLanguageModel): An instance of a language model supporting structured - output. - allowed_nodes (List[str], optional): Specifies which node types are - allowed in the graph. Defaults to an empty list, allowing all node types. - allowed_relationships (List[str], optional): Specifies which relationship types - are allowed in the graph. Defaults to an empty list, allowing all relationship - types. - prompt (Optional[ChatPromptTemplate], optional): The prompt to pass to - the LLM with additional instructions. - strict_mode (bool, optional): Determines whether the transformer should apply - filtering to strictly adhere to `allowed_nodes` and `allowed_relationships`. - Defaults to True. - node_properties (Union[bool, List[str]]): If True, the LLM can extract any - node properties from text. 
Alternatively, a list of valid properties can - be provided for the LLM to extract, restricting extraction to those specified. - relationship_properties (Union[bool, List[str]]): If True, the LLM can extract - any relationship properties from text. Alternatively, a list of valid - properties can be provided for the LLM to extract, restricting extraction to - those specified. - ignore_tool_usage (bool): Indicates whether the transformer should - bypass the use of structured output functionality of the language model. - If set to True, the transformer will not use the language model's native - function calling capabilities to handle structured output. Defaults to False. - - Example: - .. code-block:: python - from langchain_experimental.graph_transformers import LLMGraphTransformer - from langchain_core.documents import Document - from langchain_openai import ChatOpenAI - - llm=ChatOpenAI(temperature=0) - transformer = LLMGraphTransformer( - llm=llm, - allowed_nodes=["Person", "Organization"]) - - doc = Document(page_content="Elon Musk is suing OpenAI") - graph_documents = transformer.convert_to_graph_documents([doc]) - """ - - def __init__( - self, - llm: BaseLanguageModel, - allowed_nodes: List[str] = [], - allowed_relationships: List[str] = [], - prompt: Optional[ChatPromptTemplate] = None, - strict_mode: bool = True, - node_properties: Union[bool, List[str]] = False, - relationship_properties: Union[bool, List[str]] = False, - ignore_tool_usage: bool = False, - ) -> None: - self.allowed_nodes = allowed_nodes - self.allowed_relationships = allowed_relationships - self.strict_mode = strict_mode - self._function_call = not ignore_tool_usage - # Check if the LLM really supports structured output - if self._function_call: - try: - llm.with_structured_output(_Graph) - except NotImplementedError: - self._function_call = False - if not self._function_call: - if node_properties or relationship_properties: - raise ValueError( - "The 'node_properties' and 'relationship_properties' parameters " - "cannot be used in combination with a LLM that doesn't support " - "native function calling." - ) - try: - import json_repair # type: ignore - - self.json_repair = json_repair - except ImportError: - raise ImportError( - "Could not import json_repair python package. " - "Please install it with `pip install json-repair`." - ) - prompt = prompt or create_unstructured_prompt( - allowed_nodes, allowed_relationships - ) - self.chain = prompt | llm - else: - # Define chain - try: - llm_type = llm._llm_type # type: ignore - except AttributeError: - llm_type = None - schema = create_simple_model( - allowed_nodes, - allowed_relationships, - node_properties, - llm_type, - relationship_properties, - ) - structured_llm = llm.with_structured_output(schema, include_raw=True) - prompt = prompt or default_prompt - self.chain = prompt | structured_llm - - def process_response( - self, document: Document, config: Optional[RunnableConfig] = None - ) -> GraphDocument: - """ - Processes a single document, transforming it into a graph document using - an LLM based on the model's schema and constraints. 
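After extraction, this method's strict mode prunes anything outside `allowed_nodes`/`allowed_relationships` with case-insensitive comparisons, and relationships are dropped whenever either endpoint fails the node filter. A self-contained restatement of that filter (toy tuples standing in for the Node/Relationship objects):

```python
allowed_nodes = {"person", "organization"}

nodes = [("Elon Musk", "Person"), ("OpenAI", "Organization"), ("GPT-4", "Product")]
rels = [("Elon Musk", "OpenAI", "SUES"), ("OpenAI", "GPT-4", "PRODUCES")]

kept_nodes = [n for n in nodes if n[1].lower() in allowed_nodes]
kept_ids = {n[0] for n in kept_nodes}
kept_rels = [r for r in rels if r[0] in kept_ids and r[1] in kept_ids]

print(kept_nodes)  # the Product node is pruned
print(kept_rels)   # and the PRODUCES relationship goes with it
```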
- """ - text = document.page_content - raw_schema = self.chain.invoke({"input": text}, config=config) - if self._function_call: - raw_schema = cast(Dict[Any, Any], raw_schema) - nodes, relationships = _convert_to_graph_document(raw_schema) - else: - nodes_set = set() - relationships = [] - if not isinstance(raw_schema, str): - raw_schema = raw_schema.content - parsed_json = self.json_repair.loads(raw_schema) - if isinstance(parsed_json, dict): - parsed_json = [parsed_json] - for rel in parsed_json: - # Check if mandatory properties are there - if ( - not rel.get("head") - or not rel.get("tail") - or not rel.get("relation") - ): - continue - # Nodes need to be deduplicated using a set - # Use default Node label for nodes if missing - nodes_set.add((rel["head"], rel.get("head_type", "Node"))) - nodes_set.add((rel["tail"], rel.get("tail_type", "Node"))) - - source_node = Node(id=rel["head"], type=rel.get("head_type", "Node")) - target_node = Node(id=rel["tail"], type=rel.get("tail_type", "Node")) - relationships.append( - Relationship( - source=source_node, target=target_node, type=rel["relation"] - ) - ) - # Create nodes list - nodes = [Node(id=el[0], type=el[1]) for el in list(nodes_set)] - - # Strict mode filtering - if self.strict_mode and (self.allowed_nodes or self.allowed_relationships): - if self.allowed_nodes: - lower_allowed_nodes = [el.lower() for el in self.allowed_nodes] - nodes = [ - node for node in nodes if node.type.lower() in lower_allowed_nodes - ] - relationships = [ - rel - for rel in relationships - if rel.source.type.lower() in lower_allowed_nodes - and rel.target.type.lower() in lower_allowed_nodes - ] - if self.allowed_relationships: - relationships = [ - rel - for rel in relationships - if rel.type.lower() - in [el.lower() for el in self.allowed_relationships] - ] - - return GraphDocument(nodes=nodes, relationships=relationships, source=document) - - def convert_to_graph_documents( - self, documents: Sequence[Document], config: Optional[RunnableConfig] = None - ) -> List[GraphDocument]: - """Convert a sequence of documents into graph documents. - - Args: - documents (Sequence[Document]): The original documents. - kwargs: Additional keyword arguments. - - Returns: - Sequence[GraphDocument]: The transformed documents as graphs. - """ - return [self.process_response(document, config) for document in documents] - - async def aprocess_response( - self, document: Document, config: Optional[RunnableConfig] = None - ) -> GraphDocument: - """ - Asynchronously processes a single document, transforming it into a - graph document. 
- """ - text = document.page_content - raw_schema = await self.chain.ainvoke({"input": text}, config=config) - if self._function_call: - raw_schema = cast(Dict[Any, Any], raw_schema) - nodes, relationships = _convert_to_graph_document(raw_schema) - else: - nodes_set = set() - relationships = [] - if not isinstance(raw_schema, str): - raw_schema = raw_schema.content - parsed_json = self.json_repair.loads(raw_schema) - if isinstance(parsed_json, dict): - parsed_json = [parsed_json] - for rel in parsed_json: - # Check if mandatory properties are there - if ( - not rel.get("head") - or not rel.get("tail") - or not rel.get("relation") - ): - continue - # Nodes need to be deduplicated using a set - # Use default Node label for nodes if missing - nodes_set.add((rel["head"], rel.get("head_type", "Node"))) - nodes_set.add((rel["tail"], rel.get("tail_type", "Node"))) - - source_node = Node(id=rel["head"], type=rel.get("head_type", "Node")) - target_node = Node(id=rel["tail"], type=rel.get("tail_type", "Node")) - relationships.append( - Relationship( - source=source_node, target=target_node, type=rel["relation"] - ) - ) - # Create nodes list - nodes = [Node(id=el[0], type=el[1]) for el in list(nodes_set)] - - if self.strict_mode and (self.allowed_nodes or self.allowed_relationships): - if self.allowed_nodes: - lower_allowed_nodes = [el.lower() for el in self.allowed_nodes] - nodes = [ - node for node in nodes if node.type.lower() in lower_allowed_nodes - ] - relationships = [ - rel - for rel in relationships - if rel.source.type.lower() in lower_allowed_nodes - and rel.target.type.lower() in lower_allowed_nodes - ] - if self.allowed_relationships: - relationships = [ - rel - for rel in relationships - if rel.type.lower() - in [el.lower() for el in self.allowed_relationships] - ] - - return GraphDocument(nodes=nodes, relationships=relationships, source=document) - - async def aconvert_to_graph_documents( - self, documents: Sequence[Document], config: Optional[RunnableConfig] = None - ) -> List[GraphDocument]: - """ - Asynchronously convert a sequence of documents into graph documents. - """ - tasks = [ - asyncio.create_task(self.aprocess_response(document, config)) - for document in documents - ] - results = await asyncio.gather(*tasks) - return results diff --git a/libs/experimental/langchain_experimental/graph_transformers/relik.py b/libs/experimental/langchain_experimental/graph_transformers/relik.py deleted file mode 100644 index a1e7dd0f6b072..0000000000000 --- a/libs/experimental/langchain_experimental/graph_transformers/relik.py +++ /dev/null @@ -1,115 +0,0 @@ -import logging -from typing import Any, Dict, List, Sequence - -from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship -from langchain_core.documents import Document - -DEFAULT_NODE_TYPE = "Node" - - -class RelikGraphTransformer: - """ - A transformer class for converting documents into graph structures - using the Relik library and models. - - This class leverages relik models for extracting relationships - and nodes from text documents and converting them into a graph format. - The relationships are filtered based on a specified confidence threshold. - - For more details on the Relik library, visit their GitHub repository: - https://github.com/SapienzaNLP/relik - - Args: - model (str): The name of the pretrained Relik model to use. - Default is "relik-ie/relik-relation-extraction-small-wikipedia". - relationship_confidence_threshold (float): The confidence threshold for - filtering relationships. 
Default is 0.1. - model_config (Dict[str, any]): Additional configuration options for the - Relik model. Default is an empty dictionary. - ignore_self_loops (bool): Whether to ignore relationships where the - source and target nodes are the same. Default is True. - """ - - def __init__( - self, - model: str = "relik-ie/relik-relation-extraction-small", - relationship_confidence_threshold: float = 0.1, - model_config: Dict[str, Any] = {}, - ignore_self_loops: bool = True, - ) -> None: - try: - import relik # type: ignore - - # Remove default INFO logging - logging.getLogger("relik").setLevel(logging.WARNING) - except ImportError: - raise ImportError( - "Could not import relik python package. " - "Please install it with `pip install relik`." - ) - self.relik_model = relik.Relik.from_pretrained(model, **model_config) - self.relationship_confidence_threshold = relationship_confidence_threshold - self.ignore_self_loops = ignore_self_loops - - def process_document(self, document: Document) -> GraphDocument: - relik_out = self.relik_model(document.page_content) - nodes = [] - # Extract nodes - for node in relik_out.spans: - nodes.append( - Node( - id=node.text, - type=DEFAULT_NODE_TYPE - if node.label.strip() == "--NME--" - else node.label.strip(), - ) - ) - - relationships = [] - # Extract relationships - for triple in relik_out.triplets: - # Ignore relationship if below confidence threshold - if triple.confidence < self.relationship_confidence_threshold: - continue - # Ignore self loops - if self.ignore_self_loops and triple.subject.text == triple.object.text: - continue - source_node = Node( - id=triple.subject.text, - type=DEFAULT_NODE_TYPE - if triple.subject.label.strip() == "--NME--" - else triple.subject.label.strip(), - ) - target_node = Node( - id=triple.object.text, - type=DEFAULT_NODE_TYPE - if triple.object.label.strip() == "--NME--" - else triple.object.label.strip(), - ) - - relationship = Relationship( - source=source_node, - target=target_node, - type=triple.label.replace(" ", "_").upper(), - ) - relationships.append(relationship) - - return GraphDocument(nodes=nodes, relationships=relationships, source=document) - - def convert_to_graph_documents( - self, documents: Sequence[Document] - ) -> List[GraphDocument]: - """Convert a sequence of documents into graph documents. - - Args: - documents (Sequence[Document]): The original documents. - kwargs: Additional keyword arguments. - - Returns: - Sequence[GraphDocument]: The transformed documents as graphs. 
- """ - results = [] - for document in documents: - graph_document = self.process_document(document) - results.append(graph_document) - return results diff --git a/libs/experimental/langchain_experimental/llm_bash/__init__.py b/libs/experimental/langchain_experimental/llm_bash/__init__.py deleted file mode 100644 index cab96624b0d0f..0000000000000 --- a/libs/experimental/langchain_experimental/llm_bash/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -"""**LLM bash** is a chain that uses LLM to interpret a prompt and -executes **bash** code.""" diff --git a/libs/experimental/langchain_experimental/llm_bash/base.py b/libs/experimental/langchain_experimental/llm_bash/base.py deleted file mode 100644 index bd9e0eed3e1c0..0000000000000 --- a/libs/experimental/langchain_experimental/llm_bash/base.py +++ /dev/null @@ -1,127 +0,0 @@ -"""Chain that interprets a prompt and executes bash operations.""" - -from __future__ import annotations - -import logging -import warnings -from typing import Any, Dict, List, Optional - -from langchain.chains.base import Chain -from langchain.chains.llm import LLMChain -from langchain.schema import BasePromptTemplate, OutputParserException -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.language_models import BaseLanguageModel -from pydantic import ConfigDict, Field, model_validator - -from langchain_experimental.llm_bash.bash import BashProcess -from langchain_experimental.llm_bash.prompt import PROMPT - -logger = logging.getLogger(__name__) - - -class LLMBashChain(Chain): - """Chain that interprets a prompt and executes bash operations. - - Example: - .. code-block:: python - - from langchain.chains import LLMBashChain - from langchain_community.llms import OpenAI - llm_bash = LLMBashChain.from_llm(OpenAI()) - """ - - llm_chain: LLMChain - llm: Optional[BaseLanguageModel] = None - """[Deprecated] LLM wrapper to use.""" - input_key: str = "question" #: :meta private: - output_key: str = "answer" #: :meta private: - prompt: BasePromptTemplate = PROMPT - """[Deprecated]""" - bash_process: BashProcess = Field(default_factory=BashProcess) #: :meta private: - - model_config = ConfigDict( - arbitrary_types_allowed=True, - extra="forbid", - ) - - @model_validator(mode="before") - @classmethod - def raise_deprecation(cls, values: Dict) -> Any: - if "llm" in values: - warnings.warn( - "Directly instantiating an LLMBashChain with an llm is deprecated. " - "Please instantiate with llm_chain or using the from_llm class method." - ) - if "llm_chain" not in values and values["llm"] is not None: - prompt = values.get("prompt", PROMPT) - values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt) - return values - - @model_validator(mode="before") - @classmethod - def validate_prompt(cls, values: Dict) -> Any: - if values["llm_chain"].prompt.output_parser is None: - raise ValueError( - "The prompt used by llm_chain is expected to have an output_parser." - ) - return values - - @property - def input_keys(self) -> List[str]: - """Expect input key. - - :meta private: - """ - return [self.input_key] - - @property - def output_keys(self) -> List[str]: - """Expect output key. 
- - :meta private: - """ - return [self.output_key] - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - _run_manager.on_text(inputs[self.input_key], verbose=self.verbose) - - t = self.llm_chain.predict( - question=inputs[self.input_key], callbacks=_run_manager.get_child() - ) - _run_manager.on_text(t, color="green", verbose=self.verbose) - t = t.strip() - try: - parser = self.llm_chain.prompt.output_parser - command_list = parser.parse(t) # type: ignore[union-attr] - except OutputParserException as e: - _run_manager.on_chain_error(e, verbose=self.verbose) - raise e - - if self.verbose: - _run_manager.on_text("\nCode: ", verbose=self.verbose) - _run_manager.on_text( - str(command_list), color="yellow", verbose=self.verbose - ) - output = self.bash_process.run(command_list) - _run_manager.on_text("\nAnswer: ", verbose=self.verbose) - _run_manager.on_text(output, color="yellow", verbose=self.verbose) - return {self.output_key: output} - - @property - def _chain_type(self) -> str: - return "llm_bash_chain" - - @classmethod - def from_llm( - cls, - llm: BaseLanguageModel, - prompt: BasePromptTemplate = PROMPT, - **kwargs: Any, - ) -> LLMBashChain: - llm_chain = LLMChain(llm=llm, prompt=prompt) - return cls(llm_chain=llm_chain, **kwargs) diff --git a/libs/experimental/langchain_experimental/llm_bash/bash.py b/libs/experimental/langchain_experimental/llm_bash/bash.py deleted file mode 100644 index f34e0d839941f..0000000000000 --- a/libs/experimental/langchain_experimental/llm_bash/bash.py +++ /dev/null @@ -1,185 +0,0 @@ -"""Wrapper around subprocess to run commands.""" - -from __future__ import annotations - -import platform -import re -import subprocess -from typing import TYPE_CHECKING, List, Union -from uuid import uuid4 - -if TYPE_CHECKING: - import pexpect - - -class BashProcess: - """Wrapper for starting subprocesses. - - Uses the python built-in subprocesses.run() - Persistent processes are **not** available - on Windows systems, as pexpect makes use of - Unix pseudoterminals (ptys). MacOS and Linux - are okay. - - Example: - .. code-block:: python - - from langchain_community.utilities.bash import BashProcess - - bash = BashProcess( - strip_newlines = False, - return_err_output = False, - persistent = False - ) - bash.run('echo \'hello world\'') - - """ - - strip_newlines: bool = False - """Whether or not to run .strip() on the output""" - return_err_output: bool = False - """Whether or not to return the output of a failed - command, or just the error message and stacktrace""" - persistent: bool = False - """Whether or not to spawn a persistent session - NOTE: Unavailable for Windows environments""" - - def __init__( - self, - strip_newlines: bool = False, - return_err_output: bool = False, - persistent: bool = False, - ): - """ - Initializes with default settings - """ - self.strip_newlines = strip_newlines - self.return_err_output = return_err_output - self.prompt = "" - self.process = None - if persistent: - self.prompt = str(uuid4()) - self.process = self._initialize_persistent_process(self, self.prompt) - - @staticmethod - def _lazy_import_pexpect() -> pexpect: - """Import pexpect only when needed.""" - if platform.system() == "Windows": - raise ValueError( - "Persistent bash processes are not yet supported on Windows." 
- ) - try: - import pexpect - - except ImportError: - raise ImportError( - "pexpect required for persistent bash processes." - " To install, run `pip install pexpect`." - ) - return pexpect - - @staticmethod - def _initialize_persistent_process(self: BashProcess, prompt: str) -> pexpect.spawn: - # Start bash in a clean environment - # Doesn't work on windows - """ - Initializes a persistent bash session in a - clean environment. - NOTE: Unavailable on Windows - - Args: - prompt(str): the sentinel prompt string used to - detect when a command has finished - """ - pexpect = self._lazy_import_pexpect() - process = pexpect.spawn( - "env", ["-i", "bash", "--norc", "--noprofile"], encoding="utf-8" - ) - # Set the custom prompt - process.sendline("PS1=" + prompt) - - process.expect_exact(prompt, timeout=10) - return process - - def run(self, commands: Union[str, List[str]]) -> str: - """ - Run commands in either an existing persistent - subprocess or in a new subprocess environment. - - Args: - commands(List[str]): a list of commands to - execute in the session - """ - if isinstance(commands, str): - commands = [commands] - commands = ";".join(commands) - if self.process is not None: - return self._run_persistent( - commands, - ) - else: - return self._run(commands) - - def _run(self, command: str) -> str: - """ - Runs a command in a subprocess and returns - the output. - - Args: - command: The command to run - """ - try: - output = subprocess.run( - command, - shell=True, - check=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - ).stdout.decode() - except subprocess.CalledProcessError as error: - if self.return_err_output: - return error.stdout.decode() - return str(error) - if self.strip_newlines: - output = output.strip() - return output - - def process_output(self, output: str, command: str) -> str: - """ - Uses regex to remove the command from the output - - Args: - output: a process' output string - command: the executed command - """ - pattern = re.escape(command) + r"\s*\n" - output = re.sub(pattern, "", output, count=1) - return output.strip() - - def _run_persistent(self, command: str) -> str: - """ - Runs commands in a persistent environment - and returns the output.
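Since `run` accepts either a string or a list (joined with `;`), and `persistent=True` keeps one `pexpect` session alive, here is a small sketch of both modes (assumes a non-Windows host; `pexpect` is needed only for the persistent case):

```python
from langchain_experimental.llm_bash.bash import BashProcess

# One-shot mode: each call goes through subprocess.run in a fresh shell.
bash = BashProcess(strip_newlines=True)
print(bash.run(["echo hello", "echo world"]))  # joined as "echo hello;echo world"

# Persistent mode: shell state (e.g. the working directory) survives between calls.
pbash = BashProcess(persistent=True)
pbash.run("cd /tmp")
print(pbash.run("pwd"))  # prints /tmp because the same session is reused
```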
- - Args: - command: the command to execute - """ - pexpect = self._lazy_import_pexpect() - if self.process is None: - raise ValueError("Process not initialized") - self.process.sendline(command) - - # Clear the output with an empty string - self.process.expect(self.prompt, timeout=10) - self.process.sendline("") - - try: - self.process.expect([self.prompt, pexpect.EOF], timeout=10) - except pexpect.TIMEOUT: - return f"Timeout error while executing command {command}" - if self.process.after == pexpect.EOF: - return f"Exited with error status: {self.process.exitstatus}" - output = self.process.before - output = self.process_output(output, command) - if self.strip_newlines: - return output.strip() - return output diff --git a/libs/experimental/langchain_experimental/llm_bash/prompt.py b/libs/experimental/langchain_experimental/llm_bash/prompt.py deleted file mode 100644 index 1c6aaf9adb108..0000000000000 --- a/libs/experimental/langchain_experimental/llm_bash/prompt.py +++ /dev/null @@ -1,67 +0,0 @@ -# flake8: noqa -from __future__ import annotations - -import re -from typing import List - -from langchain_core.prompts.prompt import PromptTemplate -from langchain_core.output_parsers import BaseOutputParser -from langchain_core.exceptions import OutputParserException - -_PROMPT_TEMPLATE = """If someone asks you to perform a task, your job is to come up with a series of bash commands that will perform the task. There is no need to put "#!/bin/bash" in your answer. Make sure to reason step by step, using this format: - -Question: "copy the files in the directory named 'target' into a new directory at the same level as target called 'myNewDirectory'" - -I need to take the following actions: -- List all files in the directory -- Create a new directory -- Copy the files from the first directory into the second directory -```bash -ls -mkdir myNewDirectory -cp -r target/* myNewDirectory -``` - -That is the format. Begin! - -Question: {question}""" - - -class BashOutputParser(BaseOutputParser): - """Parser for bash output.""" - - def parse(self, text: str) -> List[str]: - """Parse the output of a bash command.""" - - if "```bash" in text: - return self.get_code_blocks(text) - else: - raise OutputParserException( - f"Failed to parse bash output. Got: {text}", - ) - - @staticmethod - def get_code_blocks(t: str) -> List[str]: - """Get multiple code blocks from the LLM result.""" - code_blocks: List[str] = [] - # Bash markdown code blocks - pattern = re.compile(r"```bash(.*?)(?:\n\s*)```", re.DOTALL) - for match in pattern.finditer(t): - matched = match.group(1).strip() - if matched: - code_blocks.extend( - [line for line in matched.split("\n") if line.strip()] - ) - - return code_blocks - - @property - def _type(self) -> str: - return "bash" - - -PROMPT = PromptTemplate( - input_variables=["question"], - template=_PROMPT_TEMPLATE, - output_parser=BashOutputParser(), -) diff --git a/libs/experimental/langchain_experimental/llm_symbolic_math/__init__.py b/libs/experimental/langchain_experimental/llm_symbolic_math/__init__.py deleted file mode 100644 index a6c24bc0d3dda..0000000000000 --- a/libs/experimental/langchain_experimental/llm_symbolic_math/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -"""Chain that interprets a prompt and **executes python code to do math**. - -Heavily borrowed from `llm_math`, uses the [SymPy](https://www.sympy.org/) package. 
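The parser above only accepts completions that contain a fenced bash block; a quick sketch of what it returns:

```python
from langchain_experimental.llm_bash.prompt import BashOutputParser

parser = BashOutputParser()
completion = "I will list the files and count them.\n```bash\nls\nls | wc -l\n```"
print(parser.parse(completion))  # -> ['ls', 'ls | wc -l']
```

A completion without a fenced bash block raises `OutputParserException`, which is what `LLMBashChain._call` surfaces via `on_chain_error`.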
-""" diff --git a/libs/experimental/langchain_experimental/llm_symbolic_math/base.py b/libs/experimental/langchain_experimental/llm_symbolic_math/base.py deleted file mode 100644 index ca6e912f0feb4..0000000000000 --- a/libs/experimental/langchain_experimental/llm_symbolic_math/base.py +++ /dev/null @@ -1,250 +0,0 @@ -"""Chain that interprets a prompt and executes python code to do symbolic math.""" - -from __future__ import annotations - -import re -from typing import Any, Dict, List, Optional - -from langchain.base_language import BaseLanguageModel -from langchain.chains.base import Chain -from langchain.chains.llm import LLMChain -from langchain_core.callbacks.manager import ( - AsyncCallbackManagerForChainRun, - CallbackManagerForChainRun, -) -from langchain_core.prompts.base import BasePromptTemplate -from pydantic import ConfigDict - -from langchain_experimental.llm_symbolic_math.prompt import PROMPT - - -class LLMSymbolicMathChain(Chain): - """Chain that interprets a prompt and executes python code to do symbolic math. - - It is based on the sympy library and can be used to evaluate - mathematical expressions. - See https://www.sympy.org/ for more information. - - Example: - .. code-block:: python - - from langchain.chains import LLMSymbolicMathChain - from langchain_community.llms import OpenAI - llm_symbolic_math = LLMSymbolicMathChain.from_llm(OpenAI()) - """ - - llm_chain: LLMChain - input_key: str = "question" #: :meta private: - output_key: str = "answer" #: :meta private: - - model_config = ConfigDict( - arbitrary_types_allowed=True, - extra="forbid", - ) - - allow_dangerous_requests: bool # Assign no default. - """Must be set by the user to allow dangerous requests or not. - - We recommend a default of False to allow only pre-defined symbolic operations. - - When set to True, the chain will allow any kind of input. This is - STRONGLY DISCOURAGED unless you fully trust the input (and believe that - the LLM itself cannot behave in a malicious way). - You should absolutely NOT be deploying this in a production environment - with allow_dangerous_requests=True. As this would allow a malicious actor - to execute arbitrary code on your system. - Use default=True at your own risk. - - - When set to False, the chain will only allow pre-defined symbolic operations. - If the some symbolic expressions are failing to evaluate, you can open a PR - to add them to extend the list of allowed operations. - """ - - def __init__(self, **kwargs: Any) -> None: - if "allow_dangerous_requests" not in kwargs: - raise ValueError( - "LLMSymbolicMathChain requires allow_dangerous_requests to be set. " - "We recommend that you set `allow_dangerous_requests=False` to allow " - "only pre-defined symbolic operations. " - "If the some symbolic expressions are failing to evaluate, you can " - "open a PR to add them to extend the list of allowed operations. " - "Alternatively, you can set `allow_dangerous_requests=True` to allow " - "any kind of input but this is STRONGLY DISCOURAGED unless you " - "fully trust the input (and believe that the LLM itself cannot behave " - "in a malicious way)." - "You should absolutely NOT be deploying this in a production " - "environment with allow_dangerous_requests=True. As " - "this would allow a malicious actor to execute arbitrary code on " - "your system." - ) - super().__init__(**kwargs) - - @property - def input_keys(self) -> List[str]: - """Expect input key. 
- - :meta private: - """ - return [self.input_key] - - @property - def output_keys(self) -> List[str]: - """Expect output key. - - :meta private: - """ - return [self.output_key] - - def _evaluate_expression(self, expression: str) -> str: - try: - import sympy - except ImportError as e: - raise ImportError( - "Unable to import sympy, please install it with `pip install sympy`." - ) from e - - try: - if self.allow_dangerous_requests: - output = str(sympy.sympify(expression, evaluate=True)) - else: - allowed_symbols = { - # Basic arithmetic and trigonometry - "sin": sympy.sin, - "cos": sympy.cos, - "tan": sympy.tan, - "cot": sympy.cot, - "sec": sympy.sec, - "csc": sympy.csc, - "asin": sympy.asin, - "acos": sympy.acos, - "atan": sympy.atan, - # Hyperbolic functions - "sinh": sympy.sinh, - "cosh": sympy.cosh, - "tanh": sympy.tanh, - "asinh": sympy.asinh, - "acosh": sympy.acosh, - "atanh": sympy.atanh, - # Exponentials and logarithms - "exp": sympy.exp, - "log": sympy.log, - "ln": sympy.log, # natural log sympy defaults to natural log - "log10": lambda x: sympy.log(x, 10), # log base 10 (use sympy.log) - # Powers and roots - "sqrt": sympy.sqrt, - "cbrt": lambda x: sympy.Pow(x, sympy.Rational(1, 3)), - # Combinatorics and other math functions - "factorial": sympy.factorial, - "binomial": sympy.binomial, - "gcd": sympy.gcd, - "lcm": sympy.lcm, - "abs": sympy.Abs, - "sign": sympy.sign, - "mod": sympy.Mod, - # Constants - "pi": sympy.pi, - "e": sympy.E, - "I": sympy.I, - "oo": sympy.oo, - "NaN": sympy.nan, - } - - # Use parse_expr with strict settings - output = str( - sympy.parse_expr( - expression, local_dict=allowed_symbols, evaluate=True - ) - ) - except Exception as e: - raise ValueError( - f'LLMSymbolicMathChain._evaluate("{expression}") raised error: {e}.' 
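The allowlist above is handed to `sympy.parse_expr` as its `local_dict`; a sketch of the effect (assumes `sympy` is installed):

```python
import sympy

allowed = {"sin": sympy.sin, "pi": sympy.pi}  # trimmed-down version of the table above
# Allowed names resolve to real SymPy objects and evaluate:
print(sympy.parse_expr("sin(pi/2)", local_dict=allowed, evaluate=True))  # -> 1
# A name SymPy does not know stays an undefined function instead of
# resolving to an arbitrary Python callable:
print(sympy.parse_expr("foo(3)", local_dict=allowed, evaluate=True))  # -> foo(3)
```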
- " Please try again with a valid numerical expression" - ) - - # Remove any leading and trailing brackets from the output - return re.sub(r"^\[|\]$", "", output) - - def _process_llm_result( - self, llm_output: str, run_manager: CallbackManagerForChainRun - ) -> Dict[str, str]: - run_manager.on_text(llm_output, color="green", verbose=self.verbose) - llm_output = llm_output.strip() - text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL) - if text_match: - expression = text_match.group(1) - output = self._evaluate_expression(expression) - run_manager.on_text("\nAnswer: ", verbose=self.verbose) - run_manager.on_text(output, color="yellow", verbose=self.verbose) - answer = "Answer: " + output - elif llm_output.startswith("Answer:"): - answer = llm_output - elif "Answer:" in llm_output: - answer = "Answer: " + llm_output.split("Answer:")[-1] - else: - raise ValueError(f"unknown format from LLM: {llm_output}") - return {self.output_key: answer} - - async def _aprocess_llm_result( - self, - llm_output: str, - run_manager: AsyncCallbackManagerForChainRun, - ) -> Dict[str, str]: - await run_manager.on_text(llm_output, color="green", verbose=self.verbose) - llm_output = llm_output.strip() - text_match = re.search(r"^```text(.*?)```", llm_output, re.DOTALL) - if text_match: - expression = text_match.group(1) - output = self._evaluate_expression(expression) - await run_manager.on_text("\nAnswer: ", verbose=self.verbose) - await run_manager.on_text(output, color="yellow", verbose=self.verbose) - answer = "Answer: " + output - elif llm_output.startswith("Answer:"): - answer = llm_output - elif "Answer:" in llm_output: - answer = "Answer: " + llm_output.split("Answer:")[-1] - else: - raise ValueError(f"unknown format from LLM: {llm_output}") - return {self.output_key: answer} - - def _call( - self, - inputs: Dict[str, str], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - _run_manager.on_text(inputs[self.input_key]) - llm_output = self.llm_chain.predict( - question=inputs[self.input_key], - stop=["```output"], - callbacks=_run_manager.get_child(), - ) - return self._process_llm_result(llm_output, _run_manager) - - async def _acall( - self, - inputs: Dict[str, str], - run_manager: Optional[AsyncCallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager() - await _run_manager.on_text(inputs[self.input_key]) - llm_output = await self.llm_chain.apredict( - question=inputs[self.input_key], - stop=["```output"], - callbacks=_run_manager.get_child(), - ) - return await self._aprocess_llm_result(llm_output, _run_manager) - - @property - def _chain_type(self) -> str: - return "llm_symbolic_math_chain" - - @classmethod - def from_llm( - cls, - llm: BaseLanguageModel, - prompt: BasePromptTemplate = PROMPT, - **kwargs: Any, - ) -> LLMSymbolicMathChain: - llm_chain = LLMChain(llm=llm, prompt=prompt) - return cls(llm_chain=llm_chain, **kwargs) diff --git a/libs/experimental/langchain_experimental/llm_symbolic_math/prompt.py b/libs/experimental/langchain_experimental/llm_symbolic_math/prompt.py deleted file mode 100644 index 2a436eea5d378..0000000000000 --- a/libs/experimental/langchain_experimental/llm_symbolic_math/prompt.py +++ /dev/null @@ -1,51 +0,0 @@ -# flake8: noqa -from langchain_core.prompts.prompt import PromptTemplate - -_PROMPT_TEMPLATE = """Translate a math problem into a expression that can be 
executed using Python's SymPy library. Use the output of running this code to answer the question. - -Question: ${{Question with math problem.}} -```text -${{single line sympy expression that solves the problem}} -``` -...sympy.sympify(text, evaluate=True)... -```output -${{Output of running the code}} -``` -Answer: ${{Answer}} - -Begin. - -Question: What is the limit of sin(x) / x as x goes to 0 -```text -limit(sin(x)/x, x, 0) -``` -...sympy.sympify("limit(sin(x)/x, x, 0)")... -```output -1 -``` -Answer: 1 - -Question: What is the integral of e^-x from 0 to infinity -```text -integrate(exp(-x), (x, 0, oo)) -``` -...sympy.sympify("integrate(exp(-x), (x, 0, oo))")... -```output -1 -``` - -Question: What are the solutions to this equation x**2 - x? -```text -solveset(x**2 - x, x) -``` -...sympy.sympify("solveset(x**2 - x, x)")... -```output -[0, 1] -``` -Question: {question} -""" - -PROMPT = PromptTemplate( - input_variables=["question"], - template=_PROMPT_TEMPLATE, -) diff --git a/libs/experimental/langchain_experimental/llms/__init__.py b/libs/experimental/langchain_experimental/llms/__init__.py deleted file mode 100644 index 7171c090dc34a..0000000000000 --- a/libs/experimental/langchain_experimental/llms/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Experimental **LLM** classes provide -access to the large language model (**LLM**) APIs and services. -""" - -from langchain_experimental.llms.jsonformer_decoder import JsonFormer -from langchain_experimental.llms.llamaapi import ChatLlamaAPI -from langchain_experimental.llms.lmformatenforcer_decoder import LMFormatEnforcer -from langchain_experimental.llms.rellm_decoder import RELLM - -__all__ = ["RELLM", "JsonFormer", "ChatLlamaAPI", "LMFormatEnforcer"] diff --git a/libs/experimental/langchain_experimental/llms/anthropic_functions.py b/libs/experimental/langchain_experimental/llms/anthropic_functions.py deleted file mode 100644 index e9e62ca6e425c..0000000000000 --- a/libs/experimental/langchain_experimental/llms/anthropic_functions.py +++ /dev/null @@ -1,228 +0,0 @@ -import json -from collections import defaultdict -from html.parser import HTMLParser -from typing import Any, DefaultDict, Dict, List, Optional, cast - -from langchain.schema import ( - ChatGeneration, - ChatResult, -) -from langchain_community.chat_models.anthropic import ChatAnthropic -from langchain_core._api.deprecation import deprecated -from langchain_core.callbacks.manager import ( - CallbackManagerForLLMRun, -) -from langchain_core.language_models import BaseChatModel -from langchain_core.messages import ( - AIMessage, - BaseMessage, - SystemMessage, -) -from pydantic import model_validator - -prompt = """In addition to responding, you can use tools. \ -You have access to the following tools. - -{tools} - -In order to use a tool, you can use <tool></tool> to specify the name, \ -and the <tool_input></tool_input> tags to specify the parameters. \ -Each parameter should be passed in as <$param_name>$value, \ -Where $param_name is the name of the specific parameter, and $value \ -is the value for that parameter. - -You will then get back a response in the form <observation></observation> -For example, if you have a tool called 'search' that accepts a single \ -parameter 'query' that could run a google search, in order to search \ -for the weather in SF you would respond: - -<tool>search</tool><tool_input>weather in SF</tool_input> -<observation>64 degrees</observation>""" - - -class TagParser(HTMLParser): - """Parser for the tool tags.""" - - def __init__(self) -> None: - """A heavy-handed solution, but it's fast for prototyping.
- - Might be re-implemented later to restrict scope to the limited grammar, and - for more efficiency. - - Uses an HTML parser to parse a limited grammar that allows - for syntax of the form: - - INPUT -> JUNK? VALUE* - JUNK -> JUNK_CHARACTER+ - JUNK_CHARACTER -> whitespace | , - VALUE -> <IDENTIFIER>DATA</IDENTIFIER> | OBJECT - OBJECT -> <IDENTIFIER>VALUE+</IDENTIFIER> - IDENTIFIER -> [a-Z][a-Z0-9_]* - DATA -> .* - - Interprets the data to allow repetition of tags and recursion - to support representation of complex types. - - ^ Just another approximately wrong grammar specification. - """ - super().__init__() - - self.parse_data: DefaultDict[str, List[Any]] = defaultdict(list) - self.stack: List[DefaultDict[str, List[str]]] = [self.parse_data] - self.success = True - self.depth = 0 - self.data: Optional[str] = None - - def handle_starttag(self, tag: str, attrs: Any) -> None: - """Hook when a new tag is encountered.""" - self.depth += 1 - self.stack.append(defaultdict(list)) - self.data = None - - def handle_endtag(self, tag: str) -> None: - """Hook when a tag is closed.""" - self.depth -= 1 - top_of_stack = dict(self.stack.pop(-1)) # Pop the dictionary; we don't need it - - # If a leaf node - is_leaf = self.data is not None - # Annoying to type here, code is tested, hopefully OK - value = self.data if is_leaf else top_of_stack - # Difficult to type this correctly with mypy (maybe impossible?) - # Can be nested indefinitely, so requires self referencing type - self.stack[-1][tag].append(value) # type: ignore - # Reset the data so that if we encounter a sequence of end tags, we - # don't confuse an outer end tag for belonging to a leaf node. - self.data = None - - def handle_data(self, data: str) -> None: - """Hook when handling data.""" - stripped_data = data.strip() - # The only data that's allowed is whitespace or a comma surrounded by whitespace - if self.depth == 0 and stripped_data not in (",", ""): - # If this is triggered, the parse should be considered invalid.
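Given that grammar, a short sketch of the parser in action on a typical tagged completion (`_destrip`, defined just below, then unwraps the single-item lists):

```python
parser = TagParser()
parser.feed("<tool>search</tool><tool_input><query>weather in SF</query></tool_input>")
print(dict(parser.parse_data))
# -> {'tool': ['search'], 'tool_input': [{'query': ['weather in SF']}]}
```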
- self.success = False - if stripped_data: # ignore whitespace-only strings - self.data = stripped_data - - -def _destrip(tool_input: Any) -> Any: - if isinstance(tool_input, dict): - return {k: _destrip(v) for k, v in tool_input.items()} - elif isinstance(tool_input, list): - if isinstance(tool_input[0], str): - if len(tool_input) == 1: - return tool_input[0] - else: - raise ValueError - elif isinstance(tool_input[0], dict): - return [_destrip(v) for v in tool_input] - else: - raise ValueError - else: - raise ValueError - - -@deprecated( - since="0.0.54", - removal="1.0", - alternative_import="langchain_anthropic.experimental.ChatAnthropicTools", -) -class AnthropicFunctions(BaseChatModel): - """Chat model for interacting with Anthropic functions.""" - - llm: BaseChatModel - - @model_validator(mode="before") - @classmethod - def validate_environment(cls, values: Dict) -> Any: - values["llm"] = values.get("llm") or ChatAnthropic(**values) - return values - - @property - def model(self) -> BaseChatModel: - """For backwards compatibility.""" - return self.llm - - def _generate( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> ChatResult: - forced = False - function_call = "" - if "functions" in kwargs: - # get the function call method - if "function_call" in kwargs: - function_call = kwargs["function_call"] - del kwargs["function_call"] - else: - function_call = "auto" - - # should function calling be used - if function_call != "none": - content = prompt.format(tools=json.dumps(kwargs["functions"], indent=2)) - system = SystemMessage(content=content) - messages = [system] + messages - - # is the function call a dictionary (forced function calling) - if isinstance(function_call, dict): - forced = True - function_call_name = function_call["name"] - messages.append(AIMessage(content=f"<tool>{function_call_name}</tool>")) - - del kwargs["functions"] - if stop is None: - stop = ["</tool_input>"] - else: - stop.append("</tool_input>") - else: - if "function_call" in kwargs: - raise ValueError( - "if `function_call` is provided, `functions` must also be provided" - ) - response = self.model.invoke( - messages, stop=stop, callbacks=run_manager, **kwargs - ) - completion = cast(str, response.content) - if forced: - tag_parser = TagParser() - - if "<tool_input>" in completion: - tag_parser.feed(completion.strip() + "</tool_input>") - v1 = tag_parser.parse_data["tool_input"][0] - arguments = json.dumps(_destrip(v1)) - else: - v1 = completion - arguments = "" - - kwargs = { - "function_call": { - "name": function_call_name, # type: ignore[has-type] - "arguments": arguments, - } - } - message = AIMessage(content="", additional_kwargs=kwargs) - return ChatResult(generations=[ChatGeneration(message=message)]) - elif "<tool>" in completion: - tag_parser = TagParser() - tag_parser.feed(completion.strip() + "</tool_input>") - msg = completion.split("<tool>")[0].strip() - v1 = tag_parser.parse_data["tool_input"][0] - kwargs = { - "function_call": { - "name": tag_parser.parse_data["tool"][0], - "arguments": json.dumps(_destrip(v1)), - } - } - message = AIMessage(content=msg, additional_kwargs=kwargs) - return ChatResult(generations=[ChatGeneration(message=message)]) - else: - response.content = cast(str, response.content).strip() - return ChatResult(generations=[ChatGeneration(message=response)]) - - @property - def _llm_type(self) -> str: - return "anthropic_functions" diff --git a/libs/experimental/langchain_experimental/llms/jsonformer_decoder.py
b/libs/experimental/langchain_experimental/llms/jsonformer_decoder.py deleted file mode 100644 index 97410118b89e3..0000000000000 --- a/libs/experimental/langchain_experimental/llms/jsonformer_decoder.py +++ /dev/null @@ -1,67 +0,0 @@ -"""Experimental implementation of jsonformer wrapped LLM.""" - -from __future__ import annotations - -import json -from typing import TYPE_CHECKING, Any, List, Optional, cast - -from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline -from langchain_core.callbacks.manager import CallbackManagerForLLMRun -from pydantic import Field, model_validator - -if TYPE_CHECKING: - import jsonformer - - -def import_jsonformer() -> jsonformer: - """Lazily import of the jsonformer package.""" - try: - import jsonformer - except ImportError: - raise ImportError( - "Could not import jsonformer python package. " - "Please install it with `pip install jsonformer`." - ) - return jsonformer - - -class JsonFormer(HuggingFacePipeline): - """Jsonformer wrapped LLM using HuggingFace Pipeline API. - - This pipeline is experimental and not yet stable. - """ - - json_schema: dict = Field(..., description="The JSON Schema to complete.") - max_new_tokens: int = Field( - default=200, description="Maximum number of new tokens to generate." - ) - debug: bool = Field(default=False, description="Debug mode.") - - @model_validator(mode="before") - @classmethod - def check_jsonformer_installation(cls, values: dict) -> Any: - import_jsonformer() - return values - - def _call( - self, - prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> str: - jsonformer = import_jsonformer() - from transformers import Text2TextGenerationPipeline - - pipeline = cast(Text2TextGenerationPipeline, self.pipeline) - - model = jsonformer.Jsonformer( - model=pipeline.model, - tokenizer=pipeline.tokenizer, - json_schema=self.json_schema, - prompt=prompt, - max_number_tokens=self.max_new_tokens, - debug=self.debug, - ) - text = model() - return json.dumps(text) diff --git a/libs/experimental/langchain_experimental/llms/llamaapi.py b/libs/experimental/langchain_experimental/llms/llamaapi.py deleted file mode 100644 index 6f96ceebfa590..0000000000000 --- a/libs/experimental/langchain_experimental/llms/llamaapi.py +++ /dev/null @@ -1,126 +0,0 @@ -import json -import logging -from typing import ( - Any, - Dict, - List, - Mapping, - Optional, - Tuple, -) - -from langchain.schema import ( - ChatGeneration, - ChatResult, -) -from langchain_core.callbacks.manager import CallbackManagerForLLMRun -from langchain_core.language_models import BaseChatModel -from langchain_core.messages import ( - AIMessage, - BaseMessage, - ChatMessage, - FunctionMessage, - HumanMessage, - SystemMessage, -) - -logger = logging.getLogger(__name__) - - -def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: - role = _dict["role"] - if role == "user": - return HumanMessage(content=_dict["content"]) - elif role == "assistant": - # Fix for azure - # Also OpenAI returns None for tool invocations - content = _dict.get("content") or "" - if _dict.get("function_call"): - _dict["function_call"]["arguments"] = json.dumps( - _dict["function_call"]["arguments"] - ) - additional_kwargs = {"function_call": dict(_dict["function_call"])} - else: - additional_kwargs = {} - return AIMessage(content=content, additional_kwargs=additional_kwargs) - elif role == "system": - return SystemMessage(content=_dict["content"]) - elif role == "function": - return 
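A usage sketch for the `JsonFormer` wrapper above; the model id is an arbitrary placeholder, and `jsonformer`/`transformers` are assumed to be installed:

```python
from langchain_experimental.llms.jsonformer_decoder import JsonFormer

json_schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}, "age": {"type": "number"}},
}
# from_model_id is inherited from HuggingFacePipeline; extra kwargs such as
# json_schema are forwarded to the JsonFormer constructor.
llm = JsonFormer.from_model_id(
    model_id="gpt2", task="text-generation", json_schema=json_schema
)
print(llm.invoke("Generate a person record:"))  # decoding is constrained to the schema
```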
FunctionMessage(content=_dict["content"], name=_dict["name"]) - else: - return ChatMessage(content=_dict["content"], role=role) - - -def _convert_message_to_dict(message: BaseMessage) -> dict: - if isinstance(message, ChatMessage): - message_dict = {"role": message.role, "content": message.content} - elif isinstance(message, HumanMessage): - message_dict = {"role": "user", "content": message.content} - elif isinstance(message, AIMessage): - message_dict = {"role": "assistant", "content": message.content} - if "function_call" in message.additional_kwargs: - message_dict["function_call"] = message.additional_kwargs["function_call"] - elif isinstance(message, SystemMessage): - message_dict = {"role": "system", "content": message.content} - elif isinstance(message, FunctionMessage): - message_dict = { - "role": "function", - "content": message.content, - "name": message.name, - } - else: - raise ValueError(f"Got unknown type {message}") - if "name" in message.additional_kwargs: - message_dict["name"] = message.additional_kwargs["name"] - return message_dict - - -class ChatLlamaAPI(BaseChatModel): - """Chat model using the Llama API.""" - - client: Any #: :meta private: - - def _generate( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> ChatResult: - message_dicts, params = self._create_message_dicts(messages, stop) - _params = {"messages": message_dicts} - final_params = {**params, **kwargs, **_params} - response = self.client.run(final_params).json() - return self._create_chat_result(response) - - def _create_message_dicts( - self, messages: List[BaseMessage], stop: Optional[List[str]] - ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]: - params = dict(self._client_params) - if stop is not None: - if "stop" in params: - raise ValueError("`stop` found in both the input and default params.") - params["stop"] = stop - message_dicts = [_convert_message_to_dict(m) for m in messages] - return message_dicts, params - - def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: - generations = [] - for res in response["choices"]: - message = _convert_dict_to_message(res["message"]) - gen = ChatGeneration( - message=message, - generation_info=dict(finish_reason=res.get("finish_reason")), - ) - generations.append(gen) - return ChatResult(generations=generations) - - @property - def _client_params(self) -> Mapping[str, Any]: - """Get the parameters used for the client.""" - return {} - - @property - def _llm_type(self) -> str: - """Return type of chat model.""" - return "llama-api" diff --git a/libs/experimental/langchain_experimental/llms/lmformatenforcer_decoder.py b/libs/experimental/langchain_experimental/llms/lmformatenforcer_decoder.py deleted file mode 100644 index 82cf28d2a6527..0000000000000 --- a/libs/experimental/langchain_experimental/llms/lmformatenforcer_decoder.py +++ /dev/null @@ -1,83 +0,0 @@ -"""Experimental implementation of lm-format-enforcer wrapped LLM.""" - -from __future__ import annotations - -from typing import TYPE_CHECKING, Any, List, Optional - -from langchain.schema import LLMResult -from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline -from langchain_core.callbacks.manager import CallbackManagerForLLMRun -from pydantic import Field - -if TYPE_CHECKING: - import lmformatenforcer - - -def import_lmformatenforcer() -> lmformatenforcer: - """Lazily import of the lmformatenforcer package.""" - try: - import lmformatenforcer - except 
ImportError: - raise ImportError( - "Could not import lmformatenforcer python package. " - "Please install it with `pip install lm-format-enforcer`." - ) - return lmformatenforcer - - -class LMFormatEnforcer(HuggingFacePipeline): - """LMFormatEnforcer wrapped LLM using HuggingFace Pipeline API. - - This pipeline is experimental and not yet stable. - """ - - json_schema: Optional[dict] = Field( - description="The JSON Schema to complete.", default=None - ) - regex: Optional[str] = Field( - description="The regular expression to complete.", default=None - ) - - def _generate( - self, - prompts: List[str], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> LLMResult: - lmformatenforcer = import_lmformatenforcer() - import lmformatenforcer.integrations.transformers as hf_integration - - # We integrate lmformatenforcer by adding a prefix_allowed_tokens_fn. - # It has to be done on each call, because the prefix function is stateful. - if "prefix_allowed_tokens_fn" in self.pipeline._forward_params: - raise ValueError( - "prefix_allowed_tokens_fn param is forbidden with LMFormatEnforcer." - ) - - has_json_schema = self.json_schema is not None - has_regex = self.regex is not None - if has_json_schema == has_regex: - raise ValueError( - "You must specify exactly one of json_schema or a regex, but not both." - ) - - if has_json_schema: - parser = lmformatenforcer.JsonSchemaParser(self.json_schema) - else: - parser = lmformatenforcer.RegexParser(self.regex) - - prefix_function = hf_integration.build_transformers_prefix_allowed_tokens_fn( - self.pipeline.tokenizer, parser - ) - self.pipeline._forward_params["prefix_allowed_tokens_fn"] = prefix_function - - result = super()._generate( - prompts, - stop=stop, - run_manager=run_manager, - **kwargs, - ) - - del self.pipeline._forward_params["prefix_allowed_tokens_fn"] - return result diff --git a/libs/experimental/langchain_experimental/llms/ollama_functions.py b/libs/experimental/langchain_experimental/llms/ollama_functions.py deleted file mode 100644 index e41635be88c99..0000000000000 --- a/libs/experimental/langchain_experimental/llms/ollama_functions.py +++ /dev/null @@ -1,462 +0,0 @@ -import json -import uuid -from operator import itemgetter -from typing import ( - Any, - Callable, - Dict, - List, - Optional, - Sequence, - Type, - TypedDict, - TypeVar, - Union, -) - -from langchain_community.chat_models.ollama import ChatOllama -from langchain_core._api import deprecated -from langchain_core.callbacks import ( - AsyncCallbackManagerForLLMRun, - CallbackManagerForLLMRun, -) -from langchain_core.language_models import LanguageModelInput -from langchain_core.messages import ( - AIMessage, - BaseMessage, - ToolCall, -) -from langchain_core.output_parsers.base import OutputParserLike -from langchain_core.output_parsers.json import JsonOutputParser -from langchain_core.output_parsers.pydantic import PydanticOutputParser -from langchain_core.outputs import ChatGeneration, ChatResult -from langchain_core.prompts import SystemMessagePromptTemplate -from langchain_core.runnables import Runnable, RunnableLambda -from langchain_core.runnables.base import RunnableMap -from langchain_core.runnables.passthrough import RunnablePassthrough -from langchain_core.tools import BaseTool -from langchain_core.utils.pydantic import is_basemodel_instance, is_basemodel_subclass -from pydantic import ( - BaseModel, -) - -DEFAULT_SYSTEM_TEMPLATE = """You have access to the following tools: - -{tools} - -You must 
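And a matching sketch for `LMFormatEnforcer`, this time constraining decoding with a regex (model id again a placeholder; `lm-format-enforcer` assumed installed):

```python
from langchain_experimental.llms.lmformatenforcer_decoder import LMFormatEnforcer

llm = LMFormatEnforcer.from_model_id(
    model_id="gpt2",
    task="text-generation",
    regex=r'\{"answer": "(yes|no)"\}',  # exactly one of regex / json_schema may be set
)
print(llm.invoke("Is the sky blue? Answer in JSON: "))
```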
always select one of the above tools and respond with only a JSON object matching the following schema: - -{{ - "tool": <name of the selected tool>, - "tool_input": <parameters for the selected tool, matching the tool's JSON schema> -}} -""" # noqa: E501 - -DEFAULT_RESPONSE_FUNCTION = { - "name": "__conversational_response", - "description": ( - "Respond conversationally if no other tools should be called for a given query." - ), - "parameters": { - "type": "object", - "properties": { - "response": { - "type": "string", - "description": "Conversational response to the user.", - }, - }, - "required": ["response"], - }, -} - -_BM = TypeVar("_BM", bound=BaseModel) -_DictOrPydantic = Union[Dict, _BM] - - -def _is_pydantic_class(obj: Any) -> bool: - return isinstance(obj, type) and ( - is_basemodel_subclass(obj) or BaseModel in obj.__bases__ - ) - - -def convert_to_ollama_tool(tool: Any) -> Dict: - """Convert a tool to an Ollama tool.""" - description = None - if _is_pydantic_class(tool): - schema = tool.model_construct().model_json_schema() - name = schema["title"] - elif isinstance(tool, BaseTool): - schema = tool.tool_call_schema.model_json_schema() - name = tool.get_name() - description = tool.description - elif is_basemodel_instance(tool): - schema = tool.get_input_schema().model_json_schema() - name = tool.get_name() - description = tool.description - elif isinstance(tool, dict) and "name" in tool and "parameters" in tool: - return tool.copy() - else: - raise ValueError( - f"""Cannot convert {tool} to an Ollama tool. - {tool} needs to be a Pydantic class, model, or a dict.""" - ) - definition = {"name": name, "parameters": schema} - if description: - definition["description"] = description - - return definition - - -class _AllReturnType(TypedDict): - raw: BaseMessage - parsed: Optional[_DictOrPydantic] - parsing_error: Optional[BaseException] - - -def parse_response(message: BaseMessage) -> str: - """Extract `function_call` from `AIMessage`.""" - if isinstance(message, AIMessage): - kwargs = message.additional_kwargs - tool_calls = message.tool_calls - if len(tool_calls) > 0: - tool_call = tool_calls[-1] - args = tool_call.get("args") - return json.dumps(args) - elif "function_call" in kwargs: - if "arguments" in kwargs["function_call"]: - return kwargs["function_call"]["arguments"] - raise ValueError( - f"`arguments` missing from `function_call` within AIMessage: {message}" - ) - else: - raise ValueError(f"`tool_calls` missing from AIMessage: {message}") - raise ValueError(f"`message` is not an instance of `AIMessage`: {message}") - - -@deprecated( # type: ignore[arg-type] - since="0.0.64", removal="1.0", alternative_import="langchain_ollama.ChatOllama" -) -class OllamaFunctions(ChatOllama): - """Function chat model that uses Ollama API.""" - - tool_system_prompt_template: str = DEFAULT_SYSTEM_TEMPLATE - - def __init__(self, **kwargs: Any) -> None: - super().__init__(**kwargs) - - def bind_tools( - self, - tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]], - **kwargs: Any, - ) -> Runnable[LanguageModelInput, BaseMessage]: - return self.bind(functions=tools, **kwargs) - - def with_structured_output( - self, - schema: Union[Dict, Type[BaseModel]], - *, - include_raw: bool = False, - **kwargs: Any, - ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]: - """Model wrapper that returns outputs formatted to match the given schema. - - Args: - schema: The output schema as a dict or a Pydantic class. If a Pydantic class - then the model output will be an object of that class. If a dict then - the model output will be a dict.
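A sketch of what `convert_to_ollama_tool` produces for a Pydantic class; this dict is what gets JSON-dumped into `{tools}` in the system template above:

```python
from pydantic import BaseModel

class Search(BaseModel):
    """Search the web for a query."""

    query: str

print(convert_to_ollama_tool(Search))
# -> {'name': 'Search', 'parameters': {'title': 'Search',
#     'description': 'Search the web for a query.', 'type': 'object',
#     'properties': {'query': {'title': 'Query', 'type': 'string'}},
#     'required': ['query']}}
```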
With a Pydantic class the returned - attributes will be validated, whereas with a dict they will not be. - include_raw: If False then only the parsed structured output is returned. If - an error occurs during model output parsing it will be raised. If True - then both the raw model response (a BaseMessage) and the parsed model - response will be returned. If an error occurs during output parsing it - will be caught and returned as well. The final output is always a dict - with keys "raw", "parsed", and "parsing_error". - - Returns: - A Runnable that takes any ChatModel input and returns as output: - - If include_raw is True then a dict with keys: - raw: BaseMessage - parsed: Optional[_DictOrPydantic] - parsing_error: Optional[BaseException] - - If include_raw is False then just _DictOrPydantic is returned, - where _DictOrPydantic depends on the schema: - - If schema is a Pydantic class then _DictOrPydantic is the Pydantic - class. - - If schema is a dict then _DictOrPydantic is a dict. - - Example: Pydantic schema (include_raw=False): - .. code-block:: python - - from langchain_experimental.llms import OllamaFunctions - from pydantic import BaseModel - - class AnswerWithJustification(BaseModel): - '''An answer to the user question along with justification for the answer.''' - answer: str - justification: str - - llm = OllamaFunctions(model="phi3", format="json", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification) - - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") - - # -> AnswerWithJustification( - # answer='They weigh the same', - # justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.' - # ) - - Example: Pydantic schema (include_raw=True): - .. code-block:: python - - from langchain_experimental.llms import OllamaFunctions - from pydantic import BaseModel - - class AnswerWithJustification(BaseModel): - '''An answer to the user question along with justification for the answer.''' - answer: str - justification: str - - llm = OllamaFunctions(model="phi3", format="json", temperature=0) - structured_llm = llm.with_structured_output(AnswerWithJustification, include_raw=True) - - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") - # -> { - # 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}), - # 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'), - # 'parsing_error': None - # } - - Example: dict schema (method="include_raw=False): - .. 
code-block:: python - - from langchain_experimental.llms import OllamaFunctions, convert_to_ollama_tool - from pydantic import BaseModel - - class AnswerWithJustification(BaseModel): - '''An answer to the user question along with justification for the answer.''' - answer: str - justification: str - - dict_schema = convert_to_ollama_tool(AnswerWithJustification) - llm = OllamaFunctions(model="phi3", format="json", temperature=0) - structured_llm = llm.with_structured_output(dict_schema) - - structured_llm.invoke("What weighs more a pound of bricks or a pound of feathers") - # -> { - # 'answer': 'They weigh the same', - # 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.' - # } - - - """ # noqa: E501 - if kwargs: - raise ValueError(f"Received unsupported arguments {kwargs}") - is_pydantic_schema = _is_pydantic_class(schema) - if schema is None: - raise ValueError( - "schema must be specified when method is 'function_calling'. " - "Received None." - ) - llm = self.bind_tools(tools=[schema], format="json") - if is_pydantic_schema: - output_parser: OutputParserLike = PydanticOutputParser( # type: ignore[type-var] - pydantic_object=schema # type: ignore[arg-type] - ) - else: - output_parser = JsonOutputParser() - - parser_chain = RunnableLambda(parse_response) | output_parser - if include_raw: - parser_assign = RunnablePassthrough.assign( - parsed=itemgetter("raw") | parser_chain, parsing_error=lambda _: None - ) - parser_none = RunnablePassthrough.assign(parsed=lambda _: None) - parser_with_fallback = parser_assign.with_fallbacks( - [parser_none], exception_key="parsing_error" - ) - return RunnableMap(raw=llm) | parser_with_fallback - else: - return llm | parser_chain - - def _generate( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> ChatResult: - functions = kwargs.get("functions", []) - if "functions" in kwargs: - del kwargs["functions"] - if "function_call" in kwargs: - functions = [ - fn for fn in functions if fn["name"] == kwargs["function_call"]["name"] - ] - if not functions: - raise ValueError( - "If `function_call` is specified, you must also pass a " - "matching function in `functions`." - ) - del kwargs["function_call"] - functions = [convert_to_ollama_tool(fn) for fn in functions] - functions.append(DEFAULT_RESPONSE_FUNCTION) - system_message_prompt_template = SystemMessagePromptTemplate.from_template( - self.tool_system_prompt_template - ) - system_message = system_message_prompt_template.format( - tools=json.dumps(functions, indent=2) - ) - response_message = super()._generate( - [system_message] + messages, stop=stop, run_manager=run_manager, **kwargs - ) - chat_generation_content = response_message.generations[0].text - if not isinstance(chat_generation_content, str): - raise ValueError("OllamaFunctions does not support non-string output.") - try: - parsed_chat_result = json.loads(chat_generation_content) - except json.JSONDecodeError: - raise ValueError( - f"""'{self.model}' did not respond with valid JSON. - Please try again. 
- Response: {chat_generation_content}""" - ) - called_tool_name = ( - parsed_chat_result["tool"] if "tool" in parsed_chat_result else None - ) - called_tool = next( - (fn for fn in functions if fn["name"] == called_tool_name), None - ) - if ( - called_tool is None - or called_tool["name"] == DEFAULT_RESPONSE_FUNCTION["name"] - ): - if ( - "tool_input" in parsed_chat_result - and "response" in parsed_chat_result["tool_input"] - ): - response = parsed_chat_result["tool_input"]["response"] - elif "response" in parsed_chat_result: - response = parsed_chat_result["response"] - else: - raise ValueError( - f"Failed to parse a response from {self.model} output: " - f"{chat_generation_content}" - ) - return ChatResult( - generations=[ - ChatGeneration( - message=AIMessage( - content=response, - ) - ) - ] - ) - - called_tool_arguments = ( - parsed_chat_result["tool_input"] - if "tool_input" in parsed_chat_result - else {} - ) - - response_message_with_functions = AIMessage( - content="", - tool_calls=[ - ToolCall( - name=called_tool_name, - args=called_tool_arguments if called_tool_arguments else {}, - id=f"call_{str(uuid.uuid4()).replace('-', '')}", - ) - ], - ) - - return ChatResult( - generations=[ChatGeneration(message=response_message_with_functions)] - ) - - async def _agenerate( - self, - messages: List[BaseMessage], - stop: Optional[List[str]] = None, - run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> ChatResult: - functions = kwargs.get("functions", []) - if "functions" in kwargs: - del kwargs["functions"] - if "function_call" in kwargs: - functions = [ - fn for fn in functions if fn["name"] == kwargs["function_call"]["name"] - ] - if not functions: - raise ValueError( - "If `function_call` is specified, you must also pass a " - "matching function in `functions`." - ) - del kwargs["function_call"] - elif not functions: - functions.append(DEFAULT_RESPONSE_FUNCTION) - if _is_pydantic_class(functions[0]): - functions = [convert_to_ollama_tool(fn) for fn in functions] - system_message_prompt_template = SystemMessagePromptTemplate.from_template( - self.tool_system_prompt_template - ) - system_message = system_message_prompt_template.format( - tools=json.dumps(functions, indent=2) - ) - response_message = await super()._agenerate( - [system_message] + messages, stop=stop, run_manager=run_manager, **kwargs - ) - chat_generation_content = response_message.generations[0].text - if not isinstance(chat_generation_content, str): - raise ValueError("OllamaFunctions does not support non-string output.") - try: - parsed_chat_result = json.loads(chat_generation_content) - except json.JSONDecodeError: - raise ValueError( - f"""'{self.model}' did not respond with valid JSON. - Please try again. 
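To make the contract concrete: `_generate` expects the model to answer with the JSON object described in `DEFAULT_SYSTEM_TEMPLATE` and rewrites it into a tool-calling `AIMessage`. A sketch of that round trip, mirroring the code above:

```python
import json
import uuid

from langchain_core.messages import AIMessage, ToolCall

raw = '{"tool": "Search", "tool_input": {"query": "weather in SF"}}'
parsed = json.loads(raw)

# What _generate builds from it: an empty-content AIMessage carrying a ToolCall.
message = AIMessage(
    content="",
    tool_calls=[
        ToolCall(
            name=parsed["tool"],
            args=parsed["tool_input"],
            id=f"call_{str(uuid.uuid4()).replace('-', '')}",
        )
    ],
)
```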
- Response: {chat_generation_content}""" - ) - called_tool_name = parsed_chat_result["tool"] - called_tool_arguments = parsed_chat_result["tool_input"] - called_tool = next( - (fn for fn in functions if fn["name"] == called_tool_name), None - ) - if called_tool is None: - raise ValueError( - f"Failed to parse a function call from {self.model} output: " - f"{chat_generation_content}" - ) - if called_tool["name"] == DEFAULT_RESPONSE_FUNCTION["name"]: - return ChatResult( - generations=[ - ChatGeneration( - message=AIMessage( - content=called_tool_arguments["response"], - ) - ) - ] - ) - - response_message_with_functions = AIMessage( - content="", - additional_kwargs={ - "function_call": { - "name": called_tool_name, - "arguments": json.dumps(called_tool_arguments) - if called_tool_arguments - else "", - }, - }, - ) - return ChatResult( - generations=[ChatGeneration(message=response_message_with_functions)] - ) - - @property - def _llm_type(self) -> str: - return "ollama_functions" diff --git a/libs/experimental/langchain_experimental/llms/rellm_decoder.py b/libs/experimental/langchain_experimental/llms/rellm_decoder.py deleted file mode 100644 index 2d911d038d6d4..0000000000000 --- a/libs/experimental/langchain_experimental/llms/rellm_decoder.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Experimental implementation of RELLM wrapped LLM.""" - -from __future__ import annotations - -from typing import TYPE_CHECKING, Any, List, Optional, cast - -from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline -from langchain_community.llms.utils import enforce_stop_tokens -from langchain_core.callbacks.manager import CallbackManagerForLLMRun -from pydantic import Field, model_validator - -if TYPE_CHECKING: - import rellm - from regex import Pattern as RegexPattern -else: - try: - from regex import Pattern as RegexPattern - except ImportError: - pass - - -def import_rellm() -> rellm: - """Lazily import of the rellm package.""" - try: - import rellm - except ImportError: - raise ImportError( - "Could not import rellm python package. " - "Please install it with `pip install rellm`." - ) - return rellm - - -class RELLM(HuggingFacePipeline): - """RELLM wrapped LLM using HuggingFace Pipeline API.""" - - regex: RegexPattern = Field(..., description="The structured format to complete.") - max_new_tokens: int = Field( - default=200, description="Maximum number of new tokens to generate." - ) - - @model_validator(mode="before") - @classmethod - def check_rellm_installation(cls, values: dict) -> Any: - import_rellm() - return values - - def _call( - self, - prompt: str, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForLLMRun] = None, - **kwargs: Any, - ) -> str: - rellm = import_rellm() - from transformers import Text2TextGenerationPipeline - - pipeline = cast(Text2TextGenerationPipeline, self.pipeline) - - text = rellm.complete_re( - prompt, - self.regex, - tokenizer=pipeline.tokenizer, - model=pipeline.model, - max_new_tokens=self.max_new_tokens, - ) - if stop is not None: - # This is a bit hacky, but I can't figure out a better way to enforce - # stop tokens when making calls to huggingface_hub. 
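A usage sketch for `RELLM` (placeholder model id; `rellm` and the third-party `regex` package are assumed installed; the pattern must be a compiled `regex` pattern, not one from `re`):

```python
import regex

from langchain_experimental.llms.rellm_decoder import RELLM

pattern = regex.compile(r'\{"name": "[A-Za-z ]+"\}')
llm = RELLM.from_model_id(
    model_id="gpt2", task="text-generation", regex=pattern, max_new_tokens=32
)
print(llm.invoke("Output a JSON object with a name field: "))
```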
- text = enforce_stop_tokens(text, stop) - return text diff --git a/libs/experimental/langchain_experimental/open_clip/__init__.py b/libs/experimental/langchain_experimental/open_clip/__init__.py deleted file mode 100644 index 81064a82552fa..0000000000000 --- a/libs/experimental/langchain_experimental/open_clip/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -"""**OpenCLIP Embeddings** model. - -OpenCLIP is a multimodal model that can encode text and images into a shared space. - -See this paper for more details: https://arxiv.org/abs/2103.00020 -and [this repository](https://github.com/mlfoundations/open_clip) for details. - -""" - -from .open_clip import OpenCLIPEmbeddings - -__all__ = ["OpenCLIPEmbeddings"] diff --git a/libs/experimental/langchain_experimental/open_clip/open_clip.py b/libs/experimental/langchain_experimental/open_clip/open_clip.py deleted file mode 100644 index 33d18b6e9b14d..0000000000000 --- a/libs/experimental/langchain_experimental/open_clip/open_clip.py +++ /dev/null @@ -1,95 +0,0 @@ -from typing import Any, Dict, List - -from langchain_core.embeddings import Embeddings -from langchain_core.utils.pydantic import get_fields -from pydantic import BaseModel, ConfigDict, model_validator - - -class OpenCLIPEmbeddings(BaseModel, Embeddings): - """OpenCLIP Embeddings model.""" - - model: Any - preprocess: Any - tokenizer: Any - # Select model: https://github.com/mlfoundations/open_clip - model_name: str = "ViT-H-14" - checkpoint: str = "laion2b_s32b_b79k" - - model_config = ConfigDict(protected_namespaces=()) - - @model_validator(mode="before") - @classmethod - def validate_environment(cls, values: Dict) -> Any: - """Validate that open_clip and torch libraries are installed.""" - try: - import open_clip - - # Fall back to class defaults if not provided - model_name = values.get("model_name", get_fields(cls)["model_name"].default) - checkpoint = values.get("checkpoint", get_fields(cls)["checkpoint"].default) - - # Load model - model, _, preprocess = open_clip.create_model_and_transforms( - model_name=model_name, pretrained=checkpoint - ) - tokenizer = open_clip.get_tokenizer(model_name) - values["model"] = model - values["preprocess"] = preprocess - values["tokenizer"] = tokenizer - - except ImportError: - raise ImportError( - "Please ensure both open_clip and torch libraries are installed. 
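A usage sketch for `OpenCLIPEmbeddings`; the smaller ViT-B-32 checkpoint is chosen here instead of the ViT-H-14 default (both names come from the open_clip model zoo), and `cat.jpg` is an illustrative local path:

```python
from langchain_experimental.open_clip import OpenCLIPEmbeddings

clip = OpenCLIPEmbeddings(model_name="ViT-B-32", checkpoint="laion2b_s34b_b79k")
text_vec = clip.embed_query("a photo of a cat")
image_vecs = clip.embed_image(["cat.jpg"])  # requires pillow
# Text and image vectors share one space, so cosine similarity compares them directly.
```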
" - "pip install open_clip_torch torch" - ) - return values - - def embed_documents(self, texts: List[str]) -> List[List[float]]: - text_features = [] - for text in texts: - # Tokenize the text - tokenized_text = self.tokenizer(text) - - # Encode the text to get the embeddings - embeddings_tensor = self.model.encode_text(tokenized_text) - - # Normalize the embeddings - norm = embeddings_tensor.norm(p=2, dim=1, keepdim=True) - normalized_embeddings_tensor = embeddings_tensor.div(norm) - - # Convert normalized tensor to list and add to the text_features list - embeddings_list = normalized_embeddings_tensor.squeeze(0).tolist() - text_features.append(embeddings_list) - - return text_features - - def embed_query(self, text: str) -> List[float]: - return self.embed_documents([text])[0] - - def embed_image(self, uris: List[str]) -> List[List[float]]: - try: - from PIL import Image as _PILImage - except ImportError: - raise ImportError("Please install the PIL library: pip install pillow") - - # Open images directly as PIL images - pil_images = [_PILImage.open(uri) for uri in uris] - - image_features = [] - for pil_image in pil_images: - # Preprocess the image for the model - preprocessed_image = self.preprocess(pil_image).unsqueeze(0) - - # Encode the image to get the embeddings - embeddings_tensor = self.model.encode_image(preprocessed_image) - - # Normalize the embeddings tensor - norm = embeddings_tensor.norm(p=2, dim=1, keepdim=True) - normalized_embeddings_tensor = embeddings_tensor.div(norm) - - # Convert tensor to list and add to the image_features list - embeddings_list = normalized_embeddings_tensor.squeeze(0).tolist() - - image_features.append(embeddings_list) - - return image_features diff --git a/libs/experimental/langchain_experimental/openai_assistant/__init__.py b/libs/experimental/langchain_experimental/openai_assistant/__init__.py deleted file mode 100644 index 84ab8035ac393..0000000000000 --- a/libs/experimental/langchain_experimental/openai_assistant/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from langchain_experimental.openai_assistant.base import OpenAIAssistantRunnable - -__all__ = ["OpenAIAssistantRunnable"] diff --git a/libs/experimental/langchain_experimental/openai_assistant/base.py b/libs/experimental/langchain_experimental/openai_assistant/base.py deleted file mode 100644 index c6f6a34bb9608..0000000000000 --- a/libs/experimental/langchain_experimental/openai_assistant/base.py +++ /dev/null @@ -1,8 +0,0 @@ -# flake8: noqa - -# For backwards compatibility. -from langchain.agents.openai_assistant.base import ( - OpenAIAssistantAction, - OpenAIAssistantFinish, - OpenAIAssistantRunnable, -) diff --git a/libs/experimental/langchain_experimental/pal_chain/__init__.py b/libs/experimental/langchain_experimental/pal_chain/__init__.py deleted file mode 100644 index 9879946f7316f..0000000000000 --- a/libs/experimental/langchain_experimental/pal_chain/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -"""**PAL Chain** implements **Program-Aided Language** Models. - -See the paper: https://arxiv.org/pdf/2211.10435.pdf. - -This chain is vulnerable to [arbitrary code execution](https://github.com/langchain-ai/langchain/issues/5872). 
-""" - -from langchain_experimental.pal_chain.base import PALChain - -__all__ = ["PALChain"] diff --git a/libs/experimental/langchain_experimental/pal_chain/base.py b/libs/experimental/langchain_experimental/pal_chain/base.py deleted file mode 100644 index 9311420e85651..0000000000000 --- a/libs/experimental/langchain_experimental/pal_chain/base.py +++ /dev/null @@ -1,371 +0,0 @@ -"""Implements Program-Aided Language Models. - -This module implements the Program-Aided Language Models (PAL) for generating code -solutions. PAL is a technique described in the paper "Program-Aided Language Models" -(https://arxiv.org/pdf/2211.10435.pdf). -""" - -from __future__ import annotations - -import ast -from typing import Any, Dict, List, Optional - -from langchain.chains.base import Chain -from langchain.chains.llm import LLMChain -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.language_models import BaseLanguageModel -from pydantic import ConfigDict, Field, model_validator -from typing_extensions import Self - -from langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT -from langchain_experimental.pal_chain.math_prompt import MATH_PROMPT -from langchain_experimental.utilities import PythonREPL - -COMMAND_EXECUTION_FUNCTIONS = [ - "system", - "exec", - "execfile", - "eval", - "__import__", - "compile", -] -COMMAND_EXECUTION_ATTRIBUTES = [ - "__import__", - "__subclasses__", - "__builtins__", - "__globals__", - "__getattribute__", - "__code__", - "__bases__", - "__mro__", - "__base__", -] - - -class PALValidation: - """Validation for PAL generated code.""" - - SOLUTION_EXPRESSION_TYPE_FUNCTION = ast.FunctionDef - SOLUTION_EXPRESSION_TYPE_VARIABLE = ast.Name - - def __init__( - self, - solution_expression_name: Optional[str] = None, - solution_expression_type: Optional[type] = None, - allow_imports: bool = False, - allow_command_exec: bool = False, - ): - """Initialize a PALValidation instance. - - Args: - solution_expression_name (str): Name of the expected solution expression. - If passed, solution_expression_type must be passed as well. - solution_expression_type (str): AST type of the expected solution - expression. If passed, solution_expression_name must be passed as well. - Must be one of PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION, - PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE. - allow_imports (bool): Allow import statements. - allow_command_exec (bool): Allow using known command execution functions. 
- """ - self.solution_expression_name = solution_expression_name - self.solution_expression_type = solution_expression_type - - if solution_expression_name is not None: - if not isinstance(self.solution_expression_name, str): - raise ValueError( - f"Expected solution_expression_name to be str, " - f"instead found {type(self.solution_expression_name)}" - ) - if solution_expression_type is not None: - if ( - self.solution_expression_type - is not self.SOLUTION_EXPRESSION_TYPE_FUNCTION - and self.solution_expression_type - is not self.SOLUTION_EXPRESSION_TYPE_VARIABLE - ): - raise ValueError( - f"Expected solution_expression_type to be one of " - f"({self.SOLUTION_EXPRESSION_TYPE_FUNCTION}," - f"{self.SOLUTION_EXPRESSION_TYPE_VARIABLE})," - f"instead found {self.solution_expression_type}" - ) - - if solution_expression_name is not None and solution_expression_type is None: - raise TypeError( - "solution_expression_name " - "requires solution_expression_type to be passed as well" - ) - if solution_expression_name is None and solution_expression_type is not None: - raise TypeError( - "solution_expression_type " - "requires solution_expression_name to be passed as well" - ) - - self.allow_imports = allow_imports - self.allow_command_exec = allow_command_exec - - -class PALChain(Chain): - """Chain that implements Program-Aided Language Models (PAL). - - This class implements the Program-Aided Language Models (PAL) for generating code - solutions. PAL is a technique described in the paper "Program-Aided Language Models" - (https://arxiv.org/pdf/2211.10435.pdf). - - *Security note*: This class implements an AI technique that generates and evaluates - Python code, which can be dangerous and requires a specially sandboxed - environment to be safely used. While this class implements some basic guardrails - by limiting available locals/globals and by parsing and inspecting - the generated Python AST using `PALValidation`, those guardrails will not - deter sophisticated attackers and are not a replacement for a proper sandbox. - Do not use this class on untrusted inputs, with elevated permissions, - or without consulting your security team about proper sandboxing! - """ - - llm_chain: LLMChain - stop: str = "\n\n" - """Stop token to use when generating code.""" - get_answer_expr: str = "print(solution())" - """Expression to use to get the answer from the generated code.""" - python_globals: Optional[Dict[str, Any]] = None - """Python globals and locals to use when executing the generated code.""" - python_locals: Optional[Dict[str, Any]] = None - """Python globals and locals to use when executing the generated code.""" - output_key: str = "result" #: :meta private: - return_intermediate_steps: bool = False - """Whether to return intermediate steps in the generated code.""" - code_validations: PALValidation = Field(default_factory=PALValidation) - """Validations to perform on the generated code.""" - timeout: Optional[int] = 10 - """Timeout in seconds for the generated code to execute.""" - allow_dangerous_code: bool = False - """This chain relies on the execution of generated code, which can be dangerous. - - This class implements an AI technique that generates and evaluates - Python code, which can be dangerous and requires a specially sandboxed - environment to be safely used. 
While this class implements some basic guardrails - by limiting available locals/globals and by parsing and inspecting - the generated Python AST using `PALValidation`, those guardrails will not - deter sophisticated attackers and are not a replacement for a proper sandbox. - Do not use this class on untrusted inputs, with elevated permissions, - or without consulting your security team about proper sandboxing! - - Failure to properly sandbox this class can lead to arbitrary code execution - vulnerabilities, which can lead to data breaches, data loss, or other security - incidents. - """ - - @model_validator(mode="after") - def post_init(self) -> Self: - if not self.allow_dangerous_code: - raise ValueError( - "This chain relies on the execution of generated code, " - "which can be dangerous. " - "Please read the security notice for this class, and only " - "use it if you understand the security implications. " - "If you want to proceed, you will need to opt-in, by setting " - "`allow_dangerous_code` to `True`." - ) - - return self - - model_config = ConfigDict( - arbitrary_types_allowed=True, - extra="forbid", - ) - - @property - def input_keys(self) -> List[str]: - """Return the singular input key. - - :meta private: - """ - return self.llm_chain.prompt.input_variables - - @property - def output_keys(self) -> List[str]: - """Return the singular output key. - - :meta private: - """ - if not self.return_intermediate_steps: - return [self.output_key] - else: - return [self.output_key, "intermediate_steps"] - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - code = self.llm_chain.predict( - stop=[self.stop], callbacks=_run_manager.get_child(), **inputs - ) - _run_manager.on_text(code, color="green", end="\n", verbose=self.verbose) - PALChain.validate_code(code, self.code_validations) - - # TODO: look into why mypy thinks PythonREPL's type here is `Any` - # and therefore not callable - repl = PythonREPL( - _globals=self.python_globals, - _locals=self.python_locals, - ) # type: ignore[misc] - res = repl.run(code + f"\n{self.get_answer_expr}", timeout=self.timeout) - output = {self.output_key: res.strip()} - if self.return_intermediate_steps: - output["intermediate_steps"] = code - return output - - @classmethod - def validate_code(cls, code: str, code_validations: PALValidation) -> None: - try: - code_tree = ast.parse(code) - except (SyntaxError, UnicodeDecodeError): - raise ValueError(f"Generated code is not valid python code: {code}") - except TypeError: - raise ValueError( - f"Generated code is expected to be a string, " - f"instead found {type(code)}" - ) - except OverflowError: - raise ValueError( - f"Generated code too long / complex to be parsed by ast: {code}" - ) - - found_solution_expr = False - if code_validations.solution_expression_name is None: - # Skip validation if no solution_expression_name was given - found_solution_expr = True - - has_imports = False - top_level_nodes = list(ast.iter_child_nodes(code_tree)) - for node in top_level_nodes: - if ( - code_validations.solution_expression_name is not None - and code_validations.solution_expression_type is not None - ): - # Check root nodes (like func def) - if ( - isinstance(node, code_validations.solution_expression_type) - and hasattr(node, "name") - and node.name == code_validations.solution_expression_name - ): - found_solution_expr = True - # Check assigned nodes (like 
answer variable) - if isinstance(node, ast.Assign): - for target_node in node.targets: - if ( - isinstance( - target_node, code_validations.solution_expression_type - ) - and hasattr(target_node, "id") - and target_node.id - == code_validations.solution_expression_name - ): - found_solution_expr = True - if isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom): - has_imports = True - - if not found_solution_expr: - raise ValueError( - f"Generated code is missing the solution expression: " - f"{code_validations.solution_expression_name} of type: " - f"{code_validations.solution_expression_type}" - ) - - if not code_validations.allow_imports and has_imports: - raise ValueError(f"Generated code has disallowed imports: {code}") - - if ( - not code_validations.allow_command_exec - or not code_validations.allow_imports - ): - for node in ast.walk(code_tree): - if ( - not code_validations.allow_command_exec - and isinstance(node, ast.Attribute) - and node.attr in COMMAND_EXECUTION_ATTRIBUTES - ): - raise ValueError( - f"Found illegal command execution function " - f"{node.attr} in code {code}" - ) - if (not code_validations.allow_command_exec) and isinstance( - node, ast.Call - ): - if ( - hasattr(node.func, "id") - and node.func.id in COMMAND_EXECUTION_FUNCTIONS - ): - raise ValueError( - f"Found illegal command execution function " - f"{node.func.id} in code {code}" - ) - - if ( - isinstance(node.func, ast.Attribute) - and node.func.attr in COMMAND_EXECUTION_FUNCTIONS - ): - raise ValueError( - f"Found illegal command execution function " - f"{node.func.attr} in code {code}" - ) - - if (not code_validations.allow_imports) and ( - isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom) - ): - raise ValueError(f"Generated code has disallowed imports: {code}") - - @classmethod - def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) -> PALChain: - """Load PAL from math prompt. - - Args: - llm (BaseLanguageModel): The language model to use for generating code. - - Returns: - PALChain: An instance of PALChain. - """ - llm_chain = LLMChain(llm=llm, prompt=MATH_PROMPT) - code_validations = PALValidation( - solution_expression_name="solution", - solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION, - ) - - return cls( - llm_chain=llm_chain, - stop="\n\n", - get_answer_expr="print(solution())", - code_validations=code_validations, - **kwargs, - ) - - @classmethod - def from_colored_object_prompt( - cls, llm: BaseLanguageModel, **kwargs: Any - ) -> PALChain: - """Load PAL from colored object prompt. - - Args: - llm (BaseLanguageModel): The language model to use for generating code. - - Returns: - PALChain: An instance of PALChain. 
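The AST screening in `validate_code` above can be exercised directly. A minimal sketch of how a banned call is rejected (illustrative input, not from the test suite):

```python
# `eval` appears in COMMAND_EXECUTION_FUNCTIONS, so validation raises.
try:
    PALChain.validate_code("solution = eval('1 + 1')", PALValidation())
except ValueError as err:
    print(err)  # Found illegal command execution function eval in code ...
```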
- """ - llm_chain = LLMChain(llm=llm, prompt=COLORED_OBJECT_PROMPT) - code_validations = PALValidation( - solution_expression_name="answer", - solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE, - ) - return cls( - llm_chain=llm_chain, - stop="\n\n\n", - get_answer_expr="print(answer)", - code_validations=code_validations, - **kwargs, - ) - - @property - def _chain_type(self) -> str: - return "pal_chain" diff --git a/libs/experimental/langchain_experimental/pal_chain/colored_object_prompt.py b/libs/experimental/langchain_experimental/pal_chain/colored_object_prompt.py deleted file mode 100644 index ef6db2e6f54a8..0000000000000 --- a/libs/experimental/langchain_experimental/pal_chain/colored_object_prompt.py +++ /dev/null @@ -1,77 +0,0 @@ -# flake8: noqa -from langchain_core.prompts.prompt import PromptTemplate - -template = ( - """ -# Generate Python3 Code to solve problems -# Q: On the nightstand, there is a red pencil, a purple mug, a burgundy keychain, a fuchsia teddy bear, a black plate, and a blue stress ball. What color is the stress ball? -# Put objects into a dictionary for quick look up -objects = dict() -objects['pencil'] = 'red' -objects['mug'] = 'purple' -objects['keychain'] = 'burgundy' -objects['teddy bear'] = 'fuchsia' -objects['plate'] = 'black' -objects['stress ball'] = 'blue' - -# Look up the color of stress ball -stress_ball_color = objects['stress ball'] -answer = stress_ball_color - - -# Q: On the table, you see a bunch of objects arranged in a row: a purple paperclip, a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve fidget spinner, and a burgundy pen. What is the color of the object directly to the right of the stress ball? -# Put objects into a list to record ordering -objects = [] -objects += [('paperclip', 'purple')] * 1 -objects += [('stress ball', 'pink')] * 1 -objects += [('keychain', 'brown')] * 1 -objects += [('scrunchiephone charger', 'green')] * 1 -objects += [('fidget spinner', 'mauve')] * 1 -objects += [('pen', 'burgundy')] * 1 - -# Find the index of the stress ball -stress_ball_idx = None -for i, object in enumerate(objects): - if object[0] == 'stress ball': - stress_ball_idx = i - break - -# Find the directly right object -direct_right = objects[i+1] - -# Check the directly right object's color -direct_right_color = direct_right[1] -answer = direct_right_color - - -# Q: On the nightstand, you see the following items arranged in a row: a teal plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug, a pink notebook, and a grey cup. How many non-orange items do you see to the left of the teal item? 
-# Put objects into a list to record ordering -objects = [] -objects += [('plate', 'teal')] * 1 -objects += [('keychain', 'burgundy')] * 1 -objects += [('scrunchiephone charger', 'yellow')] * 1 -objects += [('mug', 'orange')] * 1 -objects += [('notebook', 'pink')] * 1 -objects += [('cup', 'grey')] * 1 - -# Find the index of the teal item -teal_idx = None -for i, object in enumerate(objects): - if object[1] == 'teal': - teal_idx = i - break - -# Find non-orange items to the left of the teal item -non_orange = [object for object in objects[:i] if object[1] != 'orange'] - -# Count number of non-orange objects -num_non_orange = len(non_orange) -answer = num_non_orange - - -# Q: {question} -""".strip() - + "\n" -) - -COLORED_OBJECT_PROMPT = PromptTemplate(input_variables=["question"], template=template) diff --git a/libs/experimental/langchain_experimental/pal_chain/math_prompt.py b/libs/experimental/langchain_experimental/pal_chain/math_prompt.py deleted file mode 100644 index 873f678368951..0000000000000 --- a/libs/experimental/langchain_experimental/pal_chain/math_prompt.py +++ /dev/null @@ -1,157 +0,0 @@ -# flake8: noqa -from langchain_core.prompts.prompt import PromptTemplate - -template = ( - ''' -Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left? - -# solution in Python: - - -def solution(): - """Olivia has $23. She bought five bagels for $3 each. How much money does she have left?""" - money_initial = 23 - bagels = 5 - bagel_cost = 3 - money_spent = bagels * bagel_cost - money_left = money_initial - money_spent - result = money_left - return result - - - - - -Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday? - -# solution in Python: - - -def solution(): - """Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?""" - golf_balls_initial = 58 - golf_balls_lost_tuesday = 23 - golf_balls_lost_wednesday = 2 - golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday - result = golf_balls_left - return result - - - - - -Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room? - -# solution in Python: - - -def solution(): - """There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?""" - computers_initial = 9 - computers_per_day = 5 - num_days = 4 # 4 days between monday and thursday - computers_added = computers_per_day * num_days - computers_total = computers_initial + computers_added - result = computers_total - return result - - - - - -Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now? - -# solution in Python: - - -def solution(): - """Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?""" - toys_initial = 5 - mom_toys = 2 - dad_toys = 2 - total_received = mom_toys + dad_toys - total_toys = toys_initial + total_received - result = total_toys - return result - - - - - -Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny? - -# solution in Python: - - -def solution(): - """Jason had 20 lollipops. 
He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?""" - jason_lollipops_initial = 20 - jason_lollipops_after = 12 - denny_lollipops = jason_lollipops_initial - jason_lollipops_after - result = denny_lollipops - return result - - - - - -Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total? - -# solution in Python: - - -def solution(): - """Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?""" - leah_chocolates = 32 - sister_chocolates = 42 - total_chocolates = leah_chocolates + sister_chocolates - chocolates_eaten = 35 - chocolates_left = total_chocolates - chocolates_eaten - result = chocolates_left - return result - - - - - -Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot? - -# solution in Python: - - -def solution(): - """If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?""" - cars_initial = 3 - cars_arrived = 2 - total_cars = cars_initial + cars_arrived - result = total_cars - return result - - - - - -Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today? - -# solution in Python: - - -def solution(): - """There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?""" - trees_initial = 15 - trees_after = 21 - trees_added = trees_after - trees_initial - result = trees_added - return result - - - - - -Q: {question} - -# solution in Python: -'''.strip() - + "\n\n\n" -) -MATH_PROMPT = PromptTemplate(input_variables=["question"], template=template) diff --git a/libs/experimental/langchain_experimental/plan_and_execute/__init__.py b/libs/experimental/langchain_experimental/plan_and_execute/__init__.py deleted file mode 100644 index 85363cd1c3997..0000000000000 --- a/libs/experimental/langchain_experimental/plan_and_execute/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -"""**Plan-and-execute agents** are planning tasks with a language model (LLM) and -executing them with a separate agent. 
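Concretely, the exports below compose into a two-stage agent. A hedged sketch, assuming an `llm` and a `tools` list are defined elsewhere:

```python
# Hypothetical wiring of the planner, the executor, and the outer chain.
planner = load_chat_planner(llm)
executor = load_agent_executor(llm, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor)
result = agent.run("What is the population of the capital of France?")
```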
- -""" - -from langchain_experimental.plan_and_execute.agent_executor import PlanAndExecute -from langchain_experimental.plan_and_execute.executors.agent_executor import ( - load_agent_executor, -) -from langchain_experimental.plan_and_execute.planners.chat_planner import ( - load_chat_planner, -) - -__all__ = ["PlanAndExecute", "load_agent_executor", "load_chat_planner"] diff --git a/libs/experimental/langchain_experimental/plan_and_execute/agent_executor.py b/libs/experimental/langchain_experimental/plan_and_execute/agent_executor.py deleted file mode 100644 index d30842829304d..0000000000000 --- a/libs/experimental/langchain_experimental/plan_and_execute/agent_executor.py +++ /dev/null @@ -1,100 +0,0 @@ -from typing import Any, Dict, List, Optional - -from langchain.chains.base import Chain -from langchain_core.callbacks.manager import ( - AsyncCallbackManagerForChainRun, - CallbackManagerForChainRun, -) -from pydantic import Field - -from langchain_experimental.plan_and_execute.executors.base import BaseExecutor -from langchain_experimental.plan_and_execute.planners.base import BasePlanner -from langchain_experimental.plan_and_execute.schema import ( - BaseStepContainer, - ListStepContainer, -) - - -class PlanAndExecute(Chain): - """Plan and execute a chain of steps.""" - - planner: BasePlanner - """The planner to use.""" - executor: BaseExecutor - """The executor to use.""" - step_container: BaseStepContainer = Field(default_factory=ListStepContainer) - """The step container to use.""" - input_key: str = "input" - output_key: str = "output" - - @property - def input_keys(self) -> List[str]: - return [self.input_key] - - @property - def output_keys(self) -> List[str]: - return [self.output_key] - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - plan = self.planner.plan( - inputs, - callbacks=run_manager.get_child() if run_manager else None, - ) - if run_manager: - run_manager.on_text(str(plan), verbose=self.verbose) - for step in plan.steps: - _new_inputs = { - "previous_steps": self.step_container, - "current_step": step, - "objective": inputs[self.input_key], - } - new_inputs = {**_new_inputs, **inputs} - response = self.executor.step( - new_inputs, - callbacks=run_manager.get_child() if run_manager else None, - ) - if run_manager: - run_manager.on_text( - f"*****\n\nStep: {step.value}", verbose=self.verbose - ) - run_manager.on_text( - f"\n\nResponse: {response.response}", verbose=self.verbose - ) - self.step_container.add_step(step, response) - return {self.output_key: self.step_container.get_final_response()} - - async def _acall( - self, - inputs: Dict[str, Any], - run_manager: Optional[AsyncCallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - plan = await self.planner.aplan( - inputs, - callbacks=run_manager.get_child() if run_manager else None, - ) - if run_manager: - await run_manager.on_text(str(plan), verbose=self.verbose) - for step in plan.steps: - _new_inputs = { - "previous_steps": self.step_container, - "current_step": step, - "objective": inputs[self.input_key], - } - new_inputs = {**_new_inputs, **inputs} - response = await self.executor.astep( - new_inputs, - callbacks=run_manager.get_child() if run_manager else None, - ) - if run_manager: - await run_manager.on_text( - f"*****\n\nStep: {step.value}", verbose=self.verbose - ) - await run_manager.on_text( - f"\n\nResponse: {response.response}", verbose=self.verbose - ) - self.step_container.add_step(step, response) - return 
{self.output_key: self.step_container.get_final_response()} diff --git a/libs/experimental/langchain_experimental/plan_and_execute/executors/__init__.py b/libs/experimental/langchain_experimental/plan_and_execute/executors/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/langchain_experimental/plan_and_execute/executors/agent_executor.py b/libs/experimental/langchain_experimental/plan_and_execute/executors/agent_executor.py deleted file mode 100644 index e0c24263cf94f..0000000000000 --- a/libs/experimental/langchain_experimental/plan_and_execute/executors/agent_executor.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import List - -from langchain.agents.agent import AgentExecutor -from langchain.agents.structured_chat.base import StructuredChatAgent -from langchain_core.language_models import BaseLanguageModel -from langchain_core.tools import BaseTool - -from langchain_experimental.plan_and_execute.executors.base import ChainExecutor - -HUMAN_MESSAGE_TEMPLATE = """Previous steps: {previous_steps} - -Current objective: {current_step} - -{agent_scratchpad}""" - -TASK_PREFIX = """{objective} - -""" - - -def load_agent_executor( - llm: BaseLanguageModel, - tools: List[BaseTool], - verbose: bool = False, - include_task_in_prompt: bool = False, -) -> ChainExecutor: - """ - Load an agent executor. - - Args: - llm: BaseLanguageModel - tools: List[BaseTool] - verbose: bool. Defaults to False. - include_task_in_prompt: bool. Defaults to False. - - Returns: - ChainExecutor - """ - input_variables = ["previous_steps", "current_step", "agent_scratchpad"] - template = HUMAN_MESSAGE_TEMPLATE - - if include_task_in_prompt: - input_variables.append("objective") - template = TASK_PREFIX + template - - agent = StructuredChatAgent.from_llm_and_tools( - llm, - tools, - human_message_template=template, - input_variables=input_variables, - ) - agent_executor = AgentExecutor.from_agent_and_tools( - agent=agent, tools=tools, verbose=verbose - ) - return ChainExecutor(chain=agent_executor) diff --git a/libs/experimental/langchain_experimental/plan_and_execute/executors/base.py b/libs/experimental/langchain_experimental/plan_and_execute/executors/base.py deleted file mode 100644 index e95f1657a6303..0000000000000 --- a/libs/experimental/langchain_experimental/plan_and_execute/executors/base.py +++ /dev/null @@ -1,45 +0,0 @@ -from abc import abstractmethod -from typing import Any - -from langchain.chains.base import Chain -from langchain_core.callbacks.manager import Callbacks -from pydantic import BaseModel - -from langchain_experimental.plan_and_execute.schema import StepResponse - - -class BaseExecutor(BaseModel): - """Base executor.""" - - @abstractmethod - def step( - self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any - ) -> StepResponse: - """Take step.""" - - @abstractmethod - async def astep( - self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any - ) -> StepResponse: - """Take async step.""" - - -class ChainExecutor(BaseExecutor): - """Chain executor.""" - - chain: Chain - """The chain to use.""" - - def step( - self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any - ) -> StepResponse: - """Take step.""" - response = self.chain.run(**inputs, callbacks=callbacks) - return StepResponse(response=response) - - async def astep( - self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any - ) -> StepResponse: - """Take step.""" - response = await self.chain.arun(**inputs, callbacks=callbacks) - return 
StepResponse(response=response)
diff --git a/libs/experimental/langchain_experimental/plan_and_execute/planners/__init__.py b/libs/experimental/langchain_experimental/plan_and_execute/planners/__init__.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/libs/experimental/langchain_experimental/plan_and_execute/planners/base.py b/libs/experimental/langchain_experimental/plan_and_execute/planners/base.py
deleted file mode 100644
index 3c86eb15ec0d4..0000000000000
--- a/libs/experimental/langchain_experimental/plan_and_execute/planners/base.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from abc import abstractmethod
-from typing import Any, List, Optional
-
-from langchain.chains.llm import LLMChain
-from langchain_core.callbacks.manager import Callbacks
-from pydantic import BaseModel
-
-from langchain_experimental.plan_and_execute.schema import Plan, PlanOutputParser
-
-
-class BasePlanner(BaseModel):
-    """Base planner."""
-
-    @abstractmethod
-    def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
-        """Given input, decide what to do."""
-
-    @abstractmethod
-    async def aplan(
-        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
-    ) -> Plan:
-        """Given input, asynchronously decide what to do."""
-
-
-class LLMPlanner(BasePlanner):
-    """LLM planner."""
-
-    llm_chain: LLMChain
-    """The LLM chain to use."""
-    output_parser: PlanOutputParser
-    """The output parser to use."""
-    stop: Optional[List] = None
-    """The stop list to use."""
-
-    def plan(self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any) -> Plan:
-        """Given input, decide what to do."""
-        llm_response = self.llm_chain.run(**inputs, stop=self.stop, callbacks=callbacks)
-        return self.output_parser.parse(llm_response)
-
-    async def aplan(
-        self, inputs: dict, callbacks: Callbacks = None, **kwargs: Any
-    ) -> Plan:
-        """Given input, asynchronously decide what to do."""
-        llm_response = await self.llm_chain.arun(
-            **inputs, stop=self.stop, callbacks=callbacks
-        )
-        return self.output_parser.parse(llm_response)
diff --git a/libs/experimental/langchain_experimental/plan_and_execute/planners/chat_planner.py b/libs/experimental/langchain_experimental/plan_and_execute/planners/chat_planner.py
deleted file mode 100644
index 704543e54cd7b..0000000000000
--- a/libs/experimental/langchain_experimental/plan_and_execute/planners/chat_planner.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import re
-
-from langchain.chains import LLMChain
-from langchain_core.language_models import BaseLanguageModel
-from langchain_core.messages import SystemMessage
-from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate
-
-from langchain_experimental.plan_and_execute.planners.base import LLMPlanner
-from langchain_experimental.plan_and_execute.schema import (
-    Plan,
-    PlanOutputParser,
-    Step,
-)
-
-SYSTEM_PROMPT = (
-    "Let's first understand the problem and devise a plan to solve the problem."
-    " Please output the plan starting with the header 'Plan:' "
-    "and then followed by a numbered list of steps. "
-    "Please make the plan the minimum number of steps required "
-    "to accurately complete the task. If the task is a question, "
-    "the final step should almost always be 'Given the above steps taken, "
-    "please respond to the user's original question'. "
-    "At the end of your plan, say '<END_OF_PLAN>'"
-)
-
-
-class PlanningOutputParser(PlanOutputParser):
-    """Planning output parser."""
-
-    def parse(self, text: str) -> Plan:
-        steps = [Step(value=v) for v in re.split(r"\n\s*\d+\. ", text)[1:]]
-        return Plan(steps=steps)
-
-
-def load_chat_planner(
-    llm: BaseLanguageModel, system_prompt: str = SYSTEM_PROMPT
-) -> LLMPlanner:
-    """
-    Load a chat planner.
-
-    Args:
-        llm: Language model.
-        system_prompt: System prompt.
-
-    Returns:
-        LLMPlanner
-    """
-    prompt_template = ChatPromptTemplate.from_messages(
-        [
-            SystemMessage(content=system_prompt),
-            HumanMessagePromptTemplate.from_template("{input}"),
-        ]
-    )
-    llm_chain = LLMChain(llm=llm, prompt=prompt_template)
-    return LLMPlanner(
-        llm_chain=llm_chain,
-        output_parser=PlanningOutputParser(),
-        stop=["<END_OF_PLAN>"],
-    )
diff --git a/libs/experimental/langchain_experimental/plan_and_execute/schema.py b/libs/experimental/langchain_experimental/plan_and_execute/schema.py
deleted file mode 100644
index 66586c56905ea..0000000000000
--- a/libs/experimental/langchain_experimental/plan_and_execute/schema.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from abc import abstractmethod
-from typing import List, Tuple
-
-from langchain_core.output_parsers import BaseOutputParser
-from pydantic import BaseModel, Field
-
-
-class Step(BaseModel):
-    """Step."""
-
-    value: str
-    """The value."""
-
-
-class Plan(BaseModel):
-    """Plan."""
-
-    steps: List[Step]
-    """The steps."""
-
-
-class StepResponse(BaseModel):
-    """Step response."""
-
-    response: str
-    """The response."""
-
-
-class BaseStepContainer(BaseModel):
-    """Base step container."""
-
-    @abstractmethod
-    def add_step(self, step: Step, step_response: StepResponse) -> None:
-        """Add step and step response to the container."""
-
-    @abstractmethod
-    def get_final_response(self) -> str:
-        """Return the final response based on steps taken."""
-
-
-class ListStepContainer(BaseStepContainer):
-    """Container for List of steps."""
-
-    steps: List[Tuple[Step, StepResponse]] = Field(default_factory=list)
-    """The steps."""
-
-    def add_step(self, step: Step, step_response: StepResponse) -> None:
-        self.steps.append((step, step_response))
-
-    def get_steps(self) -> List[Tuple[Step, StepResponse]]:
-        return self.steps
-
-    def get_final_response(self) -> str:
-        return self.steps[-1][1].response
-
-
-class PlanOutputParser(BaseOutputParser):
-    """Plan output parser."""
-
-    @abstractmethod
-    def parse(self, text: str) -> Plan:
-        """Parse into a plan."""
diff --git a/libs/experimental/langchain_experimental/prompt_injection_identifier/__init__.py b/libs/experimental/langchain_experimental/prompt_injection_identifier/__init__.py
deleted file mode 100644
index aba9151fc1aa3..0000000000000
--- a/libs/experimental/langchain_experimental/prompt_injection_identifier/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""**HuggingFace Injection Identifier** is a tool that uses
-[HuggingFace Prompt Injection model](https://huggingface.co/deepset/deberta-v3-base-injection)
-to detect prompt injection attacks.
-""" - -from langchain_experimental.prompt_injection_identifier.hugging_face_identifier import ( - HuggingFaceInjectionIdentifier, -) - -__all__ = ["HuggingFaceInjectionIdentifier"] diff --git a/libs/experimental/langchain_experimental/prompt_injection_identifier/hugging_face_identifier.py b/libs/experimental/langchain_experimental/prompt_injection_identifier/hugging_face_identifier.py deleted file mode 100644 index 92159f2f93284..0000000000000 --- a/libs/experimental/langchain_experimental/prompt_injection_identifier/hugging_face_identifier.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Tool for the identification of prompt injection attacks.""" - -from __future__ import annotations - -from typing import TYPE_CHECKING, Any, Union - -from langchain_core.tools import BaseTool -from pydantic import Field, model_validator - -if TYPE_CHECKING: - from transformers import Pipeline - - -class PromptInjectionException(ValueError): - """Exception raised when prompt injection attack is detected.""" - - def __init__( - self, message: str = "Prompt injection attack detected", score: float = 1.0 - ): - self.message = message - self.score = score - - super().__init__(self.message) - - -def _model_default_factory( - model_name: str = "protectai/deberta-v3-base-prompt-injection-v2", -) -> Pipeline: - try: - from transformers import ( - AutoModelForSequenceClassification, - AutoTokenizer, - pipeline, - ) - except ImportError as e: - raise ImportError( - "Cannot import transformers, please install with " - "`pip install transformers`." - ) from e - - tokenizer = AutoTokenizer.from_pretrained(model_name) - model = AutoModelForSequenceClassification.from_pretrained(model_name) - - return pipeline( - "text-classification", - model=model, - tokenizer=tokenizer, - max_length=512, # default length of BERT models - truncation=True, # otherwise it will fail on long prompts - ) - - -class HuggingFaceInjectionIdentifier(BaseTool): - """Tool that uses HuggingFace Prompt Injection model to - detect prompt injection attacks.""" - - name: str = "hugging_face_injection_identifier" - description: str = ( - "A wrapper around HuggingFace Prompt Injection security model. " - "Useful for when you need to ensure that prompt is free of injection attacks. " - "Input should be any message from the user." - ) - model: Union[Pipeline, str, None] = Field(default_factory=_model_default_factory) - """Model to use for prompt injection detection. - - Can be specified as transformers Pipeline or string. String should correspond to the - model name of a text-classification transformers model. Defaults to - ``protectai/deberta-v3-base-prompt-injection-v2`` model. - """ - threshold: float = Field( - description="Threshold for prompt injection detection.", default=0.5 - ) - """Threshold for prompt injection detection. - - Defaults to 0.5.""" - injection_label: str = Field( - description="Label of the injection for prompt injection detection.", - default="INJECTION", - ) - """Label for prompt injection detection model. - - Defaults to ``INJECTION``. 
Value depends on the model used.""" - - @model_validator(mode="before") - @classmethod - def validate_environment(cls, values: dict) -> Any: - if isinstance(values.get("model"), str): - values["model"] = _model_default_factory(model_name=values["model"]) - return values - - def _run(self, query: str) -> str: - """Use the tool.""" - result = self.model(query) # type: ignore - score = ( - result[0]["score"] - if result[0]["label"] == self.injection_label - else 1 - result[0]["score"] - ) - if score > self.threshold: - raise PromptInjectionException("Prompt injection attack detected", score) - - return query diff --git a/libs/experimental/langchain_experimental/prompts/__init__.py b/libs/experimental/langchain_experimental/prompts/__init__.py deleted file mode 100644 index 24a1a7351a9f4..0000000000000 --- a/libs/experimental/langchain_experimental/prompts/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Unified method for **loading a prompt** from LangChainHub or local file system.""" - -from langchain_experimental.prompts.load import load_prompt - -__all__ = ["load_prompt"] diff --git a/libs/experimental/langchain_experimental/prompts/load.py b/libs/experimental/langchain_experimental/prompts/load.py deleted file mode 100644 index 22bd8c9a9b254..0000000000000 --- a/libs/experimental/langchain_experimental/prompts/load.py +++ /dev/null @@ -1,3 +0,0 @@ -from langchain_core.prompts.loading import load_prompt - -__all__ = ["load_prompt"] diff --git a/libs/experimental/langchain_experimental/py.typed b/libs/experimental/langchain_experimental/py.typed deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/langchain_experimental/pydantic_v1/__init__.py b/libs/experimental/langchain_experimental/pydantic_v1/__init__.py deleted file mode 100644 index 826f1c5008799..0000000000000 --- a/libs/experimental/langchain_experimental/pydantic_v1/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -import typing -from importlib import metadata - -## Create namespaces for pydantic v1 and v2. -# This code must stay at the top of the file before other modules may -# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules. -# -# This hack is done for the following reasons: -# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since -# both dependencies and dependents may be stuck on either version of v1 or v2. -# * Creating namespaces for pydantic v1 and v2 should allow us to write code that -# unambiguously uses either v1 or v2 API. -# * This change is easier to roll out and roll back. - -# It's currently impossible to support mypy for both pydantic v1 and v2 at once: -# https://github.com/pydantic/pydantic/issues/6022 -# -# In the lint environment, pydantic is currently v1. -# When we upgrade it to pydantic v2, we'll need -# to replace this with `from pydantic.v1 import *`. 
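In practice the shim lets downstream code target a single import path regardless of which pydantic major version is installed. A hedged sketch of such usage (the `ChainConfig` model is hypothetical):

```python
# Hypothetical downstream module: BaseModel resolves to the pydantic v1
# API whether pydantic v1 or v2 is present.
from langchain_experimental.pydantic_v1 import BaseModel


class ChainConfig(BaseModel):
    temperature: float = 0.0
```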
-if typing.TYPE_CHECKING: - from pydantic import * # noqa: F403 -else: - try: - from pydantic.v1 import * # noqa: F403 - except ImportError: - from pydantic import * # noqa: F403 - -try: - _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0]) -except metadata.PackageNotFoundError: - _PYDANTIC_MAJOR_VERSION = 0 diff --git a/libs/experimental/langchain_experimental/pydantic_v1/dataclasses.py b/libs/experimental/langchain_experimental/pydantic_v1/dataclasses.py deleted file mode 100644 index 25a7810a046dd..0000000000000 --- a/libs/experimental/langchain_experimental/pydantic_v1/dataclasses.py +++ /dev/null @@ -1,15 +0,0 @@ -import typing - -# It's currently impossible to support mypy for both pydantic v1 and v2 at once: -# https://github.com/pydantic/pydantic/issues/6022 -# -# In the lint environment, pydantic is currently v1. -# When we upgrade it to pydantic v2, we'll need to -# replace this with `from pydantic.v1.dataclasses import *`. -if typing.TYPE_CHECKING: - from pydantic.dataclasses import * # noqa: F403 -else: - try: - from pydantic.v1.dataclasses import * # noqa: F403 - except ImportError: - from pydantic.dataclasses import * # noqa: F403 diff --git a/libs/experimental/langchain_experimental/pydantic_v1/main.py b/libs/experimental/langchain_experimental/pydantic_v1/main.py deleted file mode 100644 index 2fa4c995872e3..0000000000000 --- a/libs/experimental/langchain_experimental/pydantic_v1/main.py +++ /dev/null @@ -1,15 +0,0 @@ -import typing - -# It's currently impossible to support mypy for both pydantic v1 and v2 at once: -# https://github.com/pydantic/pydantic/issues/6022 -# -# In the lint environment, pydantic is currently v1. -# When we upgrade it to pydantic v2, we'll need -# to replace this with `from pydantic.v1.main import *`. -if typing.TYPE_CHECKING: - from pydantic.main import * # noqa: F403 -else: - try: - from pydantic.v1.main import * # noqa: F403 - except ImportError: - from pydantic.main import * # noqa: F403 diff --git a/libs/experimental/langchain_experimental/recommenders/__init__.py b/libs/experimental/langchain_experimental/recommenders/__init__.py deleted file mode 100644 index 2c2ec65f8a491..0000000000000 --- a/libs/experimental/langchain_experimental/recommenders/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -"""**Amazon Personalize** primitives. - -[Amazon Personalize](https://docs.aws.amazon.com/personalize/latest/dg/what-is-personalize.html) -is a fully managed machine learning service that uses your data to generate -item recommendations for your users. -""" - -from langchain_experimental.recommenders.amazon_personalize import AmazonPersonalize -from langchain_experimental.recommenders.amazon_personalize_chain import ( - AmazonPersonalizeChain, -) - -__all__ = ["AmazonPersonalize", "AmazonPersonalizeChain"] diff --git a/libs/experimental/langchain_experimental/recommenders/amazon_personalize.py b/libs/experimental/langchain_experimental/recommenders/amazon_personalize.py deleted file mode 100644 index 6f24bc02c845a..0000000000000 --- a/libs/experimental/langchain_experimental/recommenders/amazon_personalize.py +++ /dev/null @@ -1,199 +0,0 @@ -from typing import Any, List, Mapping, Optional, Sequence - - -class AmazonPersonalize: - """Amazon Personalize Runtime wrapper for executing real-time operations. - - See [this link for more details](https://docs.aws.amazon.com/personalize/latest/dg/API_Operations_Amazon_Personalize_Runtime.html). 
- - Args: - campaign_arn: str, Optional: The Amazon Resource Name (ARN) of the campaign - to use for getting recommendations. - recommender_arn: str, Optional: The Amazon Resource Name (ARN) of the - recommender to use to get recommendations - client: Optional: boto3 client - credentials_profile_name: str, Optional :AWS profile name - region_name: str, Optional: AWS region, e.g., us-west-2 - - Example: - .. code-block:: python - - personalize_client = AmazonPersonalize ( - campaignArn='' ) - """ - - def __init__( - self, - campaign_arn: Optional[str] = None, - recommender_arn: Optional[str] = None, - client: Optional[Any] = None, - credentials_profile_name: Optional[str] = None, - region_name: Optional[str] = None, - ): - self.campaign_arn = campaign_arn - self.recommender_arn = recommender_arn - - if campaign_arn and recommender_arn: - raise ValueError( - "Cannot initialize AmazonPersonalize with both " - "campaign_arn and recommender_arn." - ) - - if not campaign_arn and not recommender_arn: - raise ValueError( - "Cannot initialize AmazonPersonalize. Provide one of " - "campaign_arn or recommender_arn" - ) - - try: - if client is not None: - self.client = client - else: - import boto3 - import botocore.config - - if credentials_profile_name is not None: - session = boto3.Session(profile_name=credentials_profile_name) - else: - # use default credentials - session = boto3.Session() - - client_params = {} - if region_name: - client_params["region_name"] = region_name - - service = "personalize-runtime" - session_config = botocore.config.Config(user_agent_extra="langchain") - client_params["config"] = session_config - self.client = session.client(service, **client_params) - - except ImportError: - raise ModuleNotFoundError( - "Could not import boto3 python package. " - "Please install it with `pip install boto3`." - ) - - def get_recommendations( - self, - user_id: Optional[str] = None, - item_id: Optional[str] = None, - filter_arn: Optional[str] = None, - filter_values: Optional[Mapping[str, str]] = None, - num_results: Optional[int] = 10, - context: Optional[Mapping[str, str]] = None, - promotions: Optional[Sequence[Mapping[str, Any]]] = None, - metadata_columns: Optional[Mapping[str, Sequence[str]]] = None, - **kwargs: Any, - ) -> Mapping[str, Any]: - """Get recommendations from Amazon Personalize service. - - See more details at: - https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetRecommendations.html - - Args: - user_id: str, Optional: The user identifier - for which to retrieve recommendations - item_id: str, Optional: The item identifier - for which to retrieve recommendations - filter_arn: str, Optional: The ARN of the filter - to apply to the returned recommendations - filter_values: Mapping, Optional: The values - to use when filtering recommendations. - num_results: int, Optional: Default=10: The number of results to return - context: Mapping, Optional: The contextual metadata - to use when getting recommendations - promotions: Sequence, Optional: The promotions - to apply to the recommendation request. - metadata_columns: Mapping, Optional: The metadata Columns to be returned - as part of the response. - - Returns: - response: Mapping[str, Any]: Returns an itemList and recommendationId. - - Example: - .. 
code-block:: python
-
-                personalize_client = AmazonPersonalize(campaignArn='' )\n
-                response = personalize_client.get_recommendations(user_id="1")
-
-        """
-        if not user_id and not item_id:
-            raise ValueError("One of user_id or item_id is required")
-
-        if filter_arn:
-            kwargs["filterArn"] = filter_arn
-        if filter_values:
-            kwargs["filterValues"] = filter_values
-        if user_id:
-            kwargs["userId"] = user_id
-        if num_results:
-            kwargs["numResults"] = num_results
-        if context:
-            kwargs["context"] = context
-        if promotions:
-            kwargs["promotions"] = promotions
-        if item_id:
-            kwargs["itemId"] = item_id
-        if metadata_columns:
-            kwargs["metadataColumns"] = metadata_columns
-        if self.campaign_arn:
-            kwargs["campaignArn"] = self.campaign_arn
-        if self.recommender_arn:
-            kwargs["recommenderArn"] = self.recommender_arn
-
-        return self.client.get_recommendations(**kwargs)
-
-    def get_personalized_ranking(
-        self,
-        user_id: str,
-        input_list: List[str],
-        filter_arn: Optional[str] = None,
-        filter_values: Optional[Mapping[str, str]] = None,
-        context: Optional[Mapping[str, str]] = None,
-        metadata_columns: Optional[Mapping[str, Sequence[str]]] = None,
-        **kwargs: Any,
-    ) -> Mapping[str, Any]:
-        """Re-ranks a list of recommended items for the given user.
-
-        https://docs.aws.amazon.com/personalize/latest/dg/API_RS_GetPersonalizedRanking.html
-
-        Args:
-            user_id: str, Required: The user identifier
-                for which to retrieve recommendations
-            input_list: List[str], Required: A list of items (by itemId) to rank
-            filter_arn: str, Optional: The ARN of the filter to apply
-            filter_values: Mapping, Optional: The values to use
-                when filtering recommendations.
-            context: Mapping, Optional: The contextual metadata
-                to use when getting recommendations
-            metadata_columns: Mapping, Optional: The metadata Columns to be returned
-                as part of the response.
-
-        Returns:
-            response: Mapping[str, Any]: Returns personalizedRanking
-                and recommendationId.
-
-        Example:
-            .. code-block:: python
-
-                personalize_client = AmazonPersonalize(campaignArn='' )\n
-                response = personalize_client.get_personalized_ranking(user_id="1",
-                                    input_list=["123", "256"])
-
-        """
-
-        if filter_arn:
-            kwargs["filterArn"] = filter_arn
-        if filter_values:
-            kwargs["filterValues"] = filter_values
-        if user_id:
-            kwargs["userId"] = user_id
-        if input_list:
-            kwargs["inputList"] = input_list
-        if context:
-            kwargs["context"] = context
-        if metadata_columns:
-            kwargs["metadataColumns"] = metadata_columns
-        kwargs["campaignArn"] = self.campaign_arn
-
-        return self.client.get_personalized_ranking(**kwargs)
diff --git a/libs/experimental/langchain_experimental/recommenders/amazon_personalize_chain.py b/libs/experimental/langchain_experimental/recommenders/amazon_personalize_chain.py
deleted file mode 100644
index a7252010e04ee..0000000000000
--- a/libs/experimental/langchain_experimental/recommenders/amazon_personalize_chain.py
+++ /dev/null
@@ -1,192 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, Dict, List, Mapping, Optional, cast
-
-from langchain.chains import LLMChain
-from langchain.chains.base import Chain
-from langchain.schema.language_model import BaseLanguageModel
-from langchain_core.callbacks.manager import (
-    CallbackManagerForChainRun,
-)
-from langchain_core.prompts.prompt import PromptTemplate
-
-from langchain_experimental.recommenders.amazon_personalize import AmazonPersonalize
-
-SUMMARIZE_PROMPT_QUERY = """
-Summarize the recommended items for a user from the items list in <result> tag below.
-Make correlation into the items in the list and provide a summary.
-    <result>
-        {result}
-    </result>
-"""
-
-SUMMARIZE_PROMPT = PromptTemplate(
-    input_variables=["result"], template=SUMMARIZE_PROMPT_QUERY
-)
-
-INTERMEDIATE_STEPS_KEY = "intermediate_steps"
-
-# Input Key Names to be used
-USER_ID_INPUT_KEY = "user_id"
-ITEM_ID_INPUT_KEY = "item_id"
-INPUT_LIST_INPUT_KEY = "input_list"
-FILTER_ARN_INPUT_KEY = "filter_arn"
-FILTER_VALUES_INPUT_KEY = "filter_values"
-CONTEXT_INPUT_KEY = "context"
-PROMOTIONS_INPUT_KEY = "promotions"
-METADATA_COLUMNS_INPUT_KEY = "metadata_columns"
-RESULT_OUTPUT_KEY = "result"
-
-
-class AmazonPersonalizeChain(Chain):
-    """Chain for retrieving recommendations from Amazon Personalize,
-    and summarizing them.
-
-    It returns the raw recommendations directly only if return_direct=True.
-    It can also be used in sequential chains for working with
-    the output of Amazon Personalize.
-
-    Example:
-        .. code-block:: python
-
-            chain = PersonalizeChain.from_llm(llm=agent_llm, client=personalize_lg,
-                return_direct=True)\n
-            response = chain.run({'user_id':'1'})\n
-            response = chain.run({'user_id':'1', 'item_id':'234'})
-    """
-
-    client: AmazonPersonalize
-    summarization_chain: LLMChain
-    return_direct: bool = False
-    return_intermediate_steps: bool = False
-    is_ranking_recipe: bool = False
-
-    @property
-    def input_keys(self) -> List[str]:
-        """This returns an empty list since all input_keys are optional
-        and none is required.
-
-        :meta private:
-        """
-        return []
-
-    @property
-    def output_keys(self) -> List[str]:
-        """Will always return result key.
-
-        :meta private:
-        """
-        return [RESULT_OUTPUT_KEY]
-
-    @classmethod
-    def from_llm(
-        cls,
-        llm: BaseLanguageModel,
-        client: AmazonPersonalize,
-        prompt_template: PromptTemplate = SUMMARIZE_PROMPT,
-        is_ranking_recipe: bool = False,
-        **kwargs: Any,
-    ) -> AmazonPersonalizeChain:
-        """Initializes the Personalize Chain with the LLM, the Personalize
-        client, and the prompt template to be used.
-
-        Args:
-            llm: BaseLanguageModel: The LLM to be used in the Chain
-            client: AmazonPersonalize: The client created to support
-                invoking AmazonPersonalize
-            prompt_template: PromptTemplate: The prompt template which can be
-                invoked with the output from Amazon Personalize
-            is_ranking_recipe: bool: default: False: specifies
-                if the trained recipe is USER_PERSONALIZED_RANKING
-
-        Example:
-            .. code-block:: python
-
-                chain = PersonalizeChain.from_llm(llm=agent_llm,
-                    client=personalize_lg, return_direct=True)\n
-                response = chain.run({'user_id':'1'})\n
-                response = chain.run({'user_id':'1', 'item_id':'234'})
-
-                RANDOM_PROMPT = PromptTemplate.from_template("Summarize recommendations in {result}")
-                chain = PersonalizeChain.from_llm(llm=agent_llm,
-                    client=personalize_lg, prompt_template=RANDOM_PROMPT)\n
-        """
-        summarization_chain = LLMChain(llm=llm, prompt=prompt_template)
-
-        return cls(
-            summarization_chain=summarization_chain,
-            client=client,
-            is_ranking_recipe=is_ranking_recipe,
-            **kwargs,
-        )
-
-    def _call(
-        self,
-        inputs: Mapping[str, Any],
-        run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
-        """Retrieves recommendations by invoking Amazon Personalize,
-        and invokes an LLM using the default/overridden
-        prompt template with the output from Amazon Personalize
-
-        Args:
-            inputs: Mapping [str, Any] : Provide input identifiers in a map.
-                For example - {'user_id':'1'} or
-                {'user_id':'1', 'item_id':'123'}. You can also pass the
-                filter_arn, filter_values as an
-                input.
- """ - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - callbacks = _run_manager.get_child() - - user_id = inputs.get(USER_ID_INPUT_KEY) - item_id = inputs.get(ITEM_ID_INPUT_KEY) - input_list = inputs.get(INPUT_LIST_INPUT_KEY) - filter_arn = inputs.get(FILTER_ARN_INPUT_KEY) - filter_values = inputs.get(FILTER_VALUES_INPUT_KEY) - promotions = inputs.get(PROMOTIONS_INPUT_KEY) - context = inputs.get(CONTEXT_INPUT_KEY) - metadata_columns = inputs.get(METADATA_COLUMNS_INPUT_KEY) - - intermediate_steps: List = [] - intermediate_steps.append({"Calling Amazon Personalize"}) - - if self.is_ranking_recipe: - response = self.client.get_personalized_ranking( - user_id=str(user_id), - input_list=cast(List[str], input_list), - filter_arn=filter_arn, - filter_values=filter_values, - context=context, - metadata_columns=metadata_columns, - ) - else: - response = self.client.get_recommendations( - user_id=user_id, - item_id=item_id, - filter_arn=filter_arn, - filter_values=filter_values, - context=context, - promotions=promotions, - metadata_columns=metadata_columns, - ) - - _run_manager.on_text("Call to Amazon Personalize complete \n") - - if self.return_direct: - final_result = response - else: - result = self.summarization_chain( - {RESULT_OUTPUT_KEY: response}, callbacks=callbacks - ) - final_result = result[self.summarization_chain.output_key] - - intermediate_steps.append({"context": response}) - chain_result: Dict[str, Any] = {RESULT_OUTPUT_KEY: final_result} - if self.return_intermediate_steps: - chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps - return chain_result - - @property - def _chain_type(self) -> str: - return "amazon_personalize_chain" diff --git a/libs/experimental/langchain_experimental/retrievers/__init__.py b/libs/experimental/langchain_experimental/retrievers/__init__.py deleted file mode 100644 index 0f8ca9cb81f65..0000000000000 --- a/libs/experimental/langchain_experimental/retrievers/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""**Retriever** class returns Documents given a text **query**. - -It is more general than a vector store. A retriever does not need to be able to -store documents, only to return (or retrieve) it. 
-""" diff --git a/libs/experimental/langchain_experimental/retrievers/vector_sql_database.py b/libs/experimental/langchain_experimental/retrievers/vector_sql_database.py deleted file mode 100644 index 58b41e4c5c3b9..0000000000000 --- a/libs/experimental/langchain_experimental/retrievers/vector_sql_database.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Vector SQL Database Chain Retriever""" - -from typing import Any, Dict, List - -from langchain_core.callbacks.manager import ( - AsyncCallbackManagerForRetrieverRun, - CallbackManagerForRetrieverRun, -) -from langchain_core.documents import Document -from langchain_core.retrievers import BaseRetriever - -from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain - - -class VectorSQLDatabaseChainRetriever(BaseRetriever): - """Retriever that uses Vector SQL Database.""" - - sql_db_chain: VectorSQLDatabaseChain - """SQL Database Chain""" - page_content_key: str = "content" - """column name for page content of documents""" - - def _get_relevant_documents( - self, - query: str, - *, - run_manager: CallbackManagerForRetrieverRun, - **kwargs: Any, - ) -> List[Document]: - ret: List[Dict[str, Any]] = self.sql_db_chain( - query, callbacks=run_manager.get_child(), **kwargs - )["result"] - return [ - Document(page_content=r[self.page_content_key], metadata=r) for r in ret - ] - - async def _aget_relevant_documents( - self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun - ) -> List[Document]: - raise NotImplementedError diff --git a/libs/experimental/langchain_experimental/rl_chain/__init__.py b/libs/experimental/langchain_experimental/rl_chain/__init__.py deleted file mode 100644 index eac581f967620..0000000000000 --- a/libs/experimental/langchain_experimental/rl_chain/__init__.py +++ /dev/null @@ -1,63 +0,0 @@ -""" -**RL (Reinforcement Learning) Chain** leverages the `Vowpal Wabbit (VW)` models -for reinforcement learning with a context, with the goal of modifying -the prompt before the LLM call. - -[Vowpal Wabbit](https://vowpalwabbit.org/) provides fast, efficient, -and flexible online machine learning techniques for reinforcement learning, -supervised learning, and more. 
-""" - -import logging - -from langchain_experimental.rl_chain.base import ( - AutoSelectionScorer, - BasedOn, - Embed, - Embedder, - Policy, - SelectionScorer, - ToSelectFrom, - VwPolicy, -) -from langchain_experimental.rl_chain.helpers import embed, stringify_embedding -from langchain_experimental.rl_chain.pick_best_chain import ( - PickBest, - PickBestEvent, - PickBestFeatureEmbedder, - PickBestRandomPolicy, - PickBestSelected, -) - - -def configure_logger() -> None: - logger = logging.getLogger(__name__) - logger.setLevel(logging.INFO) - ch = logging.StreamHandler() - formatter = logging.Formatter( - "%(asctime)s - %(name)s - %(levelname)s - %(message)s" - ) - ch.setFormatter(formatter) - ch.setLevel(logging.INFO) - logger.addHandler(ch) - - -configure_logger() - -__all__ = [ - "PickBest", - "PickBestEvent", - "PickBestSelected", - "PickBestFeatureEmbedder", - "PickBestRandomPolicy", - "Embed", - "BasedOn", - "ToSelectFrom", - "SelectionScorer", - "AutoSelectionScorer", - "Embedder", - "Policy", - "VwPolicy", - "embed", - "stringify_embedding", -] diff --git a/libs/experimental/langchain_experimental/rl_chain/base.py b/libs/experimental/langchain_experimental/rl_chain/base.py deleted file mode 100644 index 2bd538f458d89..0000000000000 --- a/libs/experimental/langchain_experimental/rl_chain/base.py +++ /dev/null @@ -1,547 +0,0 @@ -from __future__ import annotations - -import logging -import os -from abc import ABC, abstractmethod -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Generic, - List, - Optional, - Tuple, - Type, - TypeVar, - Union, -) - -from langchain.chains.base import Chain -from langchain.chains.llm import LLMChain -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.prompts import ( - BasePromptTemplate, - ChatPromptTemplate, - HumanMessagePromptTemplate, - SystemMessagePromptTemplate, -) -from pydantic import BaseModel, ConfigDict, model_validator - -from langchain_experimental.rl_chain.helpers import _Embed -from langchain_experimental.rl_chain.metrics import ( - MetricsTrackerAverage, - MetricsTrackerRollingWindow, -) -from langchain_experimental.rl_chain.model_repository import ModelRepository -from langchain_experimental.rl_chain.vw_logger import VwLogger - -if TYPE_CHECKING: - import vowpal_wabbit_next as vw - -logger = logging.getLogger(__name__) - - -class _BasedOn: - def __init__(self, value: Any): - self.value = value - - def __str__(self) -> str: - return str(self.value) - - __repr__ = __str__ - - -def BasedOn(anything: Any) -> _BasedOn: - """Wrap a value to indicate that it should be based on.""" - - return _BasedOn(anything) - - -class _ToSelectFrom: - def __init__(self, value: Any): - self.value = value - - def __str__(self) -> str: - return str(self.value) - - __repr__ = __str__ - - -def ToSelectFrom(anything: Any) -> _ToSelectFrom: - """Wrap a value to indicate that it should be selected from.""" - - if not isinstance(anything, list): - raise ValueError("ToSelectFrom must be a list to select from") - return _ToSelectFrom(anything) - - -def Embed(anything: Any, keep: bool = False) -> Any: - """Wrap a value to indicate that it should be embedded.""" - - if isinstance(anything, _ToSelectFrom): - return ToSelectFrom(Embed(anything.value, keep=keep)) - elif isinstance(anything, _BasedOn): - return BasedOn(Embed(anything.value, keep=keep)) - if isinstance(anything, list): - return [Embed(v, keep=keep) for v in anything] - elif isinstance(anything, dict): - return {k: Embed(v, keep=keep) for k, v in 
anything.items()} - elif isinstance(anything, _Embed): - return anything - return _Embed(anything, keep=keep) - - -def EmbedAndKeep(anything: Any) -> Any: - """Wrap a value to indicate that it should be embedded and kept.""" - - return Embed(anything, keep=True) - - -# helper functions - - -def parse_lines(parser: "vw.TextFormatParser", input_str: str) -> List["vw.Example"]: - """Parse the input string into a list of examples.""" - - return [parser.parse_line(line) for line in input_str.split("\n")] - - -def get_based_on_and_to_select_from(inputs: Dict[str, Any]) -> Tuple[Dict, Dict]: - """Get the BasedOn and ToSelectFrom from the inputs.""" - to_select_from = { - k: inputs[k].value - for k in inputs.keys() - if isinstance(inputs[k], _ToSelectFrom) - } - - if not to_select_from: - raise ValueError( - "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from." # noqa: E501 - ) - - based_on = { - k: inputs[k].value if isinstance(inputs[k].value, list) else [inputs[k].value] - for k in inputs.keys() - if isinstance(inputs[k], _BasedOn) - } - - return based_on, to_select_from - - -def prepare_inputs_for_autoembed(inputs: Dict[str, Any]) -> Dict[str, Any]: - """Prepare the inputs for auto embedding. - - Go over all the inputs and if something is either wrapped in _ToSelectFrom or _BasedOn, and if their inner values are not already _Embed, - then wrap them in EmbedAndKeep while retaining their _ToSelectFrom or _BasedOn status - """ # noqa: E501 - - next_inputs = inputs.copy() - for k, v in next_inputs.items(): - if isinstance(v, _ToSelectFrom) or isinstance(v, _BasedOn): - if not isinstance(v.value, _Embed): - next_inputs[k].value = EmbedAndKeep(v.value) - return next_inputs - - -# end helper functions - - -class Selected(ABC): - """Abstract class to represent the selected item.""" - - pass - - -TSelected = TypeVar("TSelected", bound=Selected) - - -class Event(Generic[TSelected], ABC): - """Abstract class to represent an event.""" - - inputs: Dict[str, Any] - selected: Optional[TSelected] - - def __init__(self, inputs: Dict[str, Any], selected: Optional[TSelected] = None): - self.inputs = inputs - self.selected = selected - - -TEvent = TypeVar("TEvent", bound=Event) - - -class Policy(Generic[TEvent], ABC): - """Abstract class to represent a policy.""" - - def __init__(self, **kwargs: Any): - pass - - @abstractmethod - def predict(self, event: TEvent) -> Any: ... - - @abstractmethod - def learn(self, event: TEvent) -> None: ... - - @abstractmethod - def log(self, event: TEvent) -> None: ... 
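-
-    # `save` is a no-op by default; concrete policies such as `VwPolicy`
-    # below override it to persist the learned model.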
- - def save(self) -> None: - pass - - -class VwPolicy(Policy): - """Vowpal Wabbit policy.""" - - def __init__( - self, - model_repo: ModelRepository, - vw_cmd: List[str], - feature_embedder: Embedder, - vw_logger: VwLogger, - *args: Any, - **kwargs: Any, - ): - super().__init__(*args, **kwargs) - self.model_repo = model_repo - self.workspace = self.model_repo.load(vw_cmd) - self.feature_embedder = feature_embedder - self.vw_logger = vw_logger - - def predict(self, event: TEvent) -> Any: - import vowpal_wabbit_next as vw - - text_parser = vw.TextFormatParser(self.workspace) - return self.workspace.predict_one( - parse_lines(text_parser, self.feature_embedder.format(event)) - ) - - def learn(self, event: TEvent) -> None: - import vowpal_wabbit_next as vw - - vw_ex = self.feature_embedder.format(event) - text_parser = vw.TextFormatParser(self.workspace) - multi_ex = parse_lines(text_parser, vw_ex) - self.workspace.learn_one(multi_ex) - - def log(self, event: TEvent) -> None: - if self.vw_logger.logging_enabled(): - vw_ex = self.feature_embedder.format(event) - self.vw_logger.log(vw_ex) - - def save(self) -> None: - self.model_repo.save(self.workspace) - - -class Embedder(Generic[TEvent], ABC): - """Abstract class to represent an embedder.""" - - def __init__(self, *args: Any, **kwargs: Any): - pass - - @abstractmethod - def format(self, event: TEvent) -> str: ... - - -class SelectionScorer(Generic[TEvent], ABC, BaseModel): - """Abstract class to grade the chosen selection or the response of the llm.""" - - @abstractmethod - def score_response( - self, inputs: Dict[str, Any], llm_response: str, event: TEvent - ) -> float: ... - - -class AutoSelectionScorer(SelectionScorer[Event], BaseModel): - """Auto selection scorer.""" - - llm_chain: LLMChain - prompt: Union[BasePromptTemplate, None] = None - scoring_criteria_template_str: Optional[str] = None - - @staticmethod - def get_default_system_prompt() -> SystemMessagePromptTemplate: - return SystemMessagePromptTemplate.from_template( - "PLEASE RESPOND ONLY WITH A SINGLE FLOAT AND NO OTHER TEXT EXPLANATION\n \ - You are a strict judge that is called on to rank a response based on \ - given criteria. You must respond with your ranking by providing a \ - single float within the range [0, 1], 0 being very bad \ - response and 1 being very good response." - ) - - @staticmethod - def get_default_prompt() -> ChatPromptTemplate: - human_template = 'Given this based_on "{rl_chain_selected_based_on}" \ - as the most important attribute, rank how good or bad this text is: \ - "{rl_chain_selected}".' 
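-        # "{rl_chain_selected}" and "{rl_chain_selected_based_on}" above are
-        # reserved input keys that the chain itself fills in before scoring
-        # (see `_validate_inputs` and `_call_after_llm_before_scoring`).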
- human_message_prompt = HumanMessagePromptTemplate.from_template(human_template) - default_system_prompt = AutoSelectionScorer.get_default_system_prompt() - chat_prompt = ChatPromptTemplate.from_messages( - [default_system_prompt, human_message_prompt] - ) - return chat_prompt - - @model_validator(mode="before") - @classmethod - def set_prompt_and_llm_chain(cls, values: Dict[str, Any]) -> Any: - llm = values.get("llm") - prompt = values.get("prompt") - scoring_criteria_template_str = values.get("scoring_criteria_template_str") - if prompt is None and scoring_criteria_template_str is None: - prompt = AutoSelectionScorer.get_default_prompt() - elif prompt is None and scoring_criteria_template_str is not None: - human_message_prompt = HumanMessagePromptTemplate.from_template( - scoring_criteria_template_str - ) - default_system_prompt = AutoSelectionScorer.get_default_system_prompt() - prompt = ChatPromptTemplate.from_messages( - [default_system_prompt, human_message_prompt] - ) - values["prompt"] = prompt - values["llm_chain"] = LLMChain(llm=llm, prompt=prompt) # type: ignore[arg-type, arg-type] - return values - - def score_response( - self, inputs: Dict[str, Any], llm_response: str, event: Event - ) -> float: - ranking = self.llm_chain.predict(llm_response=llm_response, **inputs) - ranking = ranking.strip() - try: - resp = float(ranking) - return resp - except Exception as e: - raise RuntimeError( - f"The auto selection scorer did not manage to score the response, there is always the option to try again or tweak the reward prompt. Error: {e}" # noqa: E501 - ) - - -class RLChain(Chain, Generic[TEvent]): - """Chain that leverages the Vowpal Wabbit (VW) model as a learned policy - for reinforcement learning. - - Attributes: - - llm_chain (Chain): Represents the underlying Language Model chain. - - prompt (BasePromptTemplate): The template for the base prompt. - - selection_scorer (Union[SelectionScorer, None]): Scorer for the selection. Can be set to None. - - policy (Optional[Policy]): The policy used by the chain to learn to populate a dynamic prompt. - - auto_embed (bool): Determines if embedding should be automatic. Default is False. - - metrics (Optional[Union[MetricsTrackerRollingWindow, MetricsTrackerAverage]]): Tracker for metrics, can be set to None. - - Initialization Attributes: - - feature_embedder (Embedder): Embedder used for the `BasedOn` and `ToSelectFrom` inputs. - - model_save_dir (str, optional): Directory for saving the VW model. Default is the current directory. - - reset_model (bool): If set to True, the model starts training from scratch. Default is False. - - vw_cmd (List[str], optional): Command line arguments for the VW model. - - policy (Type[VwPolicy]): Policy used by the chain. - - vw_logs (Optional[Union[str, os.PathLike]]): Path for the VW logs. - - metrics_step (int): Step for the metrics tracker. Default is -1. If set without metrics_window_size, average metrics will be tracked, otherwise rolling window metrics will be tracked. - - metrics_window_size (int): Window size for the metrics tracker. Default is -1. If set, rolling window metrics will be tracked. - - Notes: - The class initializes the VW model using the provided arguments. If `selection_scorer` is not provided, a warning is logged, indicating that no reinforcement learning will occur unless the `update_with_delayed_score` method is called. 
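-
-    Example:
-        A minimal sketch of delayed scoring (assumes `chain` is a concrete
-        subclass such as `PickBest` and `inputs` holds the wrapped
-        `BasedOn`/`ToSelectFrom` variables):
-
-        .. code-block:: python
-
-            chain.deactivate_selection_scorer()
-            response = chain.run(inputs)
-            # ... collect real feedback out of band ...
-            chain.update_with_delayed_score(score=0.8, chain_response=response)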
- """ # noqa: E501 - - class _NoOpPolicy(Policy): - """Placeholder policy that does nothing""" - - def predict(self, event: TEvent) -> Any: - return None - - def learn(self, event: TEvent) -> None: - pass - - def log(self, event: TEvent) -> None: - pass - - llm_chain: Chain - - output_key: str = "result" #: :meta private: - prompt: BasePromptTemplate - selection_scorer: Union[SelectionScorer, None] - active_policy: Policy = _NoOpPolicy() - auto_embed: bool = False - selection_scorer_activated: bool = True - selected_input_key: str = "rl_chain_selected" - selected_based_on_input_key: str = "rl_chain_selected_based_on" - metrics: Optional[Union[MetricsTrackerRollingWindow, MetricsTrackerAverage]] = None - - def __init__( - self, - feature_embedder: Embedder, - model_save_dir: str = "./", - reset_model: bool = False, - vw_cmd: Optional[List[str]] = None, - policy: Type[Policy] = VwPolicy, - vw_logs: Optional[Union[str, os.PathLike]] = None, - metrics_step: int = -1, - metrics_window_size: int = -1, - *args: Any, - **kwargs: Any, - ): - super().__init__(*args, **kwargs) - if self.selection_scorer is None: - logger.warning( - "No selection scorer provided, which means that no \ - reinforcement learning will be done in the RL chain \ - unless update_with_delayed_score is called." - ) - - if isinstance(self.active_policy, RLChain._NoOpPolicy): - self.active_policy = policy( - model_repo=ModelRepository( - model_save_dir, with_history=True, reset=reset_model - ), - vw_cmd=vw_cmd or [], - feature_embedder=feature_embedder, - vw_logger=VwLogger(vw_logs), - ) - - if metrics_window_size > 0: - self.metrics = MetricsTrackerRollingWindow( - step=metrics_step, window_size=metrics_window_size - ) - else: - self.metrics = MetricsTrackerAverage(step=metrics_step) - - model_config = ConfigDict( - arbitrary_types_allowed=True, - extra="forbid", - ) - - @property - def input_keys(self) -> List[str]: - """Expect input key. - :meta private: - """ - return [] - - @property - def output_keys(self) -> List[str]: - """Expect output key. - - :meta private: - """ - return [self.output_key] - - def update_with_delayed_score( - self, score: float, chain_response: Dict[str, Any], force_score: bool = False - ) -> None: - """ - Updates the learned policy with the score provided. - Will raise an error if selection_scorer is set, and force_score=True was not provided during the method call - """ # noqa: E501 - if self._can_use_selection_scorer() and not force_score: - raise RuntimeError( - "The selection scorer is set, and force_score was not set to True. Please set force_score=True to use this function." # noqa: E501 - ) - if self.metrics: - self.metrics.on_feedback(score) - event: TEvent = chain_response["selection_metadata"] - self._call_after_scoring_before_learning(event=event, score=score) - self.active_policy.learn(event=event) - self.active_policy.log(event=event) - - def deactivate_selection_scorer(self) -> None: - """ - Deactivates the selection scorer, meaning that the chain will no longer attempt to use the selection scorer to score responses. - """ # noqa: E501 - self.selection_scorer_activated = False - - def activate_selection_scorer(self) -> None: - """ - Activates the selection scorer, meaning that the chain will attempt to use the selection scorer to score responses. - """ # noqa: E501 - self.selection_scorer_activated = True - - def save_progress(self) -> None: - """ - This function should be called to save the state of the learned policy model. 
- """ - self.active_policy.save() - - def _validate_inputs(self, inputs: Dict[str, Any]) -> None: - super()._validate_inputs(inputs) - if ( - self.selected_input_key in inputs.keys() - or self.selected_based_on_input_key in inputs.keys() - ): - raise ValueError( - f"The rl chain does not accept '{self.selected_input_key}' or '{self.selected_based_on_input_key}' as input keys, they are reserved for internal use during auto reward." # noqa: E501 - ) - - def _can_use_selection_scorer(self) -> bool: - """ - Returns whether the chain can use the selection scorer to score responses or not. - """ # noqa: E501 - return self.selection_scorer is not None and self.selection_scorer_activated - - @abstractmethod - def _call_before_predict(self, inputs: Dict[str, Any]) -> TEvent: ... - - @abstractmethod - def _call_after_predict_before_llm( - self, inputs: Dict[str, Any], event: TEvent, prediction: Any - ) -> Tuple[Dict[str, Any], TEvent]: ... - - @abstractmethod - def _call_after_llm_before_scoring( - self, llm_response: str, event: TEvent - ) -> Tuple[Dict[str, Any], TEvent]: ... - - @abstractmethod - def _call_after_scoring_before_learning( - self, event: TEvent, score: Optional[float] - ) -> TEvent: ... - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - - event: TEvent = self._call_before_predict(inputs=inputs) - prediction = self.active_policy.predict(event=event) - if self.metrics: - self.metrics.on_decision() - - next_chain_inputs, event = self._call_after_predict_before_llm( - inputs=inputs, event=event, prediction=prediction - ) - - t = self.llm_chain.run(**next_chain_inputs, callbacks=_run_manager.get_child()) - _run_manager.on_text(t, color="green", verbose=self.verbose) - t = t.strip() - - if self.verbose: - _run_manager.on_text("\nCode: ", verbose=self.verbose) - - output = t - _run_manager.on_text("\nAnswer: ", verbose=self.verbose) - _run_manager.on_text(output, color="yellow", verbose=self.verbose) - - next_chain_inputs, event = self._call_after_llm_before_scoring( - llm_response=output, event=event - ) - - score = None - try: - if self._can_use_selection_scorer(): - score = self.selection_scorer.score_response( # type: ignore - inputs=next_chain_inputs, llm_response=output, event=event - ) - except Exception as e: - logger.info( - f"The selection scorer was not able to score, \ - and the chain was not able to adjust to this response, error: {e}" - ) - if self.metrics and score is not None: - self.metrics.on_feedback(score) - - event = self._call_after_scoring_before_learning(score=score, event=event) - self.active_policy.learn(event=event) - self.active_policy.log(event=event) - - return {self.output_key: {"response": output, "selection_metadata": event}} - - @property - def _chain_type(self) -> str: - return "llm_personalizer_chain" diff --git a/libs/experimental/langchain_experimental/rl_chain/helpers.py b/libs/experimental/langchain_experimental/rl_chain/helpers.py deleted file mode 100644 index e4e221089e600..0000000000000 --- a/libs/experimental/langchain_experimental/rl_chain/helpers.py +++ /dev/null @@ -1,114 +0,0 @@ -from __future__ import annotations - -from typing import Any, Dict, List, Optional, Union - - -class _Embed: - def __init__(self, value: Any, keep: bool = False): - self.value = value - self.keep = keep - - def __str__(self) -> str: - return str(self.value) - - __repr__ = __str__ - - -def 
stringify_embedding(embedding: List) -> str: - """Convert an embedding to a string.""" - - return " ".join([f"{i}:{e}" for i, e in enumerate(embedding)]) - - -def is_stringtype_instance(item: Any) -> bool: - """Check if an item is a string.""" - - return isinstance(item, str) or ( - isinstance(item, _Embed) and isinstance(item.value, str) - ) - - -def embed_string_type( - item: Union[str, _Embed], model: Any, namespace: Optional[str] = None -) -> Dict[str, Union[str, List[str]]]: - """Embed a string or an _Embed object.""" - - keep_str = "" - if isinstance(item, _Embed): - encoded = stringify_embedding(model.encode(item.value)) - if item.keep: - keep_str = item.value.replace(" ", "_") + " " - elif isinstance(item, str): - encoded = item.replace(" ", "_") - else: - raise ValueError(f"Unsupported type {type(item)} for embedding") - - if namespace is None: - raise ValueError( - "The default namespace must be provided when embedding a string or _Embed object." # noqa: E501 - ) - - return {namespace: keep_str + encoded} - - -def embed_dict_type(item: Dict, model: Any) -> Dict[str, Any]: - """Embed a dictionary item.""" - inner_dict: Dict = {} - for ns, embed_item in item.items(): - if isinstance(embed_item, list): - inner_dict[ns] = [] - for embed_list_item in embed_item: - embedded = embed_string_type(embed_list_item, model, ns) - inner_dict[ns].append(embedded[ns]) - else: - inner_dict.update(embed_string_type(embed_item, model, ns)) - return inner_dict - - -def embed_list_type( - item: list, model: Any, namespace: Optional[str] = None -) -> List[Dict[str, Union[str, List[str]]]]: - """Embed a list item.""" - - ret_list: List = [] - for embed_item in item: - if isinstance(embed_item, dict): - ret_list.append(embed_dict_type(embed_item, model)) - elif isinstance(embed_item, list): - item_embedding = embed_list_type(embed_item, model, namespace) - # Get the first key from the first dictionary - first_key = next(iter(item_embedding[0])) - # Group the values under that key - grouping = {first_key: [item[first_key] for item in item_embedding]} - ret_list.append(grouping) - else: - ret_list.append(embed_string_type(embed_item, model, namespace)) - return ret_list - - -def embed( - to_embed: Union[Union[str, _Embed], Dict, List[Union[str, _Embed]], List[Dict]], - model: Any, - namespace: Optional[str] = None, -) -> List[Dict[str, Union[str, List[str]]]]: - """ - Embed the actions or context using the SentenceTransformer model - (or a model that has an `encode` function). - - Attributes: - to_embed: (Union[Union(str, _Embed(str)), Dict, List[Union(str, _Embed(str))], List[Dict]], required) The text to be embedded, either a string, a list of strings or a dictionary or a list of dictionaries. - namespace: (str, optional) The default namespace to use when dictionary or list of dictionaries not provided. 
- model: (Any, required) The model to use for embedding - Returns: - List[Dict[str, str]]: A list of dictionaries where each dictionary has the namespace as the key and the embedded string as the value - """ # noqa: E501 - if (isinstance(to_embed, _Embed) and isinstance(to_embed.value, str)) or isinstance( - to_embed, str - ): - return [embed_string_type(to_embed, model, namespace)] - elif isinstance(to_embed, dict): - return [embed_dict_type(to_embed, model)] - elif isinstance(to_embed, list): - return embed_list_type(to_embed, model, namespace) - else: - raise ValueError("Invalid input format for embedding") diff --git a/libs/experimental/langchain_experimental/rl_chain/metrics.py b/libs/experimental/langchain_experimental/rl_chain/metrics.py deleted file mode 100644 index 58663a4b15451..0000000000000 --- a/libs/experimental/langchain_experimental/rl_chain/metrics.py +++ /dev/null @@ -1,70 +0,0 @@ -from collections import deque -from typing import TYPE_CHECKING, Dict, List, Union - -if TYPE_CHECKING: - import pandas as pd - - -class MetricsTrackerAverage: - """Metrics Tracker Average.""" - - def __init__(self, step: int): - self.history: List[Dict[str, Union[int, float]]] = [{"step": 0, "score": 0}] - self.step: int = step - self.i: int = 0 - self.num: float = 0 - self.denom: float = 0 - - @property - def score(self) -> float: - return self.num / self.denom if self.denom > 0 else 0 - - def on_decision(self) -> None: - self.denom += 1 - - def on_feedback(self, score: float) -> None: - self.num += score or 0 - self.i += 1 - if self.step > 0 and self.i % self.step == 0: - self.history.append({"step": self.i, "score": self.score}) - - def to_pandas(self) -> "pd.DataFrame": - import pandas as pd - - return pd.DataFrame(self.history) - - -class MetricsTrackerRollingWindow: - """Metrics Tracker Rolling Window.""" - - def __init__(self, window_size: int, step: int): - self.history: List[Dict[str, Union[int, float]]] = [{"step": 0, "score": 0}] - self.step: int = step - self.i: int = 0 - self.window_size: int = window_size - self.queue: deque = deque() - self.sum: float = 0.0 - - @property - def score(self) -> float: - return self.sum / len(self.queue) if len(self.queue) > 0 else 0 - - def on_decision(self) -> None: - pass - - def on_feedback(self, value: float) -> None: - self.sum += value - self.queue.append(value) - self.i += 1 - - if len(self.queue) > self.window_size: - old_val = self.queue.popleft() - self.sum -= old_val - - if self.step > 0 and self.i % self.step == 0: - self.history.append({"step": self.i, "score": self.sum / len(self.queue)}) - - def to_pandas(self) -> "pd.DataFrame": - import pandas as pd - - return pd.DataFrame(self.history) diff --git a/libs/experimental/langchain_experimental/rl_chain/model_repository.py b/libs/experimental/langchain_experimental/rl_chain/model_repository.py deleted file mode 100644 index ae5f33a0dcfc4..0000000000000 --- a/libs/experimental/langchain_experimental/rl_chain/model_repository.py +++ /dev/null @@ -1,65 +0,0 @@ -import datetime -import glob -import logging -import os -import shutil -from pathlib import Path -from typing import TYPE_CHECKING, List, Union - -if TYPE_CHECKING: - import vowpal_wabbit_next as vw - -logger = logging.getLogger(__name__) - - -class ModelRepository: - """Model Repository.""" - - def __init__( - self, - folder: Union[str, os.PathLike], - with_history: bool = True, - reset: bool = False, - ): - self.folder = Path(folder) - self.model_path = self.folder / "latest.vw" - self.with_history = with_history - if reset and 
self.has_history(): - logger.warning( - "There is non empty history which is recommended to be cleaned up" - ) - if self.model_path.exists(): - os.remove(self.model_path) - - self.folder.mkdir(parents=True, exist_ok=True) - - def get_tag(self) -> str: - return datetime.datetime.now().strftime("%Y%m%d-%H%M%S") - - def has_history(self) -> bool: - return len(glob.glob(str(self.folder / "model-????????-??????.vw"))) > 0 - - def save(self, workspace: "vw.Workspace") -> None: - with open(self.model_path, "wb") as f: - logger.info(f"storing rl_chain model in: {self.model_path}") - f.write(workspace.serialize()) - if self.with_history: # write history - shutil.copyfile(self.model_path, self.folder / f"model-{self.get_tag()}.vw") - - def load(self, commandline: List[str]) -> "vw.Workspace": - try: - import vowpal_wabbit_next as vw - except ImportError as e: - raise ImportError( - "Unable to import vowpal_wabbit_next, please install with " - "`pip install vowpal_wabbit_next`." - ) from e - - model_data = None - if self.model_path.exists(): - with open(self.model_path, "rb") as f: - model_data = f.read() - if model_data: - logger.info(f"rl_chain model is loaded from: {self.model_path}") - return vw.Workspace(commandline, model_data=model_data) - return vw.Workspace(commandline) diff --git a/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py b/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py deleted file mode 100644 index 73ccdadcb4b92..0000000000000 --- a/libs/experimental/langchain_experimental/rl_chain/pick_best_chain.py +++ /dev/null @@ -1,419 +0,0 @@ -from __future__ import annotations - -import logging -from typing import Any, Dict, List, Optional, Tuple, Type, Union - -from langchain.base_language import BaseLanguageModel -from langchain.chains.llm import LLMChain -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.prompts import BasePromptTemplate - -import langchain_experimental.rl_chain.base as base -from langchain_experimental.rl_chain.helpers import embed - -logger = logging.getLogger(__name__) - -# sentinel object used to distinguish between -# user didn't supply anything or user explicitly supplied None -SENTINEL = object() - - -class PickBestSelected(base.Selected): - """Selected class for PickBest chain.""" - - index: Optional[int] - probability: Optional[float] - score: Optional[float] - - def __init__( - self, - index: Optional[int] = None, - probability: Optional[float] = None, - score: Optional[float] = None, - ): - self.index = index - self.probability = probability - self.score = score - - -class PickBestEvent(base.Event[PickBestSelected]): - """Event class for PickBest chain.""" - - def __init__( - self, - inputs: Dict[str, Any], - to_select_from: Dict[str, Any], - based_on: Dict[str, Any], - selected: Optional[PickBestSelected] = None, - ): - super().__init__(inputs=inputs, selected=selected) - self.to_select_from = to_select_from - self.based_on = based_on - - -class PickBestFeatureEmbedder(base.Embedder[PickBestEvent]): - """Embed the `BasedOn` and `ToSelectFrom` inputs into a format that can be used - by the learning policy. - - Attributes: - model name (Any, optional): The type of embeddings to be used for feature representation. Defaults to BERT SentenceTransformer. 
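-
-    With `auto_embed` off, an event with `based_on={"User": ["Tom"]}` and
-    `to_select_from={"action": ["pizza", "sushi"]}` is rendered for VW
-    roughly as:
-
-    .. code-block:: text
-
-        shared |User Tom
-        |action pizza
-        |action sushi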
- """ # noqa E501 - - def __init__( - self, auto_embed: bool, model: Optional[Any] = None, *args: Any, **kwargs: Any - ): - super().__init__(*args, **kwargs) - - if model is None: - from sentence_transformers import SentenceTransformer - - model = SentenceTransformer("all-mpnet-base-v2") - - self.model = model - self.auto_embed = auto_embed - - @staticmethod - def _str(embedding: List[float]) -> str: - return " ".join([f"{i}:{e}" for i, e in enumerate(embedding)]) - - def get_label(self, event: PickBestEvent) -> tuple: - cost = None - if event.selected: - chosen_action = event.selected.index - cost = ( - -1.0 * event.selected.score - if event.selected.score is not None - else None - ) - prob = event.selected.probability - return chosen_action, cost, prob - else: - return None, None, None - - def get_context_and_action_embeddings(self, event: PickBestEvent) -> tuple: - context_emb = embed(event.based_on, self.model) if event.based_on else None - to_select_from_var_name, to_select_from = next( - iter(event.to_select_from.items()), (None, None) - ) - - action_embs = ( - ( - embed(to_select_from, self.model, to_select_from_var_name) - if event.to_select_from - else None - ) - if to_select_from - else None - ) - - if not context_emb or not action_embs: - raise ValueError( - "Context and to_select_from must be provided in the inputs dictionary" - ) - return context_emb, action_embs - - def get_indexed_dot_product(self, context_emb: List, action_embs: List) -> Dict: - import numpy as np - - unique_contexts = set() - for context_item in context_emb: - for ns, ee in context_item.items(): - if isinstance(ee, list): - for ea in ee: - unique_contexts.add(f"{ns}={ea}") - else: - unique_contexts.add(f"{ns}={ee}") - - encoded_contexts = self.model.encode(list(unique_contexts)) - context_embeddings = dict(zip(unique_contexts, encoded_contexts)) - - unique_actions = set() - for action in action_embs: - for ns, e in action.items(): - if isinstance(e, list): - for ea in e: - unique_actions.add(f"{ns}={ea}") - else: - unique_actions.add(f"{ns}={e}") - - encoded_actions = self.model.encode(list(unique_actions)) - action_embeddings = dict(zip(unique_actions, encoded_actions)) - - action_matrix = np.stack([v for k, v in action_embeddings.items()]) - context_matrix = np.stack([v for k, v in context_embeddings.items()]) - dot_product_matrix = np.dot(context_matrix, action_matrix.T) - - indexed_dot_product: Dict = {} - - for i, context_key in enumerate(context_embeddings.keys()): - indexed_dot_product[context_key] = {} - for j, action_key in enumerate(action_embeddings.keys()): - indexed_dot_product[context_key][action_key] = dot_product_matrix[i, j] - - return indexed_dot_product - - def format_auto_embed_on(self, event: PickBestEvent) -> str: - chosen_action, cost, prob = self.get_label(event) - context_emb, action_embs = self.get_context_and_action_embeddings(event) - indexed_dot_product = self.get_indexed_dot_product(context_emb, action_embs) - - action_lines = [] - for i, action in enumerate(action_embs): - line_parts = [] - dot_prods = [] - if cost is not None and chosen_action == i: - line_parts.append(f"{chosen_action}:{cost}:{prob}") - for ns, action in action.items(): - line_parts.append(f"|{ns}") - elements = action if isinstance(action, list) else [action] - nsa = [] - for elem in elements: - line_parts.append(f"{elem}") - ns_a = f"{ns}={elem}" - nsa.append(ns_a) - for k, v in indexed_dot_product.items(): - dot_prods.append(v[ns_a]) - nsa_str = " ".join(nsa) - line_parts.append(f"|# {nsa_str}") - - 
line_parts.append(f"|dotprod {self._str(dot_prods)}") - action_lines.append(" ".join(line_parts)) - - shared = [] - for item in context_emb: - for ns, context in item.items(): - shared.append(f"|{ns}") - elements = context if isinstance(context, list) else [context] - nsc = [] - for elem in elements: - shared.append(f"{elem}") - nsc.append(f"{ns}={elem}") - nsc_str = " ".join(nsc) - shared.append(f"|@ {nsc_str}") - - return "shared " + " ".join(shared) + "\n" + "\n".join(action_lines) - - def format_auto_embed_off(self, event: PickBestEvent) -> str: - """ - Converts the `BasedOn` and `ToSelectFrom` into a format that can be used by VW - """ - chosen_action, cost, prob = self.get_label(event) - context_emb, action_embs = self.get_context_and_action_embeddings(event) - - example_string = "" - example_string += "shared " - for context_item in context_emb: - for ns, based_on in context_item.items(): - e = " ".join(based_on) if isinstance(based_on, list) else based_on - example_string += f"|{ns} {e} " - example_string += "\n" - - for i, action in enumerate(action_embs): - if cost is not None and chosen_action == i: - example_string += f"{chosen_action}:{cost}:{prob} " - for ns, action_embedding in action.items(): - e = ( - " ".join(action_embedding) - if isinstance(action_embedding, list) - else action_embedding - ) - example_string += f"|{ns} {e} " - example_string += "\n" - # Strip the last newline - return example_string[:-1] - - def format(self, event: PickBestEvent) -> str: - if self.auto_embed: - return self.format_auto_embed_on(event) - else: - return self.format_auto_embed_off(event) - - -class PickBestRandomPolicy(base.Policy[PickBestEvent]): - """Random policy for PickBest chain.""" - - def __init__(self, feature_embedder: base.Embedder, **kwargs: Any): - self.feature_embedder = feature_embedder - - def predict(self, event: PickBestEvent) -> List[Tuple[int, float]]: - num_items = len(event.to_select_from) - return [(i, 1.0 / num_items) for i in range(num_items)] - - def learn(self, event: PickBestEvent) -> None: - pass - - def log(self, event: PickBestEvent) -> None: - pass - - -class PickBest(base.RLChain[PickBestEvent]): - """Chain that leverages the Vowpal Wabbit (VW) model for reinforcement learning - with a context, with the goal of modifying the prompt before the LLM call. - - Each invocation of the chain's `run()` method should be equipped with a set of potential actions (`ToSelectFrom`) and will result in the selection of a specific action based on the `BasedOn` input. This chosen action then informs the LLM (Language Model) prompt for the subsequent response generation. - - The standard operation flow of this Chain includes: - 1. The Chain is invoked with inputs containing the `BasedOn` criteria and a list of potential actions (`ToSelectFrom`). - 2. An action is selected based on the `BasedOn` input. - 3. The LLM is called with the dynamic prompt, producing a response. - 4. If a `selection_scorer` is provided, it is used to score the selection. - 5. The internal Vowpal Wabbit model is updated with the `BasedOn` input, the chosen `ToSelectFrom` action, and the resulting score from the scorer. - 6. The final response is returned. - - Expected input dictionary format: - - At least one variable encapsulated within `BasedOn` to serve as the selection criteria. - - A single list variable within `ToSelectFrom`, representing potential actions for the VW model. 
This list can take the form of: - - A list of strings, e.g., `action = ToSelectFrom(["action1", "action2", "action3"])` - - A list of list of strings e.g. `action = ToSelectFrom([["action1", "another identifier of action1"], ["action2", "another identifier of action2"]])` - - A list of dictionaries, where each dictionary represents an action with namespace names as keys and corresponding action strings as values. For instance, `action = ToSelectFrom([{"namespace1": ["action1", "another identifier of action1"], "namespace2": "action2"}, {"namespace1": "action3", "namespace2": "action4"}])`. - - Extends: - RLChain - - Attributes: - feature_embedder (PickBestFeatureEmbedder, optional): Is an advanced attribute. Responsible for embedding the `BasedOn` and `ToSelectFrom` inputs. If omitted, a default embedder is utilized. - """ # noqa E501 - - def __init__( - self, - *args: Any, - **kwargs: Any, - ): - auto_embed = kwargs.get("auto_embed", False) - - feature_embedder = kwargs.get("feature_embedder", None) - if feature_embedder: - if "auto_embed" in kwargs: - logger.warning( - "auto_embed will take no effect when explicit feature_embedder is provided" # noqa E501 - ) - # turning auto_embed off for cli setting below - auto_embed = False - else: - feature_embedder = PickBestFeatureEmbedder(auto_embed=auto_embed) - kwargs["feature_embedder"] = feature_embedder - - vw_cmd = kwargs.get("vw_cmd", []) - if vw_cmd: - if "--cb_explore_adf" not in vw_cmd: - raise ValueError( - "If vw_cmd is specified, it must include --cb_explore_adf" - ) - else: - interactions = ["--interactions=::"] - if auto_embed: - interactions = [ - "--interactions=@#", - "--ignore_linear=@", - "--ignore_linear=#", - ] - vw_cmd = interactions + [ - "--cb_explore_adf", - "--coin", - "--squarecb", - "--quiet", - ] - - kwargs["vw_cmd"] = vw_cmd - - super().__init__(*args, **kwargs) - - def _call_before_predict(self, inputs: Dict[str, Any]) -> PickBestEvent: - context, actions = base.get_based_on_and_to_select_from(inputs=inputs) - if not actions: - raise ValueError( - "No variables using 'ToSelectFrom' found in the inputs. Please include at least one variable containing a list to select from." # noqa E501 - ) - - if len(list(actions.values())) > 1: - raise ValueError( - "Only one variable using 'ToSelectFrom' can be provided in the inputs for the PickBest chain. Please provide only one variable containing a list to select from." # noqa E501 - ) - - if not context: - raise ValueError( - "No variables using 'BasedOn' found in the inputs. Please include at least one variable containing information to base the selected of ToSelectFrom on." 
# noqa E501 - ) - - event = PickBestEvent(inputs=inputs, to_select_from=actions, based_on=context) - return event - - def _call_after_predict_before_llm( - self, - inputs: Dict[str, Any], - event: PickBestEvent, - prediction: List[Tuple[int, float]], - ) -> Tuple[Dict[str, Any], PickBestEvent]: - import numpy as np - - prob_sum = sum(prob for _, prob in prediction) - probabilities = [prob / prob_sum for _, prob in prediction] - ## sample from the pmf - sampled_index = np.random.choice(len(prediction), p=probabilities) - sampled_ap = prediction[sampled_index] - sampled_action = sampled_ap[0] - sampled_prob = sampled_ap[1] - selected = PickBestSelected(index=sampled_action, probability=sampled_prob) - event.selected = selected - - # only one key, value pair in event.to_select_from - key, value = next(iter(event.to_select_from.items())) - next_chain_inputs = inputs.copy() - next_chain_inputs.update({key: value[event.selected.index]}) - return next_chain_inputs, event - - def _call_after_llm_before_scoring( - self, llm_response: str, event: PickBestEvent - ) -> Tuple[Dict[str, Any], PickBestEvent]: - next_chain_inputs = event.inputs.copy() - # only one key, value pair in event.to_select_from - value = next(iter(event.to_select_from.values())) - v = ( - value[event.selected.index] - if event.selected - else event.to_select_from.values() - ) - next_chain_inputs.update( - { - self.selected_based_on_input_key: str(event.based_on), - self.selected_input_key: v, - } - ) - return next_chain_inputs, event - - def _call_after_scoring_before_learning( - self, event: PickBestEvent, score: Optional[float] - ) -> PickBestEvent: - if event.selected: - event.selected.score = score - return event - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - return super()._call(run_manager=run_manager, inputs=inputs) - - @property - def _chain_type(self) -> str: - return "rl_chain_pick_best" - - @classmethod - def from_llm( - cls: Type[PickBest], - llm: BaseLanguageModel, - prompt: BasePromptTemplate, - selection_scorer: Union[base.AutoSelectionScorer, object] = SENTINEL, - **kwargs: Any, - ) -> PickBest: - llm_chain = LLMChain(llm=llm, prompt=prompt) - if selection_scorer is SENTINEL: - selection_scorer = base.AutoSelectionScorer(llm=llm_chain.llm) # type: ignore[call-arg] - - return PickBest( - llm_chain=llm_chain, - prompt=prompt, - selection_scorer=selection_scorer, - **kwargs, - ) diff --git a/libs/experimental/langchain_experimental/rl_chain/vw_logger.py b/libs/experimental/langchain_experimental/rl_chain/vw_logger.py deleted file mode 100644 index 52685e56a3612..0000000000000 --- a/libs/experimental/langchain_experimental/rl_chain/vw_logger.py +++ /dev/null @@ -1,20 +0,0 @@ -from os import PathLike -from pathlib import Path -from typing import Optional, Union - - -class VwLogger: - """Vowpal Wabbit custom logger.""" - - def __init__(self, path: Optional[Union[str, PathLike]]): - self.path = Path(path) if path else None - if self.path: - self.path.parent.mkdir(parents=True, exist_ok=True) - - def log(self, vw_ex: str) -> None: - if self.path: - with open(self.path, "a") as f: - f.write(f"{vw_ex}\n\n") - - def logging_enabled(self) -> bool: - return bool(self.path) diff --git a/libs/experimental/langchain_experimental/smart_llm/__init__.py b/libs/experimental/langchain_experimental/smart_llm/__init__.py deleted file mode 100644 index 3e83d6c76e1e7..0000000000000 --- a/libs/experimental/langchain_experimental/smart_llm/__init__.py +++ 
/dev/null @@ -1,24 +0,0 @@ -"""**SmartGPT** chain is applying self-critique using the `SmartGPT` workflow. - -See details at https://youtu.be/wVzuvf9D9BU - -The workflow performs these 3 steps: -1. **Ideate**: Pass the user prompt to an `Ideation LLM` n_ideas times, - each result is an "idea" -2. **Critique**: Pass the ideas to a `Critique LLM` which looks for flaws in the ideas - & picks the best one -3. **Resolve**: Pass the critique to a `Resolver LLM` which improves upon the best idea - & outputs only the (improved version of) the best output - -In total, the SmartGPT workflow will use n_ideas+2 LLM calls - -Note that SmartLLMChain will only improve results (compared to a basic LLMChain), -when the underlying models have the capability for reflection, which smaller models -often don't. - -Finally, a SmartLLMChain assumes that each underlying LLM outputs exactly 1 result. -""" - -from langchain_experimental.smart_llm.base import SmartLLMChain - -__all__ = ["SmartLLMChain"] diff --git a/libs/experimental/langchain_experimental/smart_llm/base.py b/libs/experimental/langchain_experimental/smart_llm/base.py deleted file mode 100644 index 379e29fa7ae6e..0000000000000 --- a/libs/experimental/langchain_experimental/smart_llm/base.py +++ /dev/null @@ -1,327 +0,0 @@ -"""Chain for applying self-critique using the SmartGPT workflow.""" - -from typing import Any, Dict, List, Optional, Tuple, Type - -from langchain.base_language import BaseLanguageModel -from langchain.chains.base import Chain -from langchain.input import get_colored_text -from langchain.schema import LLMResult, PromptValue -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.prompts.base import BasePromptTemplate -from langchain_core.prompts.chat import ( - AIMessagePromptTemplate, - BaseMessagePromptTemplate, - ChatPromptTemplate, - HumanMessagePromptTemplate, -) -from pydantic import ConfigDict, model_validator - - -class SmartLLMChain(Chain): - """Chain for applying self-critique using the SmartGPT workflow. - - See details at https://youtu.be/wVzuvf9D9BU - - A SmartLLMChain is an LLMChain that instead of simply passing the prompt to the LLM - performs these 3 steps: - 1. Ideate: Pass the user prompt to an ideation LLM n_ideas times, - each result is an "idea" - 2. Critique: Pass the ideas to a critique LLM which looks for flaws in the ideas - & picks the best one - 3. Resolve: Pass the critique to a resolver LLM which improves upon the best idea - & outputs only the (improved version of) the best output - - In total, SmartLLMChain pass will use n_ideas+2 LLM calls - - Note that SmartLLMChain will only improve results (compared to a basic LLMChain), - when the underlying models have the capability for reflection, which smaller models - often don't. - - Finally, a SmartLLMChain assumes that each underlying LLM outputs exactly 1 result. 
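-
-    Example (a minimal sketch; assumes a chat model `llm` capable of
-    reflection):
-
-    .. code-block:: python
-
-        from langchain_core.prompts import PromptTemplate
-
-        prompt = PromptTemplate.from_template("{question}")
-        chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=3)
-        resolution = chain.run(question="How many prime numbers are even?")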
- """ - - class SmartLLMChainHistory: - question: str = "" - ideas: List[str] = [] - critique: str = "" - - @property - def n_ideas(self) -> int: - return len(self.ideas) - - def ideation_prompt_inputs(self) -> Dict[str, Any]: - return {"question": self.question} - - def critique_prompt_inputs(self) -> Dict[str, Any]: - return { - "question": self.question, - **{f"idea_{i+1}": idea for i, idea in enumerate(self.ideas)}, - } - - def resolve_prompt_inputs(self) -> Dict[str, Any]: - return { - "question": self.question, - **{f"idea_{i+1}": idea for i, idea in enumerate(self.ideas)}, - "critique": self.critique, - } - - prompt: BasePromptTemplate - """Prompt object to use.""" - output_key: str = "resolution" - ideation_llm: Optional[BaseLanguageModel] = None - """LLM to use in ideation step. If None given, 'llm' will be used.""" - critique_llm: Optional[BaseLanguageModel] = None - """LLM to use in critique step. If None given, 'llm' will be used.""" - resolver_llm: Optional[BaseLanguageModel] = None - """LLM to use in resolve step. If None given, 'llm' will be used.""" - llm: Optional[BaseLanguageModel] = None - """LLM to use for each steps, if no specific llm for that step is given. """ - n_ideas: int = 3 - """Number of ideas to generate in idea step""" - return_intermediate_steps: bool = False - """Whether to return ideas and critique, in addition to resolution.""" - history: SmartLLMChainHistory = SmartLLMChainHistory() - - model_config = ConfigDict( - extra="forbid", - ) - - @model_validator(mode="before") - @classmethod - def validate_inputs(cls, values: Dict[str, Any]) -> Any: - """Ensure we have an LLM for each step.""" - llm = values.get("llm") - ideation_llm = values.get("ideation_llm") - critique_llm = values.get("critique_llm") - resolver_llm = values.get("resolver_llm") - - if not llm and not ideation_llm: - raise ValueError( - "Either ideation_llm or llm needs to be given. Pass llm, " - "if you want to use the same llm for all steps, or pass " - "ideation_llm, critique_llm and resolver_llm if you want " - "to use different llms for each step." - ) - if not llm and not critique_llm: - raise ValueError( - "Either critique_llm or llm needs to be given. Pass llm, " - "if you want to use the same llm for all steps, or pass " - "ideation_llm, critique_llm and resolver_llm if you want " - "to use different llms for each step." - ) - if not llm and not resolver_llm: - raise ValueError( - "Either resolve_llm or llm needs to be given. Pass llm, " - "if you want to use the same llm for all steps, or pass " - "ideation_llm, critique_llm and resolver_llm if you want " - "to use different llms for each step." - ) - if llm and ideation_llm and critique_llm and resolver_llm: - raise ValueError( - "LLMs are given for each step (ideation_llm, critique_llm," - " resolver_llm), but backup LLM (llm) is also given, which" - " would not be used." 
- ) - return values - - @property - def input_keys(self) -> List[str]: - """Defines the input keys.""" - return self.prompt.input_variables - - @property - def output_keys(self) -> List[str]: - """Defines the output keys.""" - if self.return_intermediate_steps: - return ["ideas", "critique", self.output_key] - return [self.output_key] - - def prep_prompts( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Tuple[PromptValue, Optional[List[str]]]: - """Prepare prompts from inputs.""" - stop = None - if "stop" in inputs: - stop = inputs["stop"] - selected_inputs = {k: inputs[k] for k in self.prompt.input_variables} - prompt = self.prompt.format_prompt(**selected_inputs) - _colored_text = get_colored_text(prompt.to_string(), "green") - _text = "Prompt after formatting:\n" + _colored_text - if run_manager: - run_manager.on_text(_text, end="\n", verbose=self.verbose) - if "stop" in inputs and inputs["stop"] != stop: - raise ValueError( - "If `stop` is present in any inputs, should be present in all." - ) - return prompt, stop - - def _call( - self, - input_list: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - prompt, stop = self.prep_prompts(input_list, run_manager=run_manager) - self.history.question = prompt.to_string() - ideas = self._ideate(stop, run_manager) - self.history.ideas = ideas - critique = self._critique(stop, run_manager) - self.history.critique = critique - resolution = self._resolve(stop, run_manager) - if self.return_intermediate_steps: - return {"ideas": ideas, "critique": critique, self.output_key: resolution} - return {self.output_key: resolution} - - def _get_text_from_llm_result(self, result: LLMResult, step: str) -> str: - """Between steps, only the LLM result text is passed, not the LLMResult object. - This function extracts the text from an LLMResult.""" - if len(result.generations) != 1: - raise ValueError( - f"In SmartLLM the LLM result in step {step} is not " - "exactly 1 element. This should never happen" - ) - if len(result.generations[0]) != 1: - raise ValueError( - f"In SmartLLM the LLM in step {step} returned more than " - "1 output. SmartLLM only works with LLMs returning " - "exactly 1 output." - ) - return result.generations[0][0].text - - def get_prompt_strings( - self, stage: str - ) -> List[Tuple[Type[BaseMessagePromptTemplate], str]]: - role_strings: List[Tuple[Type[BaseMessagePromptTemplate], str]] = [] - role_strings.append( - ( - HumanMessagePromptTemplate, - "Question: {question}\nAnswer: Let's work this out in a step by " - "step way to be sure we have the right answer:", - ) - ) - if stage == "ideation": - return role_strings - role_strings.extend( - [ - *[ - ( - AIMessagePromptTemplate, - "Idea " + str(i + 1) + ": {idea_" + str(i + 1) + "}", - ) - for i in range(self.n_ideas) - ], - ( - HumanMessagePromptTemplate, - "You are a researcher tasked with investigating the " - f"{self.n_ideas} response options provided. List the flaws and " - "faulty logic of each answer option. Let's work this out in a step" - " by step way to be sure we have all the errors:", - ), - ] - ) - if stage == "critique": - return role_strings - role_strings.extend( - [ - (AIMessagePromptTemplate, "Critique: {critique}"), - ( - HumanMessagePromptTemplate, - "You are a resolver tasked with 1) finding which of " - f"the {self.n_ideas} answer options the researcher thought was " - "best, 2) improving that answer and 3) printing the answer in " - "full. 
Don't output anything for step 1 or 2, only the full " - "answer in 3. Let's work this out in a step by step way to " - "be sure we have the right answer:", - ), - ] - ) - if stage == "resolve": - return role_strings - raise ValueError( - "stage should be either 'ideation', 'critique' or 'resolve'," - f" but it is '{stage}'. This should never happen." - ) - - def ideation_prompt(self) -> ChatPromptTemplate: - return ChatPromptTemplate.from_strings(self.get_prompt_strings("ideation")) - - def critique_prompt(self) -> ChatPromptTemplate: - return ChatPromptTemplate.from_strings(self.get_prompt_strings("critique")) - - def resolve_prompt(self) -> ChatPromptTemplate: - return ChatPromptTemplate.from_strings(self.get_prompt_strings("resolve")) - - def _ideate( - self, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> List[str]: - """Generate n_ideas ideas as response to user prompt.""" - llm = self.ideation_llm if self.ideation_llm else self.llm - prompt = self.ideation_prompt().format_prompt( - **self.history.ideation_prompt_inputs() - ) - callbacks = run_manager.get_child() if run_manager else None - if llm: - ideas = [ - self._get_text_from_llm_result( - llm.generate_prompt([prompt], stop, callbacks), - step="ideate", - ) - for _ in range(self.n_ideas) - ] - for i, idea in enumerate(ideas): - _colored_text = get_colored_text(idea, "blue") - _text = f"Idea {i+1}:\n" + _colored_text - if run_manager: - run_manager.on_text(_text, end="\n", verbose=self.verbose) - return ideas - else: - raise ValueError("llm is none, which should never happen") - - def _critique( - self, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> str: - """Critique each of the ideas from ideation stage & select best one.""" - llm = self.critique_llm if self.critique_llm else self.llm - prompt = self.critique_prompt().format_prompt( - **self.history.critique_prompt_inputs() - ) - callbacks = run_manager.handlers if run_manager else None - if llm: - critique = self._get_text_from_llm_result( - llm.generate_prompt([prompt], stop, callbacks), step="critique" - ) - _colored_text = get_colored_text(critique, "yellow") - _text = "Critique:\n" + _colored_text - if run_manager: - run_manager.on_text(_text, end="\n", verbose=self.verbose) - return critique - else: - raise ValueError("llm is none, which should never happen") - - def _resolve( - self, - stop: Optional[List[str]] = None, - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> str: - """Improve upon the best idea as chosen in critique step & return it.""" - llm = self.resolver_llm if self.resolver_llm else self.llm - prompt = self.resolve_prompt().format_prompt( - **self.history.resolve_prompt_inputs() - ) - callbacks = run_manager.handlers if run_manager else None - if llm: - resolution = self._get_text_from_llm_result( - llm.generate_prompt([prompt], stop, callbacks), step="resolve" - ) - _colored_text = get_colored_text(resolution, "green") - _text = "Resolution:\n" + _colored_text - if run_manager: - run_manager.on_text(_text, end="\n", verbose=self.verbose) - return resolution - else: - raise ValueError("llm is none, which should never happen") diff --git a/libs/experimental/langchain_experimental/sql/__init__.py b/libs/experimental/langchain_experimental/sql/__init__.py deleted file mode 100644 index d04f46fb7ff2d..0000000000000 --- a/libs/experimental/langchain_experimental/sql/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""**SQL Chain** interacts 
with `SQL` Database.""" - -from langchain_experimental.sql.base import SQLDatabaseChain, SQLDatabaseSequentialChain - -__all__ = ["SQLDatabaseChain", "SQLDatabaseSequentialChain"] diff --git a/libs/experimental/langchain_experimental/sql/base.py b/libs/experimental/langchain_experimental/sql/base.py deleted file mode 100644 index 91a05c6477075..0000000000000 --- a/libs/experimental/langchain_experimental/sql/base.py +++ /dev/null @@ -1,318 +0,0 @@ -"""Chain for interacting with SQL Database.""" - -from __future__ import annotations - -import warnings -from typing import Any, Dict, List, Optional - -from langchain.chains.base import Chain -from langchain.chains.llm import LLMChain -from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS -from langchain.schema import BasePromptTemplate -from langchain_community.tools.sql_database.prompt import QUERY_CHECKER -from langchain_community.utilities.sql_database import SQLDatabase -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.language_models import BaseLanguageModel -from langchain_core.prompts.prompt import PromptTemplate -from pydantic import ConfigDict, Field, model_validator - -INTERMEDIATE_STEPS_KEY = "intermediate_steps" -SQL_QUERY = "SQLQuery:" -SQL_RESULT = "SQLResult:" - - -class SQLDatabaseChain(Chain): - """Chain for interacting with SQL Database. - - Example: - .. code-block:: python - - from langchain_experimental.sql import SQLDatabaseChain - from langchain_community.llms import OpenAI, SQLDatabase - db = SQLDatabase(...) - db_chain = SQLDatabaseChain.from_llm(OpenAI(), db) - - *Security note*: Make sure that the database connection uses credentials - that are narrowly-scoped to only include the permissions this chain needs. - Failure to do so may result in data corruption or loss, since this chain may - attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted. - The best way to guard against such negative outcomes is to (as appropriate) - limit the permissions granted to the credentials used with this chain. 
- This issue shows an example negative outcome if these steps are not taken: - https://github.com/langchain-ai/langchain/issues/5923 - """ - - llm_chain: LLMChain - llm: Optional[BaseLanguageModel] = None - """[Deprecated] LLM wrapper to use.""" - database: SQLDatabase = Field(exclude=True) - """SQL Database to connect to.""" - prompt: Optional[BasePromptTemplate] = None - """[Deprecated] Prompt to use to translate natural language to SQL.""" - top_k: int = 5 - """Number of results to return from the query""" - input_key: str = "query" #: :meta private: - output_key: str = "result" #: :meta private: - return_sql: bool = False - """Will return sql-command directly without executing it""" - return_intermediate_steps: bool = False - """Whether or not to return the intermediate steps along with the final answer.""" - return_direct: bool = False - """Whether or not to return the result of querying the SQL table directly.""" - use_query_checker: bool = False - """Whether or not the query checker tool should be used to attempt - to fix the initial SQL from the LLM.""" - query_checker_prompt: Optional[BasePromptTemplate] = None - """The prompt template that should be used by the query checker""" - - model_config = ConfigDict( - arbitrary_types_allowed=True, - extra="forbid", - ) - - @model_validator(mode="before") - @classmethod - def raise_deprecation(cls, values: Dict) -> Any: - if "llm" in values: - warnings.warn( - "Directly instantiating an SQLDatabaseChain with an llm is deprecated. " - "Please instantiate with llm_chain argument or using the from_llm " - "class method." - ) - if "llm_chain" not in values and values["llm"] is not None: - database = values["database"] - prompt = values.get("prompt") or SQL_PROMPTS.get( - database.dialect, PROMPT - ) - values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt) - return values - - @property - def input_keys(self) -> List[str]: - """Return the singular input key. - - :meta private: - """ - return [self.input_key] - - @property - def output_keys(self) -> List[str]: - """Return the singular output key. - - :meta private: - """ - if not self.return_intermediate_steps: - return [self.output_key] - else: - return [self.output_key, INTERMEDIATE_STEPS_KEY] - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - input_text = f"{inputs[self.input_key]}\n{SQL_QUERY}" - _run_manager.on_text(input_text, verbose=self.verbose) - # If not present, then defaults to None which is all tables. 
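-        # (`SQLDatabaseSequentialChain` passes "table_names_to_use" so that
-        # only the schema of the relevant tables is shown to the LLM.)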
- table_names_to_use = inputs.get("table_names_to_use") - table_info = self.database.get_table_info(table_names=table_names_to_use) - llm_inputs = { - "input": input_text, - "top_k": str(self.top_k), - "dialect": self.database.dialect, - "table_info": table_info, - "stop": ["\nSQLResult:"], - } - if self.memory is not None: - for k in self.memory.memory_variables: - llm_inputs[k] = inputs[k] - intermediate_steps: List = [] - try: - intermediate_steps.append(llm_inputs.copy()) # input: sql generation - sql_cmd = self.llm_chain.predict( - callbacks=_run_manager.get_child(), - **llm_inputs, - ).strip() - if self.return_sql: - return {self.output_key: sql_cmd} - if not self.use_query_checker: - _run_manager.on_text(sql_cmd, color="green", verbose=self.verbose) - intermediate_steps.append( - sql_cmd - ) # output: sql generation (no checker) - intermediate_steps.append({"sql_cmd": sql_cmd}) # input: sql exec - if SQL_QUERY in sql_cmd: - sql_cmd = sql_cmd.split(SQL_QUERY)[1].strip() - if SQL_RESULT in sql_cmd: - sql_cmd = sql_cmd.split(SQL_RESULT)[0].strip() - result = self.database.run(sql_cmd) - intermediate_steps.append(str(result)) # output: sql exec - else: - query_checker_prompt = self.query_checker_prompt or PromptTemplate( - template=QUERY_CHECKER, input_variables=["query", "dialect"] - ) - query_checker_chain = LLMChain( - llm=self.llm_chain.llm, prompt=query_checker_prompt - ) - query_checker_inputs = { - "query": sql_cmd, - "dialect": self.database.dialect, - } - checked_sql_command: str = query_checker_chain.predict( - callbacks=_run_manager.get_child(), **query_checker_inputs - ).strip() - intermediate_steps.append( - checked_sql_command - ) # output: sql generation (checker) - _run_manager.on_text( - checked_sql_command, color="green", verbose=self.verbose - ) - intermediate_steps.append( - {"sql_cmd": checked_sql_command} - ) # input: sql exec - result = self.database.run(checked_sql_command) - intermediate_steps.append(str(result)) # output: sql exec - sql_cmd = checked_sql_command - - _run_manager.on_text("\nSQLResult: ", verbose=self.verbose) - _run_manager.on_text(str(result), color="yellow", verbose=self.verbose) - # If return direct, we just set the final result equal to - # the result of the sql query result, otherwise try to get a human readable - # final answer - if self.return_direct: - final_result = result - else: - _run_manager.on_text("\nAnswer:", verbose=self.verbose) - input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:" - llm_inputs["input"] = input_text - intermediate_steps.append(llm_inputs.copy()) # input: final answer - final_result = self.llm_chain.predict( - callbacks=_run_manager.get_child(), - **llm_inputs, - ).strip() - intermediate_steps.append(final_result) # output: final answer - _run_manager.on_text(final_result, color="green", verbose=self.verbose) - chain_result: Dict[str, Any] = {self.output_key: final_result} - if self.return_intermediate_steps: - chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps - return chain_result - except Exception as exc: - # Append intermediate steps to exception, to aid in logging and later - # improvement of few shot prompt seeds - exc.intermediate_steps = intermediate_steps # type: ignore - raise exc - - @property - def _chain_type(self) -> str: - return "sql_database_chain" - - @classmethod - def from_llm( - cls, - llm: BaseLanguageModel, - db: SQLDatabase, - prompt: Optional[BasePromptTemplate] = None, - **kwargs: Any, - ) -> SQLDatabaseChain: - """Create a SQLDatabaseChain from an LLM and a database 
connection. - - *Security note*: Make sure that the database connection uses credentials - that are narrowly-scoped to only include the permissions this chain needs. - Failure to do so may result in data corruption or loss, since this chain may - attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted. - The best way to guard against such negative outcomes is to (as appropriate) - limit the permissions granted to the credentials used with this chain. - This issue shows an example negative outcome if these steps are not taken: - https://github.com/langchain-ai/langchain/issues/5923 - """ - prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT) - llm_chain = LLMChain(llm=llm, prompt=prompt) - return cls(llm_chain=llm_chain, database=db, **kwargs) - - -class SQLDatabaseSequentialChain(Chain): - """Chain for querying SQL database that is a sequential chain. - - The chain is as follows: - 1. Based on the query, determine which tables to use. - 2. Based on those tables, call the normal SQL database chain. - - This is useful in cases where the number of tables in the database is large. - """ - - decider_chain: LLMChain - sql_chain: SQLDatabaseChain - input_key: str = "query" #: :meta private: - output_key: str = "result" #: :meta private: - return_intermediate_steps: bool = False - - @classmethod - def from_llm( - cls, - llm: BaseLanguageModel, - db: SQLDatabase, - query_prompt: BasePromptTemplate = PROMPT, - decider_prompt: BasePromptTemplate = DECIDER_PROMPT, - **kwargs: Any, - ) -> SQLDatabaseSequentialChain: - """Load the necessary chains.""" - sql_chain = SQLDatabaseChain.from_llm(llm, db, prompt=query_prompt, **kwargs) - decider_chain = LLMChain( - llm=llm, prompt=decider_prompt, output_key="table_names" - ) - return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs) - - @property - def input_keys(self) -> List[str]: - """Return the singular input key. - - :meta private: - """ - return [self.input_key] - - @property - def output_keys(self) -> List[str]: - """Return the singular output key. 
-
-        :meta private:
-        """
-        if not self.return_intermediate_steps:
-            return [self.output_key]
-        else:
-            return [self.output_key, INTERMEDIATE_STEPS_KEY]
-
-    def _call(
-        self,
-        inputs: Dict[str, Any],
-        run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, Any]:
-        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
-        _table_names = self.sql_chain.database.get_usable_table_names()
-        table_names = ", ".join(_table_names)
-        llm_inputs = {
-            "query": inputs[self.input_key],
-            "table_names": table_names,
-        }
-        _lowercased_table_names = [name.lower() for name in _table_names]
-        table_names_from_chain = self.decider_chain.predict_and_parse(**llm_inputs)
-        table_names_to_use = [
-            name
-            for name in table_names_from_chain
-            if name.lower() in _lowercased_table_names
-        ]
-        _run_manager.on_text("Table names to use:", end="\n", verbose=self.verbose)
-        _run_manager.on_text(
-            str(table_names_to_use), color="yellow", verbose=self.verbose
-        )
-        new_inputs = {
-            self.sql_chain.input_key: inputs[self.input_key],
-            "table_names_to_use": table_names_to_use,
-        }
-        return self.sql_chain(
-            new_inputs, callbacks=_run_manager.get_child(), return_only_outputs=True
-        )
-
-    @property
-    def _chain_type(self) -> str:
-        return "sql_database_sequential_chain"
diff --git a/libs/experimental/langchain_experimental/sql/prompt.py b/libs/experimental/langchain_experimental/sql/prompt.py
deleted file mode 100644
index 0420507d66c03..0000000000000
--- a/libs/experimental/langchain_experimental/sql/prompt.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# flake8: noqa
-from langchain_core.prompts.prompt import PromptTemplate
-
-
-PROMPT_SUFFIX = """Only use the following tables:
-{table_info}
-
-Question: {input}"""
-
-_VECTOR_SQL_DEFAULT_TEMPLATE = """You are a {dialect} expert. Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer to the input question.
-{dialect} queries have a vector distance function called `DISTANCE(column, array)` to compute relevance to the user's question and sort the feature array column by that relevance.
-When the query asks for the {top_k} closest rows, you have to use this distance function to calculate the distance to the entity's array on the vector column and order by that distance to retrieve the relevant rows.
-
-*NOTICE*: `DISTANCE(column, array)` only accepts an array column as its first argument and a `NeuralArray(entity)` as its second argument. You also need a user-defined function called `NeuralArray(entity)` to retrieve the entity's array.
-
-Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per {dialect}. You should only order according to the distance function.
-Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
-Use only the column names you can see in the tables below. Be careful not to query for columns that do not exist. Also, pay attention to which column is in which table.
-Use the today() function to get the current date if the question involves "today". The `ORDER BY` clause should always come after the `WHERE` clause. DO NOT add a semicolon to the end of the SQL. Pay attention to the comments in the table schema.
-
-Use the following format:
-
-Question: "Question here"
-SQLQuery: "SQL Query to run"
-SQLResult: "Result of the SQLQuery"
-Answer: "Final answer here"
-"""
-
-VECTOR_SQL_PROMPT = PromptTemplate(
-    input_variables=["input", "table_info", "dialect", "top_k"],
-    template=_VECTOR_SQL_DEFAULT_TEMPLATE + PROMPT_SUFFIX,
-)
-
-
-_myscale_prompt = """You are a MyScale expert. Given an input question, first create a syntactically correct MyScale query to run, then look at the results of the query and return the answer to the input question.
-MyScale queries have a vector distance function called `DISTANCE(column, array)` to compute relevance to the user's question and sort the feature array column by that relevance.
-When the query asks for the {top_k} closest rows, you have to use this distance function to calculate the distance to the entity's array on the vector column and order by that distance to retrieve the relevant rows.
-
-*NOTICE*: `DISTANCE(column, array)` only accepts an array column as its first argument and a `NeuralArray(entity)` as its second argument. You also need a user-defined function called `NeuralArray(entity)` to retrieve the entity's array.
-
-Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per MyScale. You should only order according to the distance function.
-Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
-Use only the column names you can see in the tables below. Be careful not to query for columns that do not exist. Also, pay attention to which column is in which table.
-Use the today() function to get the current date if the question involves "today". The `ORDER BY` clause should always come after the `WHERE` clause. DO NOT add a semicolon to the end of the SQL. Pay attention to the comments in the table schema.
-
-Use the following format:
-
-======== table info ========
-
-
-Question: "Question here"
-SQLQuery: "SQL Query to run"
-
-
-Here are some examples:
-
-======== table info ========
-CREATE TABLE "ChatPaper" (
-    abstract String,
-    id String,
-    vector Array(Float32),
-) ENGINE = ReplicatedReplacingMergeTree()
- ORDER BY id
- PRIMARY KEY id
-
-Question: What is Feature Pyramid Network?
-SQLQuery: SELECT ChatPaper.title, ChatPaper.id, ChatPaper.authors FROM ChatPaper ORDER BY DISTANCE(vector, NeuralArray(PaperRank contribution)) LIMIT {top_k} - - -Let's begin: -======== table info ======== -{table_info} - -Question: {input} -SQLQuery: """ - -MYSCALE_PROMPT = PromptTemplate( - input_variables=["input", "table_info", "top_k"], - template=_myscale_prompt + PROMPT_SUFFIX, -) - - -VECTOR_SQL_PROMPTS = { - "myscale": MYSCALE_PROMPT, -} diff --git a/libs/experimental/langchain_experimental/sql/vector_sql.py b/libs/experimental/langchain_experimental/sql/vector_sql.py deleted file mode 100644 index bea736b667dbe..0000000000000 --- a/libs/experimental/langchain_experimental/sql/vector_sql.py +++ /dev/null @@ -1,232 +0,0 @@ -"""Vector SQL Database Chain Retriever""" - -from __future__ import annotations - -from typing import Any, Dict, List, Optional, Sequence, Union - -from langchain.chains.llm import LLMChain -from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS -from langchain_community.tools.sql_database.prompt import QUERY_CHECKER -from langchain_community.utilities.sql_database import SQLDatabase -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.embeddings import Embeddings -from langchain_core.language_models import BaseLanguageModel -from langchain_core.output_parsers import BaseOutputParser -from langchain_core.prompts import BasePromptTemplate -from langchain_core.prompts.prompt import PromptTemplate -from pydantic import ConfigDict - -from langchain_experimental.sql.base import INTERMEDIATE_STEPS_KEY, SQLDatabaseChain - - -class VectorSQLOutputParser(BaseOutputParser[str]): - """Output Parser for Vector SQL. - - 1. finds for `NeuralArray()` and replace it with the embedding - 2. finds for `DISTANCE()` and replace it with the distance name in backend SQL - """ - - model: Embeddings - """Embedding model to extract embedding for entity""" - distance_func_name: str = "distance" - """Distance name for Vector SQL""" - - model_config = ConfigDict( - arbitrary_types_allowed=True, - ) - - @property - def _type(self) -> str: - return "vector_sql_parser" - - @classmethod - def from_embeddings( - cls, model: Embeddings, distance_func_name: str = "distance", **kwargs: Any - ) -> VectorSQLOutputParser: - return cls(model=model, distance_func_name=distance_func_name, **kwargs) - - def parse(self, text: str) -> str: - text = text.strip() - start = text.find("NeuralArray(") - _sql_str_compl = text - if start > 0: - _matched = text[text.find("NeuralArray(") + len("NeuralArray(") :] - end = _matched.find(")") + start + len("NeuralArray(") + 1 - entity = _matched[: _matched.find(")")] - vecs = self.model.embed_query(entity) - vecs_str = "[" + ",".join(map(str, vecs)) + "]" - _sql_str_compl = text.replace("DISTANCE", self.distance_func_name).replace( - text[start:end], vecs_str - ) - if _sql_str_compl[-1] == ";": - _sql_str_compl = _sql_str_compl[:-1] - return _sql_str_compl - - -class VectorSQLRetrieveAllOutputParser(VectorSQLOutputParser): - """Parser based on VectorSQLOutputParser. - It also modifies the SQL to get all columns. 
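To make the parsing contract above concrete, here is a small sketch of what the removed parser does to a model completion. `FakeEmbeddings` stands in for a real embedding model, and the query text is invented:

```python
from langchain_core.embeddings import FakeEmbeddings

from langchain_experimental.sql.vector_sql import VectorSQLOutputParser

parser = VectorSQLOutputParser.from_embeddings(model=FakeEmbeddings(size=4))
sql = parser.parse(
    'SELECT "id" FROM papers ORDER BY DISTANCE(vector, NeuralArray(FPN)) LIMIT 5;'
)
# `NeuralArray(FPN)` becomes an embedding literal "[...]", `DISTANCE` is renamed
# to the backend's distance function, and the trailing semicolon is stripped.
print(sql)
```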
- """ - - @property - def _type(self) -> str: - return "vector_sql_retrieve_all_parser" - - def parse(self, text: str) -> str: - text = text.strip() - start = text.upper().find("SELECT") - if start >= 0: - end = text.upper().find("FROM") - text = text.replace(text[start + len("SELECT") + 1 : end - 1], "*") - return super().parse(text) - - -def get_result_from_sqldb(db: SQLDatabase, cmd: str) -> Sequence[Dict[str, Any]]: - """Get result from SQL Database.""" - - result = db._execute(cmd, fetch="all") - assert isinstance(result, Sequence) - return result - - -class VectorSQLDatabaseChain(SQLDatabaseChain): - """Chain for interacting with Vector SQL Database. - - Example: - .. code-block:: python - - from langchain_experimental.sql import SQLDatabaseChain - from langchain_community.llms import OpenAI, SQLDatabase, OpenAIEmbeddings - db = SQLDatabase(...) - db_chain = VectorSQLDatabaseChain.from_llm(OpenAI(), db, OpenAIEmbeddings()) - - *Security note*: Make sure that the database connection uses credentials - that are narrowly-scoped to only include the permissions this chain needs. - Failure to do so may result in data corruption or loss, since this chain may - attempt commands like `DROP TABLE` or `INSERT` if appropriately prompted. - The best way to guard against such negative outcomes is to (as appropriate) - limit the permissions granted to the credentials used with this chain. - This issue shows an example negative outcome if these steps are not taken: - https://github.com/langchain-ai/langchain/issues/5923 - """ - - sql_cmd_parser: VectorSQLOutputParser - """Parser for Vector SQL""" - native_format: bool = False - """If return_direct, controls whether to return in python native format""" - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, Any]: - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - input_text = f"{inputs[self.input_key]}\nSQLQuery:" - _run_manager.on_text(input_text, verbose=self.verbose) - # If not present, then defaults to None which is all tables. 
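Note that the docstring example above imports `SQLDatabase` and `OpenAIEmbeddings` from `langchain_community.llms`, where they do not live, and omits the `sql_cmd_parser` that `from_llm` (below) requires. A corrected sketch, with an illustrative connection URI:

```python
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.llms import OpenAI
from langchain_community.utilities import SQLDatabase

from langchain_experimental.sql.vector_sql import (
    VectorSQLDatabaseChain,
    VectorSQLOutputParser,
)

db = SQLDatabase.from_uri("clickhouse://localhost:8123/default")  # assumed URI
parser = VectorSQLOutputParser.from_embeddings(OpenAIEmbeddings())
chain = VectorSQLDatabaseChain.from_llm(OpenAI(), db, sql_cmd_parser=parser)
print(chain.run("What is Feature Pyramid Network?"))
```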
- table_names_to_use = inputs.get("table_names_to_use") - table_info = self.database.get_table_info(table_names=table_names_to_use) - llm_inputs = { - "input": input_text, - "top_k": str(self.top_k), - "dialect": self.database.dialect, - "table_info": table_info, - "stop": ["\nSQLResult:"], - } - intermediate_steps: List = [] - try: - intermediate_steps.append(llm_inputs) # input: sql generation - llm_out = self.llm_chain.predict( - callbacks=_run_manager.get_child(), - **llm_inputs, - ) - sql_cmd = self.sql_cmd_parser.parse(llm_out) - if self.return_sql: - return {self.output_key: sql_cmd} - if not self.use_query_checker: - _run_manager.on_text(llm_out, color="green", verbose=self.verbose) - intermediate_steps.append( - llm_out - ) # output: sql generation (no checker) - intermediate_steps.append({"sql_cmd": llm_out}) # input: sql exec - result = get_result_from_sqldb(self.database, sql_cmd) - intermediate_steps.append(str(result)) # output: sql exec - else: - query_checker_prompt = self.query_checker_prompt or PromptTemplate( - template=QUERY_CHECKER, input_variables=["query", "dialect"] - ) - query_checker_chain = LLMChain( - llm=self.llm_chain.llm, - prompt=query_checker_prompt, - output_parser=self.llm_chain.output_parser, - ) - query_checker_inputs = { - "query": llm_out, - "dialect": self.database.dialect, - } - checked_llm_out = query_checker_chain.predict( - callbacks=_run_manager.get_child(), **query_checker_inputs - ) - checked_sql_command = self.sql_cmd_parser.parse(checked_llm_out) - intermediate_steps.append( - checked_llm_out - ) # output: sql generation (checker) - _run_manager.on_text( - checked_llm_out, color="green", verbose=self.verbose - ) - intermediate_steps.append( - {"sql_cmd": checked_llm_out} - ) # input: sql exec - result = get_result_from_sqldb(self.database, checked_sql_command) - intermediate_steps.append(str(result)) # output: sql exec - llm_out = checked_llm_out - sql_cmd = checked_sql_command - - _run_manager.on_text("\nSQLResult: ", verbose=self.verbose) - _run_manager.on_text(str(result), color="yellow", verbose=self.verbose) - # If return direct, we just set the final result equal to - # the result of the sql query result (`Sequence[Dict[str, Any]]`), - # otherwise try to get a human readable final answer (`str`). 
- final_result: Union[str, Sequence[Dict[str, Any]]] - if self.return_direct: - final_result = result - else: - _run_manager.on_text("\nAnswer:", verbose=self.verbose) - input_text += f"{llm_out}\nSQLResult: {result}\nAnswer:" - llm_inputs["input"] = input_text - intermediate_steps.append(llm_inputs) # input: final answer - final_result = self.llm_chain.predict( - callbacks=_run_manager.get_child(), - **llm_inputs, - ).strip() - intermediate_steps.append(final_result) # output: final answer - _run_manager.on_text(final_result, color="green", verbose=self.verbose) - chain_result: Dict[str, Any] = {self.output_key: final_result} - if self.return_intermediate_steps: - chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps - return chain_result - except Exception as exc: - # Append intermediate steps to exception, to aid in logging and later - # improvement of few shot prompt seeds - exc.intermediate_steps = intermediate_steps # type: ignore - raise exc - - @property - def _chain_type(self) -> str: - return "vector_sql_database_chain" - - @classmethod - def from_llm( - cls, - llm: BaseLanguageModel, - db: SQLDatabase, - prompt: Optional[BasePromptTemplate] = None, - sql_cmd_parser: Optional[VectorSQLOutputParser] = None, - **kwargs: Any, - ) -> VectorSQLDatabaseChain: - assert sql_cmd_parser, "`sql_cmd_parser` must be set in VectorSQLDatabaseChain." - prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT) - llm_chain = LLMChain(llm=llm, prompt=prompt) - return cls( - llm_chain=llm_chain, database=db, sql_cmd_parser=sql_cmd_parser, **kwargs - ) diff --git a/libs/experimental/langchain_experimental/synthetic_data/__init__.py b/libs/experimental/langchain_experimental/synthetic_data/__init__.py deleted file mode 100644 index d611c76ef84a8..0000000000000 --- a/libs/experimental/langchain_experimental/synthetic_data/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Generate **synthetic data** using LLM and few-shot template.""" - -from typing import Any, Dict, List, Optional - -from langchain.chains.base import Chain -from langchain.chains.llm import LLMChain -from langchain_core.language_models import BaseLanguageModel -from langchain_core.prompts import PromptTemplate - -from langchain_experimental.synthetic_data.prompts import SENTENCE_PROMPT - - -def create_data_generation_chain( - llm: BaseLanguageModel, - prompt: Optional[PromptTemplate] = None, -) -> Chain: - """Create a chain that generates synthetic sentences with - provided fields. - - Args: - llm: The language model to use. - prompt: Prompt to feed the language model with. - If not provided, the default one will be used. 
- """ - prompt = prompt or SENTENCE_PROMPT - return LLMChain( - llm=llm, - prompt=prompt, - ) - - -class DatasetGenerator: - """Generate synthetic dataset with a given language model.""" - - def __init__( - self, - llm: BaseLanguageModel, - sentence_preferences: Optional[Dict[str, Any]] = None, - ): - self.generator = create_data_generation_chain(llm) - self.sentence_preferences = sentence_preferences or {} - - def __call__(self, fields_collection: List[List[Any]]) -> List[Dict[str, Any]]: - results: List[Dict[str, Any]] = [] - for fields in fields_collection: - results.append( - self.generator( - {"fields": fields, "preferences": self.sentence_preferences} - ) - ) - return results diff --git a/libs/experimental/langchain_experimental/synthetic_data/prompts.py b/libs/experimental/langchain_experimental/synthetic_data/prompts.py deleted file mode 100644 index 51bc373630a76..0000000000000 --- a/libs/experimental/langchain_experimental/synthetic_data/prompts.py +++ /dev/null @@ -1,15 +0,0 @@ -from langchain_core.prompts.prompt import PromptTemplate - -sentence_template = """Given the following fields, create a sentence about them. -Make the sentence detailed and interesting. Use every given field. -If any additional preferences are given, use them during sentence construction as well. -Fields: -{fields} -Preferences: -{preferences} -Sentence: -""" - -SENTENCE_PROMPT = PromptTemplate( - template=sentence_template, input_variables=["fields", "preferences"] -) diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/__init__.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/__init__.py deleted file mode 100644 index d26ccf38f8ebd..0000000000000 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Generate **tabular synthetic data** using LLM and few-shot template.""" diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/base.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/base.py deleted file mode 100644 index a35f59fe62d82..0000000000000 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/base.py +++ /dev/null @@ -1,140 +0,0 @@ -import asyncio -from typing import Any, Dict, List, Optional, Union, cast - -from langchain.chains.base import Chain -from langchain.chains.llm import LLMChain -from langchain_core.language_models import BaseLanguageModel -from langchain_core.prompts.few_shot import FewShotPromptTemplate -from langchain_core.utils.pydantic import is_basemodel_instance -from pydantic import BaseModel, ConfigDict, model_validator -from typing_extensions import Self - - -class SyntheticDataGenerator(BaseModel): - """Generate synthetic data using the given LLM and few-shot template. - - Utilizes the provided LLM to produce synthetic data based on the - few-shot prompt template. - - Attributes: - template (FewShotPromptTemplate): Template for few-shot prompting. - llm (Optional[BaseLanguageModel]): Large Language Model to use for generation. - llm_chain (Optional[Chain]): LLM chain with the LLM and few-shot template. - example_input_key (str): Key to use for storing example inputs. - - Usage Example: - >>> template = FewShotPromptTemplate(...) - >>> llm = BaseLanguageModel(...) 
-        >>> generator = SyntheticDataGenerator(template=template, llm=llm)
-        >>> results = generator.generate(subject="climate change", runs=5)
-    """
-
-    template: FewShotPromptTemplate
-    llm: Optional[BaseLanguageModel] = None
-    results: list = []
-    llm_chain: Optional[Chain] = None
-    example_input_key: str = "example"
-
-    model_config = ConfigDict(
-        validate_assignment=True,
-    )
-
-    @model_validator(mode="after")
-    def set_llm_chain(self) -> Self:
-        llm_chain = self.llm_chain
-        llm = self.llm
-        few_shot_template = self.template
-
-        if not llm_chain:  # If llm_chain is None or not present
-            if llm is None or few_shot_template is None:
-                raise ValueError(
-                    "Both llm and few_shot_template must be provided if llm_chain is "
-                    "not given."
-                )
-            self.llm_chain = LLMChain(llm=llm, prompt=few_shot_template)
-
-        return self
-
-    @staticmethod
-    def _format_dict_to_string(input_dict: Dict) -> str:
-        formatted_str = ", ".join(
-            [f"{key}: {value}" for key, value in input_dict.items()]
-        )
-        return formatted_str
-
-    def _update_examples(self, example: Union[BaseModel, Dict[str, Any], str]) -> None:
-        """Prevent duplicates by adding previously generated examples to the few-shot
-        list."""
-        if self.template and self.template.examples:
-            if is_basemodel_instance(example):
-                formatted_example = self._format_dict_to_string(
-                    cast(BaseModel, example).dict()
-                )
-            elif isinstance(example, dict):
-                formatted_example = self._format_dict_to_string(example)
-            else:
-                formatted_example = str(example)
-            self.template.examples.pop(0)
-            self.template.examples.append({self.example_input_key: formatted_example})
-
-    def generate(self, subject: str, runs: int, *args: Any, **kwargs: Any) -> List[str]:
-        """Generate synthetic data using the given subject string.
-
-        Args:
-            subject (str): The subject the synthetic data will be about.
-            runs (int): Number of times to generate the data.
-            extra (str): Extra instructions for steerability in data generation,
-                forwarded to the chain via kwargs.
-
-        Returns:
-            List[str]: List of generated synthetic data.
-
-        Usage Example:
-            >>> results = generator.generate(subject="climate change", runs=5,
-                                             extra="Focus on environmental impacts.")
-        """
-        if self.llm_chain is None:
-            raise ValueError(
-                "llm_chain is None; set either llm_chain or llm at generator "
-                "construction."
-            )
-        for _ in range(runs):
-            result = self.llm_chain.run(subject=subject, *args, **kwargs)
-            self.results.append(result)
-            self._update_examples(result)
-        return self.results
-
-    async def agenerate(
-        self, subject: str, runs: int, extra: str = "", *args: Any, **kwargs: Any
-    ) -> List[str]:
-        """Generate synthetic data using the given subject asynchronously.
-
-        Note: Since the LLM calls run concurrently,
-        you may have fewer duplicates by adding specific instructions to
-        the "extra" keyword argument.
-
-        Args:
-            subject (str): The subject the synthetic data will be about.
-            runs (int): Number of times to generate the data asynchronously.
-            extra (str): Extra instructions for steerability in data generation.
-
-        Returns:
-            List[str]: List of generated synthetic data for the given subject.
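For reference, a sketch of the few-shot template this generator expects, reusing the prefix and suffix strings defined in `tabular_synthetic_data/prompts.py` further down this diff; the seed example and model choice are assumptions:

```python
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.few_shot import FewShotPromptTemplate

from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator

template = FewShotPromptTemplate(
    prefix="This is a test about generating synthetic data about {subject}. Examples below:",
    examples=[{"example": "name: Alice, age: 31"}],  # assumed seed example
    example_prompt=PromptTemplate.from_template("{example}"),
    suffix="Now you generate synthetic data about {subject}. Make sure to {extra}:",
    input_variables=["subject", "extra"],
)
generator = SyntheticDataGenerator(template=template, llm=ChatOpenAI())
results = generator.generate(subject="people", runs=3, extra="vary the ages")
```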
- - Usage Example: - >>> results = await generator.agenerate(subject="climate change", runs=5, - extra="Focus on env impacts.") - """ - - async def run_chain( - subject: str, extra: str = "", *args: Any, **kwargs: Any - ) -> None: - if self.llm_chain is not None: - result = await self.llm_chain.arun( - subject=subject, extra=extra, *args, **kwargs - ) - self.results.append(result) - - await asyncio.gather( - *(run_chain(subject=subject, extra=extra) for _ in range(runs)) - ) - return self.results diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py deleted file mode 100644 index efa9b298248d5..0000000000000 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/openai.py +++ /dev/null @@ -1,64 +0,0 @@ -from typing import Any, Dict, Optional, Type, Union - -from langchain.chains.openai_functions import create_structured_output_chain -from langchain.schema import BaseLLMOutputParser, BasePromptTemplate -from langchain_community.chat_models import ChatOpenAI -from langchain_core.prompts import PromptTemplate -from pydantic import BaseModel - -from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator - -OPENAI_TEMPLATE = PromptTemplate(input_variables=["example"], template="{example}") - - -def create_openai_data_generator( - output_schema: Union[Dict[str, Any], Type[BaseModel]], - llm: ChatOpenAI, - prompt: BasePromptTemplate, - output_parser: Optional[BaseLLMOutputParser] = None, - **kwargs: Any, -) -> SyntheticDataGenerator: - """ - Create an instance of SyntheticDataGenerator tailored for OpenAI models. - - This function creates an LLM chain designed for structured output based on the - provided schema, language model, and prompt template. The resulting chain is then - used to instantiate and return a SyntheticDataGenerator. - - Args: - output_schema (Union[Dict[str, Any], Type[BaseModel]]): Schema for expected - output. This can be either a dictionary representing a valid JsonSchema or a - Pydantic BaseModel class. - - - llm (ChatOpenAI): OpenAI language model to use. - - prompt (BasePromptTemplate): Template to be used for generating prompts. - - - output_parser (Optional[BaseLLMOutputParser], optional): Parser for - processing model outputs. If none is provided, a default will be inferred - from the function types. - - - kwargs: Additional keyword arguments to be passed to - `create_structured_output_chain`. - - - Returns: SyntheticDataGenerator: An instance of the data generator set up with - the constructed chain. - - Usage: - To generate synthetic data with a structured output, first define your desired - output schema. Then, use this function to create a SyntheticDataGenerator - instance. After obtaining the generator, you can utilize its methods to produce - the desired synthetic data. 
- """ - # Create function calling chain to ensure structured output - chain = create_structured_output_chain( - output_schema, llm, prompt, output_parser=output_parser, **kwargs - ) - - # Create the SyntheticDataGenerator instance with the created chain - generator = SyntheticDataGenerator(template=prompt, llm_chain=chain) # type: ignore[arg-type] - return generator diff --git a/libs/experimental/langchain_experimental/tabular_synthetic_data/prompts.py b/libs/experimental/langchain_experimental/tabular_synthetic_data/prompts.py deleted file mode 100644 index c5e66059285e0..0000000000000 --- a/libs/experimental/langchain_experimental/tabular_synthetic_data/prompts.py +++ /dev/null @@ -1,13 +0,0 @@ -from langchain_core.prompts.prompt import PromptTemplate - -DEFAULT_INPUT_KEY = "example" -DEFAULT_PROMPT = PromptTemplate( - input_variables=[DEFAULT_INPUT_KEY], template="{example}" -) - -SYNTHETIC_FEW_SHOT_PREFIX = ( - "This is a test about generating synthetic data about {subject}. Examples below:" -) -SYNTHETIC_FEW_SHOT_SUFFIX = ( - """Now you generate synthetic data about {subject}. Make sure to {extra}:""" -) diff --git a/libs/experimental/langchain_experimental/text_splitter.py b/libs/experimental/langchain_experimental/text_splitter.py deleted file mode 100644 index 0ea0eec6af887..0000000000000 --- a/libs/experimental/langchain_experimental/text_splitter.py +++ /dev/null @@ -1,293 +0,0 @@ -"""Experimental **text splitter** based on semantic similarity.""" - -import copy -import re -from typing import Any, Dict, Iterable, List, Literal, Optional, Sequence, Tuple, cast - -import numpy as np -from langchain_community.utils.math import ( - cosine_similarity, -) -from langchain_core.documents import BaseDocumentTransformer, Document -from langchain_core.embeddings import Embeddings - - -def combine_sentences(sentences: List[dict], buffer_size: int = 1) -> List[dict]: - """Combine sentences based on buffer size. - - Args: - sentences: List of sentences to combine. - buffer_size: Number of sentences to combine. Defaults to 1. - - Returns: - List of sentences with combined sentences. - """ - - # Go through each sentence dict - for i in range(len(sentences)): - # Create a string that will hold the sentences which are joined - combined_sentence = "" - - # Add sentences before the current one, based on the buffer size. - for j in range(i - buffer_size, i): - # Check if the index j is not negative - # (to avoid index out of range like on the first one) - if j >= 0: - # Add the sentence at index j to the combined_sentence string - combined_sentence += sentences[j]["sentence"] + " " - - # Add the current sentence - combined_sentence += sentences[i]["sentence"] - - # Add sentences after the current one, based on the buffer size - for j in range(i + 1, i + 1 + buffer_size): - # Check if the index j is within the range of the sentences list - if j < len(sentences): - # Add the sentence at index j to the combined_sentence string - combined_sentence += " " + sentences[j]["sentence"] - - # Then add the whole thing to your dict - # Store the combined sentence in the current sentence dict - sentences[i]["combined_sentence"] = combined_sentence - - return sentences - - -def calculate_cosine_distances(sentences: List[dict]) -> Tuple[List[float], List[dict]]: - """Calculate cosine distances between sentences. - - Args: - sentences: List of sentences to calculate distances for. - - Returns: - Tuple of distances and sentences. 
- """ - distances = [] - for i in range(len(sentences) - 1): - embedding_current = sentences[i]["combined_sentence_embedding"] - embedding_next = sentences[i + 1]["combined_sentence_embedding"] - - # Calculate cosine similarity - similarity = cosine_similarity([embedding_current], [embedding_next])[0][0] - - # Convert to cosine distance - distance = 1 - similarity - - # Append cosine distance to the list - distances.append(distance) - - # Store distance in the dictionary - sentences[i]["distance_to_next"] = distance - - # Optionally handle the last sentence - # sentences[-1]['distance_to_next'] = None # or a default value - - return distances, sentences - - -BreakpointThresholdType = Literal[ - "percentile", "standard_deviation", "interquartile", "gradient" -] -BREAKPOINT_DEFAULTS: Dict[BreakpointThresholdType, float] = { - "percentile": 95, - "standard_deviation": 3, - "interquartile": 1.5, - "gradient": 95, -} - - -class SemanticChunker(BaseDocumentTransformer): - """Split the text based on semantic similarity. - - Taken from Greg Kamradt's wonderful notebook: - https://github.com/FullStackRetrieval-com/RetrievalTutorials/blob/main/tutorials/LevelsOfTextSplitting/5_Levels_Of_Text_Splitting.ipynb - - All credits to him. - - At a high level, this splits into sentences, then groups into groups of 3 - sentences, and then merges one that are similar in the embedding space. - """ - - def __init__( - self, - embeddings: Embeddings, - buffer_size: int = 1, - add_start_index: bool = False, - breakpoint_threshold_type: BreakpointThresholdType = "percentile", - breakpoint_threshold_amount: Optional[float] = None, - number_of_chunks: Optional[int] = None, - sentence_split_regex: str = r"(?<=[.?!])\s+", - ): - self._add_start_index = add_start_index - self.embeddings = embeddings - self.buffer_size = buffer_size - self.breakpoint_threshold_type = breakpoint_threshold_type - self.number_of_chunks = number_of_chunks - self.sentence_split_regex = sentence_split_regex - if breakpoint_threshold_amount is None: - self.breakpoint_threshold_amount = BREAKPOINT_DEFAULTS[ - breakpoint_threshold_type - ] - else: - self.breakpoint_threshold_amount = breakpoint_threshold_amount - - def _calculate_breakpoint_threshold( - self, distances: List[float] - ) -> Tuple[float, List[float]]: - if self.breakpoint_threshold_type == "percentile": - return cast( - float, - np.percentile(distances, self.breakpoint_threshold_amount), - ), distances - elif self.breakpoint_threshold_type == "standard_deviation": - return cast( - float, - np.mean(distances) - + self.breakpoint_threshold_amount * np.std(distances), - ), distances - elif self.breakpoint_threshold_type == "interquartile": - q1, q3 = np.percentile(distances, [25, 75]) - iqr = q3 - q1 - - return np.mean( - distances - ) + self.breakpoint_threshold_amount * iqr, distances - elif self.breakpoint_threshold_type == "gradient": - # Calculate the threshold based on the distribution of gradient of distance array. # noqa: E501 - distance_gradient = np.gradient(distances, range(0, len(distances))) - return cast( - float, - np.percentile(distance_gradient, self.breakpoint_threshold_amount), - ), distance_gradient - else: - raise ValueError( - f"Got unexpected `breakpoint_threshold_type`: " - f"{self.breakpoint_threshold_type}" - ) - - def _threshold_from_clusters(self, distances: List[float]) -> float: - """ - Calculate the threshold based on the number of chunks. - Inverse of percentile method. 
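A minimal sketch of driving the splitter above. `FakeEmbeddings` is a stand-in, so the breakpoints it produces are meaningless, but the call pattern is the one the class supports:

```python
from langchain_core.embeddings import FakeEmbeddings

from langchain_experimental.text_splitter import SemanticChunker

chunker = SemanticChunker(
    embeddings=FakeEmbeddings(size=16),  # swap in a real embedding model
    breakpoint_threshold_type="percentile",  # split above the 95th-percentile distance
)
chunks = chunker.split_text(
    "Cats purr when content. They also knead soft blankets. "
    "Meanwhile, the stock market closed higher today."
)
print(chunks)
```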
- """ - if self.number_of_chunks is None: - raise ValueError( - "This should never be called if `number_of_chunks` is None." - ) - x1, y1 = len(distances), 0.0 - x2, y2 = 1.0, 100.0 - - x = max(min(self.number_of_chunks, x1), x2) - - # Linear interpolation formula - if x2 == x1: - y = y2 - else: - y = y1 + ((y2 - y1) / (x2 - x1)) * (x - x1) - - y = min(max(y, 0), 100) - - return cast(float, np.percentile(distances, y)) - - def _calculate_sentence_distances( - self, single_sentences_list: List[str] - ) -> Tuple[List[float], List[dict]]: - """Split text into multiple components.""" - - _sentences = [ - {"sentence": x, "index": i} for i, x in enumerate(single_sentences_list) - ] - sentences = combine_sentences(_sentences, self.buffer_size) - embeddings = self.embeddings.embed_documents( - [x["combined_sentence"] for x in sentences] - ) - for i, sentence in enumerate(sentences): - sentence["combined_sentence_embedding"] = embeddings[i] - - return calculate_cosine_distances(sentences) - - def split_text( - self, - text: str, - ) -> List[str]: - # Splitting the essay (by default on '.', '?', and '!') - single_sentences_list = re.split(self.sentence_split_regex, text) - - # having len(single_sentences_list) == 1 would cause the following - # np.percentile to fail. - if len(single_sentences_list) == 1: - return single_sentences_list - # similarly, the following np.gradient would fail - if ( - self.breakpoint_threshold_type == "gradient" - and len(single_sentences_list) == 2 - ): - return single_sentences_list - distances, sentences = self._calculate_sentence_distances(single_sentences_list) - if self.number_of_chunks is not None: - breakpoint_distance_threshold = self._threshold_from_clusters(distances) - breakpoint_array = distances - else: - ( - breakpoint_distance_threshold, - breakpoint_array, - ) = self._calculate_breakpoint_threshold(distances) - - indices_above_thresh = [ - i - for i, x in enumerate(breakpoint_array) - if x > breakpoint_distance_threshold - ] - - chunks = [] - start_index = 0 - - # Iterate through the breakpoints to slice the sentences - for index in indices_above_thresh: - # The end index is the current breakpoint - end_index = index - - # Slice the sentence_dicts from the current start index to the end index - group = sentences[start_index : end_index + 1] - combined_text = " ".join([d["sentence"] for d in group]) - chunks.append(combined_text) - - # Update the start index for the next group - start_index = index + 1 - - # The last group, if any sentences remain - if start_index < len(sentences): - combined_text = " ".join([d["sentence"] for d in sentences[start_index:]]) - chunks.append(combined_text) - return chunks - - def create_documents( - self, texts: List[str], metadatas: Optional[List[dict]] = None - ) -> List[Document]: - """Create documents from a list of texts.""" - _metadatas = metadatas or [{}] * len(texts) - documents = [] - for i, text in enumerate(texts): - start_index = 0 - for chunk in self.split_text(text): - metadata = copy.deepcopy(_metadatas[i]) - if self._add_start_index: - metadata["start_index"] = start_index - new_doc = Document(page_content=chunk, metadata=metadata) - documents.append(new_doc) - start_index += len(chunk) - return documents - - def split_documents(self, documents: Iterable[Document]) -> List[Document]: - """Split documents.""" - texts, metadatas = [], [] - for doc in documents: - texts.append(doc.page_content) - metadatas.append(doc.metadata) - return self.create_documents(texts, metadatas=metadatas) - - def transform_documents( 
- self, documents: Sequence[Document], **kwargs: Any - ) -> Sequence[Document]: - """Transform sequence of documents by splitting them.""" - return self.split_documents(list(documents)) diff --git a/libs/experimental/langchain_experimental/tools/__init__.py b/libs/experimental/langchain_experimental/tools/__init__.py deleted file mode 100644 index 46da4e17c8c1a..0000000000000 --- a/libs/experimental/langchain_experimental/tools/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Experimental **Python REPL** tools.""" - -from langchain_experimental.tools.python.tool import PythonAstREPLTool, PythonREPLTool - -__all__ = ["PythonREPLTool", "PythonAstREPLTool"] diff --git a/libs/experimental/langchain_experimental/tools/python/__init__.py b/libs/experimental/langchain_experimental/tools/python/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/langchain_experimental/tools/python/tool.py b/libs/experimental/langchain_experimental/tools/python/tool.py deleted file mode 100644 index 4d51e3eed4e5e..0000000000000 --- a/libs/experimental/langchain_experimental/tools/python/tool.py +++ /dev/null @@ -1,148 +0,0 @@ -"""A tool for running python code in a REPL.""" - -import ast -import re -import sys -from contextlib import redirect_stdout -from io import StringIO -from typing import Any, Dict, Optional, Type - -from langchain_core.callbacks.manager import ( - AsyncCallbackManagerForToolRun, - CallbackManagerForToolRun, -) -from langchain_core.runnables.config import run_in_executor -from langchain_core.tools import BaseTool -from pydantic import BaseModel, Field, model_validator - -from langchain_experimental.utilities.python import PythonREPL - - -def _get_default_python_repl() -> PythonREPL: - return PythonREPL(_globals=globals(), _locals=None) - - -def sanitize_input(query: str) -> str: - """Sanitize input to the python REPL. - - Remove whitespace, backtick & python (if llm mistakes python console as terminal) - - Args: - query: The query to sanitize - - Returns: - str: The sanitized query - """ - - # Removes `, whitespace & python from start - query = re.sub(r"^(\s|`)*(?i:python)?\s*", "", query) - # Removes whitespace & ` from end - query = re.sub(r"(\s|`)*$", "", query) - return query - - -class PythonREPLTool(BaseTool): - """Tool for running python code in a REPL.""" - - name: str = "Python_REPL" - description: str = ( - "A Python shell. Use this to execute python commands. " - "Input should be a valid python command. " - "If you want to see the output of a value, you should print it out " - "with `print(...)`." - ) - python_repl: PythonREPL = Field(default_factory=_get_default_python_repl) - sanitize_input: bool = True - - def _run( - self, - query: str, - run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> Any: - """Use the tool.""" - if self.sanitize_input: - query = sanitize_input(query) - return self.python_repl.run(query) - - async def _arun( - self, - query: str, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, - ) -> Any: - """Use the tool asynchronously.""" - if self.sanitize_input: - query = sanitize_input(query) - - return await run_in_executor(None, self.run, query) - - -class PythonInputs(BaseModel): - """Python inputs.""" - - query: str = Field(description="code snippet to run") - - -class PythonAstREPLTool(BaseTool): - """Tool for running python code in a REPL.""" - - name: str = "python_repl_ast" - description: str = ( - "A Python shell. Use this to execute python commands. 
" - "Input should be a valid python command. " - "When using this tool, sometimes output is abbreviated - " - "make sure it does not look abbreviated before using it in your answer." - ) - globals: Optional[Dict] = Field(default_factory=dict) - locals: Optional[Dict] = Field(default_factory=dict) - sanitize_input: bool = True - args_schema: Type[BaseModel] = PythonInputs - - @model_validator(mode="before") - @classmethod - def validate_python_version(cls, values: Dict) -> Any: - """Validate valid python version.""" - if sys.version_info < (3, 9): - raise ValueError( - "This tool relies on Python 3.9 or higher " - "(as it uses new functionality in the `ast` module, " - f"you have Python version: {sys.version}" - ) - return values - - def _run( - self, - query: str, - run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> str: - """Use the tool.""" - try: - if self.sanitize_input: - query = sanitize_input(query) - tree = ast.parse(query) - module = ast.Module(tree.body[:-1], type_ignores=[]) - exec(ast.unparse(module), self.globals, self.locals) # type: ignore - module_end = ast.Module(tree.body[-1:], type_ignores=[]) - module_end_str = ast.unparse(module_end) # type: ignore - io_buffer = StringIO() - try: - with redirect_stdout(io_buffer): - ret = eval(module_end_str, self.globals, self.locals) - if ret is None: - return io_buffer.getvalue() - else: - return ret - except Exception: - with redirect_stdout(io_buffer): - exec(module_end_str, self.globals, self.locals) - return io_buffer.getvalue() - except Exception as e: - return "{}: {}".format(type(e).__name__, str(e)) - - async def _arun( - self, - query: str, - run_manager: Optional[AsyncCallbackManagerForToolRun] = None, - ) -> Any: - """Use the tool asynchronously.""" - - return await run_in_executor(None, self._run, query) diff --git a/libs/experimental/langchain_experimental/tot/__init__.py b/libs/experimental/langchain_experimental/tot/__init__.py deleted file mode 100644 index 88b1a276be33c..0000000000000 --- a/libs/experimental/langchain_experimental/tot/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Implementation of a **Tree of Thought (ToT)** chain based on the paper -[Large Language Model Guided Tree-of-Thought](https://arxiv.org/pdf/2305.08291.pdf). - -The Tree of Thought (ToT) chain uses a tree structure to explore the space of -possible solutions to a problem. 
- -""" - -from langchain_experimental.tot.base import ToTChain -from langchain_experimental.tot.checker import ToTChecker - -__all__ = ["ToTChain", "ToTChecker"] diff --git a/libs/experimental/langchain_experimental/tot/base.py b/libs/experimental/langchain_experimental/tot/base.py deleted file mode 100644 index b7ae696075fc1..0000000000000 --- a/libs/experimental/langchain_experimental/tot/base.py +++ /dev/null @@ -1,143 +0,0 @@ -from __future__ import annotations - -from textwrap import indent -from typing import Any, Dict, List, Optional, Type - -from langchain.base_language import BaseLanguageModel -from langchain.chains.base import Chain -from langchain_core.callbacks.manager import ( - AsyncCallbackManagerForChainRun, - CallbackManagerForChainRun, -) -from pydantic import ConfigDict - -from langchain_experimental.tot.checker import ToTChecker -from langchain_experimental.tot.controller import ToTController -from langchain_experimental.tot.memory import ToTDFSMemory -from langchain_experimental.tot.thought import Thought, ThoughtValidity -from langchain_experimental.tot.thought_generation import ( - BaseThoughtGenerationStrategy, - ProposePromptStrategy, -) - - -class ToTChain(Chain): - """ - Chain implementing the Tree of Thought (ToT). - """ - - llm: BaseLanguageModel - """ - Language model to use. It must be set to produce different variations for - the same prompt. - """ - checker: ToTChecker - """ToT Checker to use.""" - output_key: str = "response" #: :meta private: - k: int = 10 - """The maximum number of conversation rounds""" - c: int = 3 - """The number of children to explore at each node""" - tot_memory: ToTDFSMemory = ToTDFSMemory() - tot_controller: ToTController = ToTController() - tot_strategy_class: Type[BaseThoughtGenerationStrategy] = ProposePromptStrategy - verbose_llm: bool = False - - model_config = ConfigDict( - arbitrary_types_allowed=True, - extra="forbid", - ) - - @classmethod - def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> ToTChain: - """ - Create a ToTChain from a language model. - - :param llm: The language model to use. - :param kwargs: Additional arguments to pass to the ToTChain constructor. - """ - return cls(llm=llm, **kwargs) - - def __init__(self, **kwargs: Any): - super().__init__(**kwargs) - self.tot_controller.c = self.c - - @property - def input_keys(self) -> List[str]: - """Will be whatever keys the prompt expects. - - :meta private: - """ - return ["problem_description"] - - @property - def output_keys(self) -> List[str]: - """Will always return text key. - - :meta private: - """ - return [self.output_key] - - def log_thought( - self, - thought: Thought, - level: int, - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> None: - if run_manager: - colors = { - ThoughtValidity.VALID_FINAL: "green", - ThoughtValidity.VALID_INTERMEDIATE: "yellow", - ThoughtValidity.INVALID: "red", - } - text = indent(f"Thought: {thought.text}\n", prefix=" " * level) - run_manager.on_text( - text=text, color=colors[thought.validity], verbose=self.verbose - ) - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() - if run_manager: - run_manager.on_text(text="Starting the ToT solve procedure.\n") - - problem_description = inputs["problem_description"] - checker_inputs = {"problem_description": problem_description} - thoughts_path: tuple[str, ...] 
= () - thought_generator = self.tot_strategy_class( # type: ignore[call-arg] - llm=self.llm, c=self.c, verbose=self.verbose_llm - ) - - level = 0 - for _ in range(self.k): - level = self.tot_memory.level - thought_text = thought_generator.next_thought( - problem_description, thoughts_path, callbacks=_run_manager.get_child() - ) - checker_inputs["thoughts"] = thoughts_path + (thought_text,) - thought_validity = self.checker( - checker_inputs, callbacks=_run_manager.get_child() - )["validity"] - thought = Thought(text=thought_text, validity=thought_validity) - if thought.validity == ThoughtValidity.VALID_FINAL: - self.log_thought(thought, level, run_manager) - return {self.output_key: thought.text} - self.tot_memory.store(thought) - self.log_thought(thought, level, run_manager) - thoughts_path = self.tot_controller(self.tot_memory) - - return {self.output_key: "No solution found"} - - async def _acall( - self, - inputs: Dict[str, Any], - run_manager: Optional[AsyncCallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - raise NotImplementedError("Async not implemented yet") - - @property - def _chain_type(self) -> str: - return "tot" diff --git a/libs/experimental/langchain_experimental/tot/checker.py b/libs/experimental/langchain_experimental/tot/checker.py deleted file mode 100644 index 2642125625733..0000000000000 --- a/libs/experimental/langchain_experimental/tot/checker.py +++ /dev/null @@ -1,52 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional, Tuple - -from langchain.chains.base import Chain -from langchain_core.callbacks.manager import CallbackManagerForChainRun - -from langchain_experimental.tot.thought import ThoughtValidity - - -class ToTChecker(Chain, ABC): - """ - Tree of Thought (ToT) checker. - - This is an abstract ToT checker that must be implemented by the user. You - can implement a simple rule-based checker or a more sophisticated - neural network based classifier. - """ - - output_key: str = "validity" #: :meta private: - - @property - def input_keys(self) -> List[str]: - """The checker input keys. - - :meta private: - """ - return ["problem_description", "thoughts"] - - @property - def output_keys(self) -> List[str]: - """The checker output keys. - - :meta private: - """ - return [self.output_key] - - @abstractmethod - def evaluate( - self, - problem_description: str, - thoughts: Tuple[str, ...] = (), - ) -> ThoughtValidity: - """ - Evaluate the response to the problem description and return the solution type. - """ - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, ThoughtValidity]: - return {self.output_key: self.evaluate(**inputs)} diff --git a/libs/experimental/langchain_experimental/tot/controller.py b/libs/experimental/langchain_experimental/tot/controller.py deleted file mode 100644 index d2a7a6fbd3821..0000000000000 --- a/libs/experimental/langchain_experimental/tot/controller.py +++ /dev/null @@ -1,54 +0,0 @@ -from typing import Tuple - -from langchain_experimental.tot.memory import ToTDFSMemory -from langchain_experimental.tot.thought import ThoughtValidity - - -class ToTController: - """ - Tree of Thought (ToT) controller. - - This is a version of a ToT controller, dubbed in the paper as a "Simple - Controller". - - It has one parameter `c` which is the number of children to explore for each - thought. - """ - - def __init__(self, c: int = 3): - """ - Initialize the controller. - - Args: - c: The number of children to explore at each node. 
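Because `ToTChecker` above is abstract, running the chain requires a user-supplied checker. A minimal, illustrative sketch; the keyword rule and model choice are placeholders, not the library's own checker:

```python
from typing import Tuple

from langchain_community.chat_models import ChatOpenAI

from langchain_experimental.tot.base import ToTChain
from langchain_experimental.tot.checker import ToTChecker
from langchain_experimental.tot.thought import ThoughtValidity


class KeywordChecker(ToTChecker):
    """Toy rule: a thought containing 'ANSWER:' is accepted as final."""

    def evaluate(
        self, problem_description: str, thoughts: Tuple[str, ...] = ()
    ) -> ThoughtValidity:
        last = thoughts[-1] if thoughts else ""
        if "ANSWER:" in last:
            return ThoughtValidity.VALID_FINAL
        return ThoughtValidity.VALID_INTERMEDIATE


chain = ToTChain.from_llm(llm=ChatOpenAI(), checker=KeywordChecker(), k=5, c=3)
print(chain.run(problem_description="Propose and verify: what is 17 * 24?"))
```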
- """ - self.c = c - - def __call__(self, memory: ToTDFSMemory) -> Tuple[str, ...]: - next_thought = memory.top() - parent_thought = memory.top_parent() - validity = ( - ThoughtValidity.VALID_INTERMEDIATE - if next_thought is None - else next_thought.validity - ) - - # 1 if the current partial solution is invalid, backtrack to the parent - # thought. - if validity == ThoughtValidity.INVALID: - memory.pop() - next_thought = memory.top() - if next_thought and len(next_thought.children) >= self.c: - memory.pop() - - # 2 if the current partial solution is valid but C children were - # explored and yet failed to find a final solution, backtrack to the - # parent thought. - elif ( - validity == ThoughtValidity.VALID_INTERMEDIATE - and parent_thought - and len(parent_thought.children) >= self.c - ): - memory.pop(2) - - return tuple(thought.text for thought in memory.current_path()) diff --git a/libs/experimental/langchain_experimental/tot/memory.py b/libs/experimental/langchain_experimental/tot/memory.py deleted file mode 100644 index 5c9de838239c5..0000000000000 --- a/libs/experimental/langchain_experimental/tot/memory.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import annotations - -from typing import List, Optional - -from langchain_experimental.tot.thought import Thought - - -class ToTDFSMemory: - """ - Memory for the Tree of Thought (ToT) chain. - - It is implemented as a stack of - thoughts. This allows for a depth first search (DFS) of the ToT. - """ - - def __init__(self, stack: Optional[List[Thought]] = None): - self.stack: List[Thought] = stack or [] - - def top(self) -> Optional[Thought]: - "Get the top of the stack without popping it." - return self.stack[-1] if len(self.stack) > 0 else None - - def pop(self, n: int = 1) -> Optional[Thought]: - "Pop the top n elements of the stack and return the last one." - if len(self.stack) < n: - return None - for _ in range(n): - node = self.stack.pop() - return node - - def top_parent(self) -> Optional[Thought]: - "Get the parent of the top of the stack without popping it." - return self.stack[-2] if len(self.stack) > 1 else None - - def store(self, node: Thought) -> None: - "Add a node on the top of the stack." - if len(self.stack) > 0: - self.stack[-1].children.add(node) - self.stack.append(node) - - @property - def level(self) -> int: - "Return the current level of the stack." - return len(self.stack) - - def current_path(self) -> List[Thought]: - "Return the thoughts path." - return self.stack[:] diff --git a/libs/experimental/langchain_experimental/tot/prompts.py b/libs/experimental/langchain_experimental/tot/prompts.py deleted file mode 100644 index 78d11a10aac92..0000000000000 --- a/libs/experimental/langchain_experimental/tot/prompts.py +++ /dev/null @@ -1,146 +0,0 @@ -import json -from textwrap import dedent -from typing import List - -from langchain_core.output_parsers import BaseOutputParser -from langchain_core.prompts import PromptTemplate - -from langchain_experimental.tot.thought import ThoughtValidity - - -def get_cot_prompt() -> PromptTemplate: - """Get the prompt for the Chain of Thought (CoT) chain.""" - - return PromptTemplate( - template_format="jinja2", - input_variables=["problem_description", "thoughts"], - template=dedent( - """ - You are an intelligent agent that is generating one thought at a time in - a tree of thoughts setting. - - PROBLEM - - {{problem_description}} - - {% if thoughts %} - THOUGHTS - - {% for thought in thoughts %} - {{ thought }} - {% endfor %} - {% endif %} - - Let's think step by step. 
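A tiny illustration of the stack semantics above, using `Thought` and `ThoughtValidity` from `tot/thought.py` later in this diff; the thought texts are made up:

```python
from langchain_experimental.tot.memory import ToTDFSMemory
from langchain_experimental.tot.thought import Thought, ThoughtValidity

memory = ToTDFSMemory()
memory.store(Thought(text="root", validity=ThoughtValidity.VALID_INTERMEDIATE))
memory.store(Thought(text="child", validity=ThoughtValidity.VALID_INTERMEDIATE))
assert memory.level == 2
assert memory.top().text == "child"        # peek without popping
assert memory.top_parent().text == "root"
memory.pop()                               # backtrack one level
assert memory.top().text == "root"
```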
- """ - ).strip(), - ) - - -class JSONListOutputParser(BaseOutputParser): - """Parse the output of a PROPOSE_PROMPT response.""" - - @property - def _type(self) -> str: - return "json_list" - - def parse(self, text: str) -> List[str]: - """Parse the output of an LLM call.""" - - json_string = text.split("```json")[1].strip().strip("```").strip() - try: - return json.loads(json_string) - except json.JSONDecodeError: - return [] - - -def get_propose_prompt() -> PromptTemplate: - """Get the prompt for the PROPOSE_PROMPT chain.""" - - return PromptTemplate( - template_format="jinja2", - input_variables=["problem_description", "thoughts", "n"], - output_parser=JSONListOutputParser(), - template=dedent( - """ - You are an intelligent agent that is generating thoughts in a tree of - thoughts setting. - - The output should be a markdown code snippet formatted as a JSON list of - strings, including the leading and trailing "```json" and "```": - - ```json - [ - "", - "", - "" - ] - ``` - - PROBLEM - - {{ problem_description }} - - {% if thoughts %} - VALID THOUGHTS - - {% for thought in thoughts %} - {{ thought }} - {% endfor %} - - Possible next {{ n }} valid thoughts based on the last valid thought: - {% else %} - - Possible next {{ n }} valid thoughts based on the PROBLEM: - {%- endif -%} - """ - ).strip(), - ) - - -class CheckerOutputParser(BaseOutputParser): - """Parse and check the output of the language model.""" - - def parse(self, text: str) -> ThoughtValidity: - """Parse the output of the language model.""" - text = text.upper() - if "INVALID" in text: - return ThoughtValidity.INVALID - elif "INTERMEDIATE" in text: - return ThoughtValidity.VALID_INTERMEDIATE - elif "VALID" in text: - return ThoughtValidity.VALID_FINAL - else: - return ThoughtValidity.INVALID - - @property - def _type(self) -> str: - return "tot_llm_checker_output" - - -CHECKER_PROMPT = PromptTemplate( - input_variables=["problem_description", "thoughts"], - template=dedent( - """ - You are an intelligent agent, validating thoughts of another intelligent agent. - - PROBLEM - - {problem_description} - - THOUGHTS - - {thoughts} - - Evaluate the thoughts and respond with one word. - - - Respond VALID if the last thought is a valid final solution to the - problem. - - Respond INVALID if the last thought is invalid. - - Respond INTERMEDIATE if the last thought is valid but not the final - solution to the problem. 
-
-    This chain of thoughts is"""
-    ).strip(),
-    output_parser=CheckerOutputParser(),
-)
diff --git a/libs/experimental/langchain_experimental/tot/thought.py b/libs/experimental/langchain_experimental/tot/thought.py
deleted file mode 100644
index c68cab56d6939..0000000000000
--- a/libs/experimental/langchain_experimental/tot/thought.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from __future__ import annotations
-
-from enum import Enum
-from typing import Set
-
-from pydantic import BaseModel, Field
-
-
-class ThoughtValidity(Enum):
-    """Enum for the validity of a thought."""
-
-    VALID_INTERMEDIATE = 0
-    VALID_FINAL = 1
-    INVALID = 2
-
-
-class Thought(BaseModel):
-    """A thought in the ToT."""
-
-    text: str
-    validity: ThoughtValidity
-    children: Set[Thought] = Field(default_factory=set)
-
-    def __hash__(self) -> int:
-        return id(self)
diff --git a/libs/experimental/langchain_experimental/tot/thought_generation.py b/libs/experimental/langchain_experimental/tot/thought_generation.py
deleted file mode 100644
index 9997789d440e8..0000000000000
--- a/libs/experimental/langchain_experimental/tot/thought_generation.py
+++ /dev/null
@@ -1,95 +0,0 @@
-"""
-We provide two strategies for generating thoughts in the Tree of Thoughts (ToT)
-framework to avoid repetition:
-
-1. ``SampleCoTStrategy``: sample i.i.d. thoughts from a Chain-of-Thought (CoT)
-   prompt.
-2. ``ProposePromptStrategy``: sequentially propose thoughts using a
-   "propose prompt".
-
-These strategies ensure that the language model generates diverse and
-non-repeating thoughts, which are crucial for problem-solving tasks that require
-exploration.
-"""
-
-from abc import abstractmethod
-from typing import Any, Dict, List, Tuple
-
-from langchain.chains.llm import LLMChain
-from langchain_core.prompts.base import BasePromptTemplate
-from pydantic import Field
-
-from langchain_experimental.tot.prompts import get_cot_prompt, get_propose_prompt
-
-
-class BaseThoughtGenerationStrategy(LLMChain):
-    """
-    Base class for a thought generation strategy.
-    """
-
-    c: int = 3
-    """The number of children thoughts to propose at each step."""
-
-    @abstractmethod
-    def next_thought(
-        self,
-        problem_description: str,
-        thoughts_path: Tuple[str, ...] = (),
-        **kwargs: Any,
-    ) -> str:
-        """
-        Generate the next thought given the problem description and the thoughts
-        generated so far.
-        """
-
-
-class SampleCoTStrategy(BaseThoughtGenerationStrategy):
-    """
-    Sample strategy from a Chain-of-Thought (CoT) prompt.
-
-    This strategy works better when the thought space is rich, such as when each
-    thought is a paragraph. Independent and identically distributed samples
-    lead to diversity, which helps to avoid repetition.
-    """
-
-    prompt: BasePromptTemplate = Field(default_factory=get_cot_prompt)
-
-    def next_thought(
-        self,
-        problem_description: str,
-        thoughts_path: Tuple[str, ...] = (),
-        **kwargs: Any,
-    ) -> str:
-        response_text = self.predict_and_parse(
-            problem_description=problem_description, thoughts=thoughts_path, **kwargs
-        )
-        return response_text if isinstance(response_text, str) else ""
-
-
-class ProposePromptStrategy(BaseThoughtGenerationStrategy):
-    """
-    Strategy that sequentially uses a "propose prompt".
-
-    This strategy works better when the thought space is more constrained, such
-    as when each thought is just a word or a line. Proposing different thoughts
-    in the same prompt completion helps to avoid duplication.
-    """
-
-    prompt: BasePromptTemplate = Field(default_factory=get_propose_prompt)
-    tot_memory: Dict[Tuple[str, ...], List[str]] = Field(default_factory=dict)
-
-    def next_thought(
-        self,
-        problem_description: str,
-        thoughts_path: Tuple[str, ...]
= (), - **kwargs: Any, - ) -> str: - if thoughts_path not in self.tot_memory or not self.tot_memory[thoughts_path]: - new_thoughts = self.predict_and_parse( - problem_description=problem_description, - thoughts=thoughts_path, - n=self.c, - **kwargs, - ) - if not new_thoughts: - return "" - if isinstance(new_thoughts, list): - self.tot_memory[thoughts_path] = new_thoughts[::-1] - else: - return "" - return self.tot_memory[thoughts_path].pop() diff --git a/libs/experimental/langchain_experimental/utilities/__init__.py b/libs/experimental/langchain_experimental/utilities/__init__.py deleted file mode 100644 index 4b7d64518a8dd..0000000000000 --- a/libs/experimental/langchain_experimental/utilities/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Utility that simulates a standalone **Python REPL**.""" - -from langchain_experimental.utilities.python import PythonREPL - -__all__ = ["PythonREPL"] diff --git a/libs/experimental/langchain_experimental/utilities/python.py b/libs/experimental/langchain_experimental/utilities/python.py deleted file mode 100644 index b29164fc6f93e..0000000000000 --- a/libs/experimental/langchain_experimental/utilities/python.py +++ /dev/null @@ -1,90 +0,0 @@ -import functools -import logging -import multiprocessing -import re -import sys -from io import StringIO -from typing import Dict, Optional - -from pydantic import BaseModel, Field - -logger = logging.getLogger(__name__) - - -@functools.lru_cache(maxsize=None) -def warn_once() -> None: - """Warn once about the dangers of PythonREPL.""" - logger.warning("Python REPL can execute arbitrary code. Use with caution.") - - -class PythonREPL(BaseModel): - """Simulates a standalone Python REPL.""" - - globals: Optional[Dict] = Field(default_factory=dict, alias="_globals") - locals: Optional[Dict] = Field(default_factory=dict, alias="_locals") - - @staticmethod - def sanitize_input(query: str) -> str: - """Sanitize input to the python REPL. - - Remove whitespace, backtick & python - (if llm mistakes python console as terminal) - - Args: - query: The query to sanitize - - Returns: - str: The sanitized query - """ - query = re.sub(r"^(\s|`)*(?i:python)?\s*", "", query) - query = re.sub(r"(\s|`)*$", "", query) - return query - - @classmethod - def worker( - cls, - command: str, - globals: Optional[Dict], - locals: Optional[Dict], - queue: multiprocessing.Queue, - ) -> None: - old_stdout = sys.stdout - sys.stdout = mystdout = StringIO() - try: - cleaned_command = cls.sanitize_input(command) - exec(cleaned_command, globals, locals) - sys.stdout = old_stdout - queue.put(mystdout.getvalue()) - except Exception as e: - sys.stdout = old_stdout - queue.put(repr(e)) - - def run(self, command: str, timeout: Optional[int] = None) -> str: - """Run command with own globals/locals and returns anything printed. 
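As an aside, a minimal sketch of what `sanitize_input` above strips; it assumes `langchain-experimental` is still installed, and the sample strings are invented:

```python
# Hypothetical inputs to PythonREPL.sanitize_input.
from langchain_experimental.utilities import PythonREPL

# A fenced "python" block pasted by the model is reduced to bare code ...
assert PythonREPL.sanitize_input("```python\nprint('hi')\n```") == "print('hi')"
# ... and stray whitespace plus a leading "python" prefix are dropped.
assert PythonREPL.sanitize_input("   python print('hi')  ") == "print('hi')"
```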
- Timeout after the specified number of seconds.""" - - # Warn against dangers of PythonREPL - warn_once() - - queue: multiprocessing.Queue = multiprocessing.Queue() - - # Only use multiprocessing if we are enforcing a timeout - if timeout is not None: - # create a Process - p = multiprocessing.Process( - target=self.worker, args=(command, self.globals, self.locals, queue) - ) - - # start it - p.start() - - # wait for the process to finish or kill it after timeout seconds - p.join(timeout) - - if p.is_alive(): - p.terminate() - return "Execution timed out" - else: - self.worker(command, self.globals, self.locals, queue) - # get the result from the worker function - return queue.get() diff --git a/libs/experimental/langchain_experimental/video_captioning/__init__.py b/libs/experimental/langchain_experimental/video_captioning/__init__.py deleted file mode 100644 index 4cf101f1eb504..0000000000000 --- a/libs/experimental/langchain_experimental/video_captioning/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from langchain_experimental.video_captioning.base import VideoCaptioningChain - -__all__ = ["VideoCaptioningChain"] diff --git a/libs/experimental/langchain_experimental/video_captioning/base.py b/libs/experimental/langchain_experimental/video_captioning/base.py deleted file mode 100644 index 66d54b39f17f3..0000000000000 --- a/libs/experimental/langchain_experimental/video_captioning/base.py +++ /dev/null @@ -1,149 +0,0 @@ -from typing import Any, Dict, List, Optional - -from langchain.chains.base import Chain -from langchain_core.callbacks import CallbackManagerForChainRun -from langchain_core.language_models import BaseLanguageModel -from langchain_core.prompts import PromptTemplate -from pydantic import ConfigDict - -from langchain_experimental.video_captioning.services.audio_service import ( - AudioProcessor, -) -from langchain_experimental.video_captioning.services.caption_service import ( - CaptionProcessor, -) -from langchain_experimental.video_captioning.services.combine_service import ( - CombineProcessor, -) -from langchain_experimental.video_captioning.services.image_service import ( - ImageProcessor, -) -from langchain_experimental.video_captioning.services.srt_service import SRTProcessor - - -class VideoCaptioningChain(Chain): - """ - Video Captioning Chain. - """ - - llm: BaseLanguageModel - assemblyai_key: str - prompt: Optional[PromptTemplate] = None - verbose: bool = True - use_logging: Optional[bool] = True - frame_skip: int = -1 - image_delta_threshold: int = 3000000 - closed_caption_char_limit: int = 20 - closed_caption_similarity_threshold: int = 80 - use_unclustered_video_models: bool = False - - model_config = ConfigDict( - arbitrary_types_allowed=True, - extra="allow", - ) - - @property - def input_keys(self) -> List[str]: - return ["video_file_path"] - - @property - def output_keys(self) -> List[str]: - return ["srt"] - - def _call( - self, - inputs: Dict[str, Any], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: - if "video_file_path" not in inputs: - raise ValueError( - "Missing 'video_file_path' in inputs for video captioning." - ) - video_file_path = inputs["video_file_path"] - nl = "\n" - - run_manager.on_text( - "Loading processors..." 
+ nl - ) if self.use_logging and run_manager else None - - audio_processor = AudioProcessor(api_key=self.assemblyai_key) - image_processor = ImageProcessor( - frame_skip=self.frame_skip, threshold=self.image_delta_threshold - ) - caption_processor = CaptionProcessor( - llm=self.llm, - verbose=self.verbose, - similarity_threshold=self.closed_caption_similarity_threshold, - use_unclustered_models=self.use_unclustered_video_models, - ) - combine_processor = CombineProcessor( - llm=self.llm, - verbose=self.verbose, - char_limit=self.closed_caption_char_limit, - ) - srt_processor = SRTProcessor() - - run_manager.on_text( - "Finished loading processors." - + nl - + "Generating subtitles from audio..." - + nl - ) if self.use_logging and run_manager else None - - # Get models for speech to text subtitles - audio_models = audio_processor.process(video_file_path, run_manager) - run_manager.on_text( - "Finished generating subtitles:" - + nl - + f"{nl.join(str(obj) for obj in audio_models)}" - + nl - + "Generating closed captions from video..." - + nl - ) if self.use_logging and run_manager else None - - # Get models for image frame description - image_models = image_processor.process(video_file_path, run_manager) - run_manager.on_text( - "Finished generating closed captions:" - + nl - + f"{nl.join(str(obj) for obj in image_models)}" - + nl - + "Refining closed captions..." - + nl - ) if self.use_logging and run_manager else None - - # Get models for video event closed-captions - video_models = caption_processor.process(image_models, run_manager) - run_manager.on_text( - "Finished refining closed captions:" - + nl - + f"{nl.join(str(obj) for obj in video_models)}" - + nl - + "Combining subtitles with closed captions..." - + nl - ) if self.use_logging and run_manager else None - - # Combine the subtitle models with the closed-caption models - caption_models = combine_processor.process( - video_models, audio_models, run_manager - ) - run_manager.on_text( - "Finished combining subtitles with closed captions:" - + nl - + f"{nl.join(str(obj) for obj in caption_models)}" - + nl - + "Generating SRT file..." - + nl - ) if self.use_logging and run_manager else None - - # Convert the combined model to SRT format - srt_content = srt_processor.process(caption_models) - run_manager.on_text( - "Finished generating srt file." 
+ nl - ) if self.use_logging and run_manager else None - - return {"srt": srt_content} - - @property - def _chain_type(self) -> str: - return "video_captioning_chain" diff --git a/libs/experimental/langchain_experimental/video_captioning/models.py b/libs/experimental/langchain_experimental/video_captioning/models.py deleted file mode 100644 index b464b435d7d99..0000000000000 --- a/libs/experimental/langchain_experimental/video_captioning/models.py +++ /dev/null @@ -1,150 +0,0 @@ -from datetime import datetime -from typing import Any - - -class BaseModel: - def __init__(self, start_time: int, end_time: int) -> None: - # Start and end times representing milliseconds - self._start_time = start_time - self._end_time = end_time - - @property - def start_time(self) -> int: - return self._start_time - - @start_time.setter - def start_time(self, value: int) -> None: - self._start_time = value - - @property - def end_time(self) -> int: - return self._end_time - - @end_time.setter - def end_time(self, value: int) -> None: - self._end_time = value - - def __str__(self) -> str: - return f"start_time: {self.start_time}, end_time: {self.end_time}" - - @classmethod - def from_srt(cls, start_time: str, end_time: str, *args: Any) -> "BaseModel": - return cls( - cls._srt_time_to_ms(start_time), cls._srt_time_to_ms(end_time), *args - ) - - @staticmethod - def _srt_time_to_ms(srt_time_string: str) -> int: - # Parse SRT time string into a datetime object - time_format = "%H:%M:%S,%f" - dt = datetime.strptime(srt_time_string, time_format) - ms = dt.microsecond // 1000 - return dt.second * 1000 + ms - - -class VideoModel(BaseModel): - def __init__(self, start_time: int, end_time: int, image_description: str) -> None: - super().__init__(start_time, end_time) - self._image_description = image_description - - @property - def image_description(self) -> str: - return self._image_description - - @image_description.setter - def image_description(self, value: str) -> None: - self._image_description = value - - def __str__(self) -> str: - return f"{super().__str__()}, image_description: {self.image_description}" - - def similarity_score(self, other: "VideoModel") -> float: - # Tokenize the image descriptions by extracting individual words, stripping - # trailing 's' (plural = singular) and converting the words to lowercase in - # order to be case-insensitive - self_tokenized = set( - word.lower().rstrip("s") for word in self.image_description.split() - ) - other_tokenized = set( - word.lower().rstrip("s") for word in other.image_description.split() - ) - - # Find common words - common_words = self_tokenized.intersection(other_tokenized) - - # Calculate similarity score - similarity_score = ( - len(common_words) / max(len(self_tokenized), len(other_tokenized)) * 100 - ) - - return similarity_score - - -class AudioModel(BaseModel): - def __init__(self, start_time: int, end_time: int, subtitle_text: str) -> None: - super().__init__(start_time, end_time) - self._subtitle_text = subtitle_text - - @property - def subtitle_text(self) -> str: - return self._subtitle_text - - @subtitle_text.setter - def subtitle_text(self, value: str) -> None: - self._subtitle_text = value - - def __str__(self) -> str: - return f"{super().__str__()}, subtitle_text: {self.subtitle_text}" - - -class CaptionModel(BaseModel): - def __init__(self, start_time: int, end_time: int, closed_caption: str) -> None: - super().__init__(start_time, end_time) - self._closed_caption = closed_caption - - @property - def closed_caption(self) -> str: - return 
self._closed_caption - - @closed_caption.setter - def closed_caption(self, value: str) -> None: - self._closed_caption = value - - def add_subtitle_text(self, subtitle_text: str) -> "CaptionModel": - self._closed_caption = self._closed_caption + " " + subtitle_text - return self - - def __str__(self) -> str: - return f"{super().__str__()}, closed_caption: {self.closed_caption}" - - def to_srt_entry(self, index: int) -> str: - def _ms_to_srt_time(ms: int) -> str: - """Converts milliseconds to SRT time format 'HH:MM:SS,mmm'.""" - hours = int(ms // 3600000) - minutes = int((ms % 3600000) // 60000) - seconds = int((ms % 60000) // 1000) - milliseconds = int(ms % 1000) - - return f"{hours:02}:{minutes:02}:{seconds:02},{milliseconds:03}" - - return "\n".join( - [ - f"""{index} - {_ms_to_srt_time(self._start_time)} --> {_ms_to_srt_time(self._end_time)} - {self._closed_caption}""", - ] - ) - - @classmethod - def from_audio_model(cls, audio_model: AudioModel) -> "CaptionModel": - return cls( - audio_model.start_time, audio_model.end_time, audio_model.subtitle_text - ) - - @classmethod - def from_video_model(cls, video_model: VideoModel) -> "CaptionModel": - return cls( - video_model.start_time, - video_model.end_time, - f"[{video_model.image_description}]", - ) diff --git a/libs/experimental/langchain_experimental/video_captioning/prompts.py b/libs/experimental/langchain_experimental/video_captioning/prompts.py deleted file mode 100644 index 1f6e49355d7c4..0000000000000 --- a/libs/experimental/langchain_experimental/video_captioning/prompts.py +++ /dev/null @@ -1,90 +0,0 @@ -# flake8: noqa -from langchain_core.prompts import ( - ChatPromptTemplate, - HumanMessagePromptTemplate, -) -from langchain_core.messages import SystemMessage - -JOIN_SIMILAR_VIDEO_MODELS_TEMPLATE = """ -I will provide you with several descriptions depicting events in one scene. -Your task is to combine these descriptions into one description that contains only the important details from all descriptions. -Especially if the two descriptions are very similar, make sure your response doesn't repeat itself. -IMPORTANT: Do not make up a description. Do not make up events or anything that happened outside of the descriptions I am to provide you. -I will now provide an example for you to learn from: -Example: Description 1: The cat is at the beach, Description 2: The cat is eating lunch, Description 3: The cat is enjoying his time with friends -Result: The cat is at the beach, eating lunch with his friends -Now that I gave you the example, I will explain to you what exactly you need to return: -Just give back one description, the description which is a combination of the descriptions you are provided with. -Do not include anything else in your response other than the combined description. -IMPORTANT: the output in your response should be 'Result:text', where text is the description you generated. -Here is the data for you to work with in order to formulate your response: -""" - -JOIN_SIMILAR_VIDEO_MODELS_PROMPT = ChatPromptTemplate( # type: ignore[call-arg] - messages=[ - SystemMessage(content=JOIN_SIMILAR_VIDEO_MODELS_TEMPLATE), - HumanMessagePromptTemplate.from_template("{descriptions}"), - ] -) - -REMOVE_VIDEO_MODEL_DESCRIPTION_TEMPLATE = """ -Given a closed-caption description of an image or scene, remove any common prefixes like "an image of," "a scene of," or "footage of." -For instance, if the description is "an image of a beautiful landscape," the modified version should be "a beautiful landscape." 
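For illustration, a minimal sketch of the millisecond-to-SRT conversion implemented by `to_srt_entry` above; it assumes `langchain-experimental` is still installed, and the timestamps and caption text are invented:

```python
# Hypothetical usage of CaptionModel.to_srt_entry.
from langchain_experimental.video_captioning.models import CaptionModel

caption = CaptionModel(61_500, 64_250, "[a car speeds down the street]")
print(caption.to_srt_entry(1))
# Prints roughly:
# 1
# 00:01:01,500 --> 00:01:04,250
# [a car speeds down the street]
```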
- -IMPORTANT: the output in your response should be 'Result:text', where text is the description you generated. - -Here are some examples: - -Input: an image of a beautiful landscape -Result: a beautiful landscape - -Input: a scene of people enjoying a picnic -Result: people enjoying a picnic - -Below is the input for you to generate the result from: -""" - -REMOVE_VIDEO_MODEL_DESCRIPTION_PROMPT = ChatPromptTemplate( # type: ignore[call-arg] - messages=[ - SystemMessage(content=REMOVE_VIDEO_MODEL_DESCRIPTION_TEMPLATE), - HumanMessagePromptTemplate.from_template("Input: {description}"), - ] -) - -VALIDATE_AND_ADJUST_DESCRIPTION_TEMPLATE = """ -You are tasked with enhancing closed-caption descriptions based on corresponding subtitles from the audio of a real movie clip. -Assignment details, from highest to lowest priority: - -1) If the subtitle exceeds Limit characters, creatively rewrite the description to not exceed the character limit, preserving as many details as you can. - If you feel that you cannot complete the response under the character limit, you must omit details in order to remain below the character limit. - -2) If the details in the subtitle provide meaningful additional information to its closed-caption description, incorporate those details into the description. - -Enhance the closed-caption description by integrating details from the subtitle if they contribute meaningful information. - -Example: -Subtitle: car screeching, tires squealing -Closed-Caption Description: A car speeds down the street. - -Output: Result: A car speeds down the street, its tires screeching and squealing. - -**IMPORTANT**: Remember your assignment details when formulating your response! YOU MUST NOT EXCEED LIMIT CHARACTERS at human message. - -***IMPORTANT***: You must only return the following text in your response. You may not return a response that does not follow the exact format in the next line: -Result: Text - -**** YOU MUST PROVIDE ME WITH THE BEST ANSWER YOU CAN COME UP WITH, -**** EVEN IF YOU DEEM THAT IT IS A BAD ONE. 
YOU MUST ONLY RESPOND IN THE FORMAT IN THE NEXT LINE: -Result: Text - -Below is the data provided, generate a response using this data: -""" - -VALIDATE_AND_ADJUST_DESCRIPTION_PROMPT = ChatPromptTemplate( # type: ignore[call-arg] - messages=[ - SystemMessage(content=VALIDATE_AND_ADJUST_DESCRIPTION_TEMPLATE), - HumanMessagePromptTemplate.from_template( - "Limit: {limit}\nSubtitle: {subtitle}\nClosed-Caption Description: {description}" - ), - ] -) diff --git a/libs/experimental/langchain_experimental/video_captioning/services/audio_service.py b/libs/experimental/langchain_experimental/video_captioning/services/audio_service.py deleted file mode 100644 index 66f1710b97a3f..0000000000000 --- a/libs/experimental/langchain_experimental/video_captioning/services/audio_service.py +++ /dev/null @@ -1,92 +0,0 @@ -import subprocess -from pathlib import Path -from typing import List, Optional - -from langchain.schema import Document -from langchain_community.document_loaders import AssemblyAIAudioTranscriptLoader -from langchain_community.document_loaders.assemblyai import TranscriptFormat -from langchain_core.callbacks.manager import CallbackManagerForChainRun - -from langchain_experimental.video_captioning.models import AudioModel, BaseModel - - -class AudioProcessor: - def __init__( - self, - api_key: str, - output_audio_path: str = "output_audio.mp3", - ): - self.output_audio_path = Path(output_audio_path) - self.api_key = api_key - - def process( - self, - video_file_path: str, - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> list: - try: - self._extract_audio(video_file_path) - return self._transcribe_audio() - finally: - # Cleanup: Delete the MP3 file after processing - try: - self.output_audio_path.unlink() - except FileNotFoundError: - pass # File not found, nothing to delete - - def _extract_audio(self, video_file_path: str) -> None: - # Ensure the directory exists where the output file will be saved - self.output_audio_path.parent.mkdir(parents=True, exist_ok=True) - - command = [ - "ffmpeg", - "-i", - video_file_path, - "-vn", - "-acodec", - "mp3", - self.output_audio_path.as_posix(), - "-y", # The '-y' flag overwrites the output file if it exists - ] - - subprocess.run( - command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True - ) - - def _transcribe_audio(self) -> List[BaseModel]: - if not self.api_key: - raise ValueError("API key for AssemblyAI is not configured") - audio_file_path_str = str(self.output_audio_path) - loader = AssemblyAIAudioTranscriptLoader( - file_path=audio_file_path_str, - api_key=self.api_key, - transcript_format=TranscriptFormat.SUBTITLES_SRT, - ) - docs = loader.load() - return self._create_transcript_models(docs) - - @staticmethod - def _create_transcript_models(docs: List[Document]) -> List[BaseModel]: - # Assuming docs is a list of Documents with .page_content as the transcript data - models = [] - for doc in docs: - models.extend(AudioProcessor._parse_transcript(doc.page_content)) - return models - - @staticmethod - def _parse_transcript(srt_content: str) -> List[BaseModel]: - models = [] - entries = srt_content.strip().split("\n\n") # Split based on double newline - - for entry in entries: - index, timespan, *subtitle_lines = entry.split("\n") - - # If not a valid entry format, skip - if len(subtitle_lines) == 0: - continue - - start_time, end_time = timespan.split(" --> ") - subtitle_text = " ".join(subtitle_lines).strip() - models.append(AudioModel.from_srt(start_time, end_time, subtitle_text)) - - return models diff --git 
a/libs/experimental/langchain_experimental/video_captioning/services/caption_service.py b/libs/experimental/langchain_experimental/video_captioning/services/caption_service.py deleted file mode 100644 index f6810ade77946..0000000000000 --- a/libs/experimental/langchain_experimental/video_captioning/services/caption_service.py +++ /dev/null @@ -1,279 +0,0 @@ -from typing import Dict, List, Optional, Tuple - -from langchain.chains.llm import LLMChain -from langchain_core.callbacks.manager import CallbackManagerForChainRun -from langchain_core.language_models import BaseLanguageModel - -from langchain_experimental.video_captioning.models import VideoModel -from langchain_experimental.video_captioning.prompts import ( - JOIN_SIMILAR_VIDEO_MODELS_PROMPT, - REMOVE_VIDEO_MODEL_DESCRIPTION_PROMPT, -) - - -class CaptionProcessor: - def __init__( - self, - llm: BaseLanguageModel, - verbose: bool = True, - similarity_threshold: int = 80, - use_unclustered_models: bool = False, - ) -> None: - self.llm = llm - self.verbose = verbose - - # Set the percentage value for how similar two video model image - # descriptions should be in order for us to cluster them into a group - self._SIMILARITY_THRESHOLD = similarity_threshold - # Set to True if you want to include video models which were not clustered. - # Will likely result in closed-caption artifacts - self._USE_NON_CLUSTERED_VIDEO_MODELS = use_unclustered_models - - def process( - self, - video_models: List[VideoModel], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> List[VideoModel]: - # Remove any consecutive duplicates - video_models = self._remove_consecutive_duplicates(video_models) - - # Holds the video models after clustering has been applied - video_models_post_clustering = [] - # In this case, index represents a divider between clusters - index = 0 - for start, end in self._get_model_clusters(video_models): - start_vm, end_vm = video_models[start], video_models[end] - - if self._USE_NON_CLUSTERED_VIDEO_MODELS: - # Append all the non-clustered models in between model clusters - # staged for OpenAI combination - video_models_post_clustering += video_models[index:start] - index = end + 1 - - # Send to llm for description combination - models_to_combine = video_models[start:index] - combined_description = self._join_similar_video_models( - models_to_combine, run_manager - ) - - # Strip any prefixes that are redundant in the context of closed-captions - stripped_description = self._remove_video_model_description_prefix( - combined_description, run_manager - ) - - # Create a new video model which is the combination of all the models in - # the cluster - combined_and_stripped_model = VideoModel( - start_vm.start_time, end_vm.end_time, stripped_description - ) - - video_models_post_clustering.append(combined_and_stripped_model) - - if self._USE_NON_CLUSTERED_VIDEO_MODELS: - # Append any non-clustered models present after every clustered model - video_models_post_clustering += video_models[index:] - - return video_models_post_clustering - - def _remove_consecutive_duplicates( - self, - video_models: List[VideoModel], - ) -> List[VideoModel]: - buffer: List[VideoModel] = [] - - for video_model in video_models: - # Join this model and the previous model if they have the same image - # description - if ( - len(buffer) > 0 - and buffer[-1].image_description == video_model.image_description - ): - buffer[-1].end_time = video_model.end_time - - else: - buffer.append(video_model) - - return buffer - - def 
_remove_video_model_description_prefix( - self, description: str, run_manager: Optional[CallbackManagerForChainRun] = None - ) -> str: - conversation = LLMChain( - llm=self.llm, - prompt=REMOVE_VIDEO_MODEL_DESCRIPTION_PROMPT, - verbose=True, - callbacks=run_manager.get_child() if run_manager else None, - ) - # Get response from OpenAI using LLMChain - response = conversation({"description": description}) - - # Take out the Result: part of the response - return response["text"].replace("Result:", "").strip() - - def _join_similar_video_models( - self, - video_models: List[VideoModel], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> str: - descriptions = "" - count = 1 - for video_model in video_models: - descriptions += ( - f"Description {count}: " + video_model.image_description + ", " - ) - count += 1 - - # Strip trailing ", " - descriptions = descriptions[:-2] - - conversation = LLMChain( - llm=self.llm, - prompt=JOIN_SIMILAR_VIDEO_MODELS_PROMPT, - verbose=True, - callbacks=run_manager.get_child() if run_manager else None, - ) - # Get response from OpenAI using LLMChain - response = conversation({"descriptions": descriptions}) - - # Take out the Result: part of the response - return response["text"].replace("Result:", "").strip() - - def _get_model_clusters( - self, video_models: List[VideoModel] - ) -> List[Tuple[int, int]]: - # Word bank which maps lowercase words (case-insensitive) with trailing s's - # removed (singular/plural-insensitive) to video model indexes in video_models - word_bank: Dict[str, List[int]] = {} - - # Function which formats words to be inserted into word bank, as specified - # above - def format_word(w: str) -> str: - return w.lower().rstrip("s") - - # Keeps track of the current video model index - index = 0 - for vm in video_models: - for word in vm.image_description.split(): - formatted_word = format_word(word) - word_bank[formatted_word] = ( - word_bank[formatted_word] if formatted_word in word_bank else [] - ) + [index] - index += 1 - - # Keeps track of the current video model index - index = 0 - # Maps video model index to list of other video model indexes that have a - # similarity score above the threshold - sims: Dict[int, List[int]] = {} - for vm in video_models: - # Maps other video model index to number of words it shares in common - # with this video model - matches: Dict[int, int] = {} - for word in vm.image_description.split(): - formatted_word = format_word(word) - for match in word_bank[formatted_word]: - if match != index: - matches[match] = matches[match] + 1 if match in matches else 1 - if matches: - # Get the highest number of words another video model shares with - # this video model - max_words_in_common = max(matches.values()) - - # Get all video model indexes that share the maximum number of words - # with this video model - vms_with_max_words = [ - key - for key, value in matches.items() - if value == max_words_in_common - ] - - # Maps other video model index to its similarity score with this - # video model - sim_scores: Dict[int, float] = {} - - # Compute similarity score for all video models that share the - # highest number of word occurrences with this video model - for vm_index in vms_with_max_words: - sim_scores[vm_index] = video_models[vm_index].similarity_score(vm) - - # Get the highest similarity score another video model shares with - # this video model - max_score = max(sim_scores.values()) - - # Get a list of all video models that have the maximum similarity - # score to this video model - 
vms_with_max_score = [ - key for key, value in sim_scores.items() if value == max_score - ] - - # Finally, transfer all video models with a high enough similarity - # to this video model into the sims dictionary - if max_score >= self._SIMILARITY_THRESHOLD: - sims[index] = [] - for vm_index in vms_with_max_score: - sims[index].append(vm_index) - - index += 1 - - # Maps video model index to boolean, indicates if we have already checked - # this video model's similarity array so that we don't have infinite recursion - already_accessed: Dict[int, bool] = {} - - # Recursively search video_model[vm_index]'s similarity matches to find the - # earliest and latest video model in the cluster (start and end) - def _find_start_and_end(vm_index: int) -> Tuple[int, int]: - close_matches = sims[vm_index] - first_vm, last_vm = min(close_matches), max(close_matches) - first_vm, last_vm = min(vm_index, first_vm), max(vm_index, last_vm) - - if not already_accessed.get(vm_index, None): - already_accessed[vm_index] = True - for close_match in close_matches: - if close_match in sims: - if vm_index in sims[close_match]: - s, e = _find_start_and_end(close_match) - first_vm = min(s, first_vm) - last_vm = max(e, last_vm) - - return first_vm, last_vm - - # Add the video model cluster results into a set - clusters = set() - for vm_index in sims: - clusters.add(_find_start_and_end(vm_index)) - - # Filter the set to include only non-subset intervals - filtered_clusters = set() - for interval in clusters: - start, end = interval[0], interval[1] - is_subset = any( - start >= other_start and end <= other_end - for other_start, other_end in clusters - if interval != (other_start, other_end) - ) - if not is_subset: - filtered_clusters.add(interval) - - # Sort these clusters into a list, sorted using the first element of the - # tuple (index of video model in the cluster with earliest start time) - sorted_clusters = sorted(filtered_clusters, key=lambda x: x[0]) - - # Merge any overlapping clusters into one big cluster - def _merge_overlapping_clusters( - array: List[Tuple[int, int]], - ) -> List[Tuple[int, int]]: - if len(array) <= 1: - return array - - def _merge( - curr: Tuple[int, int], rest: List[Tuple[int, int]] - ) -> List[Tuple[int, int]]: - if curr[1] >= rest[0][0]: - return [(curr[0], rest[0][1])] + rest[1:] - return [curr] + rest - - return _merge(array[0], _merge_overlapping_clusters(array[1:])) - - merged_clusters = _merge_overlapping_clusters(sorted_clusters) - - return merged_clusters diff --git a/libs/experimental/langchain_experimental/video_captioning/services/combine_service.py b/libs/experimental/langchain_experimental/video_captioning/services/combine_service.py deleted file mode 100644 index fee94129cb269..0000000000000 --- a/libs/experimental/langchain_experimental/video_captioning/services/combine_service.py +++ /dev/null @@ -1,141 +0,0 @@ -from typing import Dict, List, Optional, Tuple - -from langchain.chains.llm import LLMChain -from langchain.schema.language_model import BaseLanguageModel -from langchain_core.callbacks.manager import CallbackManagerForChainRun - -from langchain_experimental.video_captioning.models import ( - AudioModel, - CaptionModel, - VideoModel, -) -from langchain_experimental.video_captioning.prompts import ( - VALIDATE_AND_ADJUST_DESCRIPTION_PROMPT, -) - - -class CombineProcessor: - def __init__( - self, llm: BaseLanguageModel, verbose: bool = True, char_limit: int = 20 - ): - self.llm = llm - self.verbose = verbose - - # Adjust as needed. 
Be careful adjusting it too low because OpenAI may - # produce unwanted output - self._CHAR_LIMIT = char_limit - - def process( - self, - video_models: List[VideoModel], - audio_models: List[AudioModel], - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> List[CaptionModel]: - caption_models = [] - audio_index = 0 - - for video_model in video_models: - while audio_index < len(audio_models): - audio_model = audio_models[audio_index] - overlap_start, overlap_end = self._check_overlap( - video_model, audio_model - ) - - if overlap_start == -1: - if audio_model.start_time <= video_model.start_time: - caption_models.append( - CaptionModel.from_audio_model(audio_model) - ) - audio_index += 1 - else: - break - else: - self._handle_overlap( - caption_models, - video_model, - audio_model, - overlap_start, - overlap_end, - ) - - # Update audio model or pop if it's fully used - if audio_model.end_time <= overlap_end: - audio_index += 1 - else: - audio_model.start_time = overlap_end - - caption_models.append(CaptionModel.from_video_model(video_model)) - - # Add remaining audio models - for i in range(audio_index, len(audio_models)): - caption_models.append(CaptionModel.from_audio_model(audio_models[i])) - - return caption_models - - @staticmethod - def _check_overlap( - video_model: VideoModel, audio_model: AudioModel - ) -> Tuple[int, int]: - overlap_start = max(audio_model.start_time, video_model.start_time) - overlap_end = min(audio_model.end_time, video_model.end_time) - if overlap_start < overlap_end: - return overlap_start, overlap_end - return -1, -1 - - def _handle_overlap( - self, - caption_models: List[CaptionModel], - video_model: VideoModel, - audio_model: AudioModel, - overlap_start: int, - overlap_end: int, - ) -> None: - # Handle non-overlapping part - if video_model.start_time < overlap_start: - caption_models.append( - CaptionModel.from_video_model( - VideoModel( - video_model.start_time, - overlap_start, - video_model.image_description, - ) - ) - ) - video_model.start_time = overlap_start - - # Handle the combined caption during overlap - caption_text = self._validate_and_adjust_description(audio_model, video_model) - subtitle_text = audio_model.subtitle_text - caption_models.append( - CaptionModel.from_video_model( - VideoModel(overlap_start, overlap_end, caption_text) - ).add_subtitle_text(subtitle_text) - ) - - # Update video model start time for remaining part - if video_model.end_time > overlap_end: - video_model.start_time = overlap_end - - def _validate_and_adjust_description( - self, - audio_model: AudioModel, - video_model: VideoModel, - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> str: - conversation = LLMChain( - llm=self.llm, - prompt=VALIDATE_AND_ADJUST_DESCRIPTION_PROMPT, - verbose=True, - callbacks=run_manager.get_child() if run_manager else None, - ) - # Get response from OpenAI using LLMChain - response: Dict[str, str] = conversation( - { - "limit": self._CHAR_LIMIT, - "subtitle": audio_model.subtitle_text, - "description": video_model.image_description, - } - ) - - # Take out the Result: part of the response - return response["text"].replace("Result:", "").strip() diff --git a/libs/experimental/langchain_experimental/video_captioning/services/image_service.py b/libs/experimental/langchain_experimental/video_captioning/services/image_service.py deleted file mode 100644 index 551499222c9e8..0000000000000 --- a/libs/experimental/langchain_experimental/video_captioning/services/image_service.py +++ /dev/null @@ -1,111 +0,0 @@ -from 
typing import List, Optional - -import numpy as np -from langchain_community.document_loaders import ImageCaptionLoader -from langchain_core.callbacks import CallbackManagerForChainRun - -from langchain_experimental.video_captioning.models import VideoModel - - -class ImageProcessor: - _SAMPLES_PER_SECOND: int = 4 - - def __init__(self, frame_skip: int = -1, threshold: int = 3000000) -> None: - self.threshold = threshold - self.frame_skip = frame_skip - - def process( - self, - video_file_path: str, - run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> list: - return self._extract_frames(video_file_path) - - def _extract_frames(self, video_file_path: str) -> list: - try: - import cv2 - from cv2.typing import MatLike - except ImportError as e: - raise ImportError( - "Unable to import cv2, please install it with " - "`pip install -U opencv-python`" - ) from e - video_models: List[VideoModel] = [] - - def _add_model(start_time: int, end_time: int) -> None: - middle_frame_time = start_time / end_time - cap.set(cv2.CAP_PROP_POS_MSEC, middle_frame_time) - - # Convert the frame to bytes - _, encoded_frame = cv2.imencode(".jpg", frame) - notable_frame_bytes = encoded_frame.tobytes() - - cap.set(cv2.CAP_PROP_POS_MSEC, end_time) - - # Create an instance of the ImageCaptionLoader - loader = ImageCaptionLoader(images=notable_frame_bytes) - - # Load captions for the images - list_docs = loader.load() - - video_model = VideoModel( - start_time, - end_time, - list_docs[len(list_docs) - 1].page_content.replace("[SEP]", "").strip(), - ) - video_models.append(video_model) - - def _is_notable_frame(frame1: MatLike, frame2: MatLike, threshold: int) -> bool: - # Convert frames to grayscale - gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY) - gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY) - - # Compute absolute difference between frames - frame_diff = cv2.absdiff(gray1, gray2) - - # Apply threshold to identify notable differences - _, thresholded_diff = cv2.threshold(frame_diff, 30, 255, cv2.THRESH_BINARY) - - # Count the number of white pixels (indicating differences) - num_diff_pixels = np.sum(thresholded_diff) - - return num_diff_pixels > threshold - - # Open the video file - cap = cv2.VideoCapture(video_file_path) - - if self.frame_skip == -1: - self.frame_skip = int(cap.get(cv2.CAP_PROP_FPS)) // self._SAMPLES_PER_SECOND - - # Read the first frame - ret, prev_frame = cap.read() - - # Loop through the video frames - start_time = 0 - end_time = 0 - - while True: - # Read the next frame - ret, frame = cap.read() - if not ret: - break # Break the loop if there are no more frames - - # Check if the current frame is notable - if _is_notable_frame(prev_frame, frame, self.threshold): - end_time = int(cap.get(cv2.CAP_PROP_POS_MSEC)) - _add_model(start_time, end_time) - start_time = end_time - - # Update the previous frame - prev_frame = frame.copy() - - # Increment the frame position by the skip value - cap.set( - cv2.CAP_PROP_POS_FRAMES, - cap.get(cv2.CAP_PROP_POS_FRAMES) + self.frame_skip, - ) - - # Release the video capture object - cap.release() - - return video_models diff --git a/libs/experimental/langchain_experimental/video_captioning/services/srt_service.py b/libs/experimental/langchain_experimental/video_captioning/services/srt_service.py deleted file mode 100644 index 4b094904005fc..0000000000000 --- a/libs/experimental/langchain_experimental/video_captioning/services/srt_service.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import List - -from 
langchain_experimental.video_captioning.models import CaptionModel - - -class SRTProcessor: - @staticmethod - def process(caption_models: List[CaptionModel]) -> str: - """Generates the full SRT content from a list of caption models.""" - srt_entries = [] - for index, model in enumerate(caption_models, start=1): - srt_entries.append(model.to_srt_entry(index)) - - return "\n".join(srt_entries) diff --git a/libs/experimental/poetry.lock b/libs/experimental/poetry.lock deleted file mode 100644 index bf760ccb35d92..0000000000000 --- a/libs/experimental/poetry.lock +++ /dev/null @@ -1,3808 +0,0 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. - -[[package]] -name = "aiohappyeyeballs" -version = "2.4.0" -description = "Happy Eyeballs for asyncio" -optional = false -python-versions = ">=3.8" -files = [ - {file = "aiohappyeyeballs-2.4.0-py3-none-any.whl", hash = "sha256:7ce92076e249169a13c2f49320d1967425eaf1f407522d707d59cac7628d62bd"}, - {file = "aiohappyeyeballs-2.4.0.tar.gz", hash = "sha256:55a1714f084e63d49639800f95716da97a1f173d46a16dfcfda0016abb93b6b2"}, -] - -[[package]] -name = "aiohttp" -version = "3.10.6" -description = "Async http client/server framework (asyncio)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "aiohttp-3.10.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:682836fc672972cc3101cc9e30d49c5f7e8f1d010478d46119fe725a4545acfd"}, - {file = "aiohttp-3.10.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:289fa8a20018d0d5aa9e4b35d899bd51bcb80f0d5f365d9a23e30dac3b79159b"}, - {file = "aiohttp-3.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8617c96a20dd57e7e9d398ff9d04f3d11c4d28b1767273a5b1a018ada5a654d3"}, - {file = "aiohttp-3.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdbeff1b062751c2a2a55b171f7050fb7073633c699299d042e962aacdbe1a07"}, - {file = "aiohttp-3.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ea35d849cdd4a9268f910bff4497baebbc1aa3f2f625fd8ccd9ac99c860c621"}, - {file = "aiohttp-3.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:473961b3252f3b949bb84873d6e268fb6d8aa0ccc6eb7404fa58c76a326bb8e1"}, - {file = "aiohttp-3.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d2665c5df629eb2f981dab244c01bfa6cdc185f4ffa026639286c4d56fafb54"}, - {file = "aiohttp-3.10.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25d92f794f1332f656e3765841fc2b7ad5c26c3f3d01e8949eeb3495691cf9f4"}, - {file = "aiohttp-3.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9bd6b2033993d5ae80883bb29b83fb2b432270bbe067c2f53cc73bb57c46065f"}, - {file = "aiohttp-3.10.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d7f408c43f5e75ea1edc152fb375e8f46ef916f545fb66d4aebcbcfad05e2796"}, - {file = "aiohttp-3.10.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:cf8b8560aa965f87bf9c13bf9fed7025993a155ca0ce8422da74bf46d18c2f5f"}, - {file = "aiohttp-3.10.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14477c4e52e2f17437b99893fd220ffe7d7ee41df5ebf931a92b8ca82e6fd094"}, - {file = "aiohttp-3.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fb138fbf9f53928e779650f5ed26d0ea1ed8b2cab67f0ea5d63afa09fdc07593"}, - {file = "aiohttp-3.10.6-cp310-cp310-win32.whl", hash = "sha256:9843d683b8756971797be171ead21511d2215a2d6e3c899c6e3107fbbe826791"}, - {file = "aiohttp-3.10.6-cp310-cp310-win_amd64.whl", hash = 
"sha256:f8b8e49fe02f744d38352daca1dbef462c3874900bd8166516f6ea8e82b5aacf"}, - {file = "aiohttp-3.10.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f52e54fd776ad0da1006708762213b079b154644db54bcfc62f06eaa5b896402"}, - {file = "aiohttp-3.10.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:995ab1a238fd0d19dc65f2d222e5eb064e409665c6426a3e51d5101c1979ee84"}, - {file = "aiohttp-3.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0749c4d5a08a802dd66ecdf59b2df4d76b900004017468a7bb736c3b5a3dd902"}, - {file = "aiohttp-3.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e05b39158f2af0e2438cc2075cfc271f4ace0c3cc4a81ec95b27a0432e161951"}, - {file = "aiohttp-3.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a9f196c970db2dcde4f24317e06615363349dc357cf4d7a3b0716c20ac6d7bcd"}, - {file = "aiohttp-3.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:47647c8af04a70e07a2462931b0eba63146a13affa697afb4ecbab9d03a480ce"}, - {file = "aiohttp-3.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:669c0efe7e99f6d94d63274c06344bd0e9c8daf184ce5602a29bc39e00a18720"}, - {file = "aiohttp-3.10.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9721cdd83a994225352ca84cd537760d41a9da3c0eacb3ff534747ab8fba6d0"}, - {file = "aiohttp-3.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0b82c8ebed66ce182893e7c0b6b60ba2ace45b1df104feb52380edae266a4850"}, - {file = "aiohttp-3.10.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b169f8e755e541b72e714b89a831b315bbe70db44e33fead28516c9e13d5f931"}, - {file = "aiohttp-3.10.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:0be3115753baf8b4153e64f9aa7bf6c0c64af57979aa900c31f496301b374570"}, - {file = "aiohttp-3.10.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e1f80cd17d81a404b6e70ef22bfe1870bafc511728397634ad5f5efc8698df56"}, - {file = "aiohttp-3.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6419728b08fb6380c66a470d2319cafcec554c81780e2114b7e150329b9a9a7f"}, - {file = "aiohttp-3.10.6-cp311-cp311-win32.whl", hash = "sha256:bd294dcdc1afdc510bb51d35444003f14e327572877d016d576ac3b9a5888a27"}, - {file = "aiohttp-3.10.6-cp311-cp311-win_amd64.whl", hash = "sha256:bf861da9a43d282d6dd9dcd64c23a0fccf2c5aa5cd7c32024513c8c79fb69de3"}, - {file = "aiohttp-3.10.6-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2708baccdc62f4b1251e59c2aac725936a900081f079b88843dabcab0feeeb27"}, - {file = "aiohttp-3.10.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7475da7a5e2ccf1a1c86c8fee241e277f4874c96564d06f726d8df8e77683ef7"}, - {file = "aiohttp-3.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:02108326574ff60267b7b35b17ac5c0bbd0008ccb942ce4c48b657bb90f0b8aa"}, - {file = "aiohttp-3.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:029a019627b37fa9eac5c75cc54a6bb722c4ebbf5a54d8c8c0fb4dd8facf2702"}, - {file = "aiohttp-3.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a637d387db6fdad95e293fab5433b775fd104ae6348d2388beaaa60d08b38c4"}, - {file = "aiohttp-3.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1a16f3fc1944c61290d33c88dc3f09ba62d159b284c38c5331868425aca426"}, - {file = "aiohttp-3.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81b292f37969f9cc54f4643f0be7dacabf3612b3b4a65413661cf6c350226787"}, - {file = 
"aiohttp-3.10.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0754690a3a26e819173a34093798c155bafb21c3c640bff13be1afa1e9d421f9"}, - {file = "aiohttp-3.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:164ecd32e65467d86843dbb121a6666c3deb23b460e3f8aefdcaacae79eb718a"}, - {file = "aiohttp-3.10.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:438c5863feb761f7ca3270d48c292c334814459f61cc12bab5ba5b702d7c9e56"}, - {file = "aiohttp-3.10.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ba18573bb1de1063d222f41de64a0d3741223982dcea863b3f74646faf618ec7"}, - {file = "aiohttp-3.10.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c82a94ddec996413a905f622f3da02c4359952aab8d817c01cf9915419525e95"}, - {file = "aiohttp-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:92351aa5363fc3c1f872ca763f86730ced32b01607f0c9662b1fa711087968d0"}, - {file = "aiohttp-3.10.6-cp312-cp312-win32.whl", hash = "sha256:3e15e33bfc73fa97c228f72e05e8795e163a693fd5323549f49367c76a6e5883"}, - {file = "aiohttp-3.10.6-cp312-cp312-win_amd64.whl", hash = "sha256:fe517113fe4d35d9072b826c3e147d63c5f808ca8167d450b4f96c520c8a1d8d"}, - {file = "aiohttp-3.10.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:482f74057ea13d387a7549d7a7ecb60e45146d15f3e58a2d93a0ad2d5a8457cd"}, - {file = "aiohttp-3.10.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:03fa40d1450ee5196e843315ddf74a51afc7e83d489dbfc380eecefea74158b1"}, - {file = "aiohttp-3.10.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1e52e59ed5f4cc3a3acfe2a610f8891f216f486de54d95d6600a2c9ba1581f4d"}, - {file = "aiohttp-3.10.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b3935a22c9e41a8000d90588bed96cf395ef572dbb409be44c6219c61d900d"}, - {file = "aiohttp-3.10.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bef1480ee50f75abcfcb4b11c12de1005968ca9d0172aec4a5057ba9f2b644f"}, - {file = "aiohttp-3.10.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:671745ea7db19693ce867359d503772177f0b20fa8f6ee1e74e00449f4c4151d"}, - {file = "aiohttp-3.10.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b50b367308ca8c12e0b50cba5773bc9abe64c428d3fd2bbf5cd25aab37c77bf"}, - {file = "aiohttp-3.10.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a504d7cdb431a777d05a124fd0b21efb94498efa743103ea01b1e3136d2e4fb"}, - {file = "aiohttp-3.10.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:66bc81361131763660b969132a22edce2c4d184978ba39614e8f8f95db5c95f8"}, - {file = "aiohttp-3.10.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:27cf19a38506e2e9f12fc17e55f118f04897b0a78537055d93a9de4bf3022e3d"}, - {file = "aiohttp-3.10.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3468b39f977a11271517c6925b226720e148311039a380cc9117b1e2258a721f"}, - {file = "aiohttp-3.10.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9d26da22a793dfd424be1050712a70c0afd96345245c29aced1e35dbace03413"}, - {file = "aiohttp-3.10.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:844d48ff9173d0b941abed8b2ea6a412f82b56d9ab1edb918c74000c15839362"}, - {file = "aiohttp-3.10.6-cp313-cp313-win32.whl", hash = "sha256:2dd56e3c43660ed3bea67fd4c5025f1ac1f9ecf6f0b991a6e5efe2e678c490c5"}, - {file = "aiohttp-3.10.6-cp313-cp313-win_amd64.whl", hash = "sha256:c91781d969fbced1993537f45efe1213bd6fccb4b37bfae2a026e20d6fbed206"}, - {file = 
"aiohttp-3.10.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4407a80bca3e694f2d2a523058e20e1f9f98a416619e04f6dc09dc910352ac8b"}, - {file = "aiohttp-3.10.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1cb045ec5961f51af3e2c08cd6fe523f07cc6e345033adee711c49b7b91bb954"}, - {file = "aiohttp-3.10.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4fabdcdc781a36b8fd7b2ca9dea8172f29a99e11d00ca0f83ffeb50958da84a1"}, - {file = "aiohttp-3.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79a9f42efcc2681790595ab3d03c0e52d01edc23a0973ea09f0dc8d295e12b8e"}, - {file = "aiohttp-3.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cca776a440795db437d82c07455761c85bbcf3956221c3c23b8c93176c278ce7"}, - {file = "aiohttp-3.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5582de171f0898139cf51dd9fcdc79b848e28d9abd68e837f0803fc9f30807b1"}, - {file = "aiohttp-3.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:370e2d47575c53c817ee42a18acc34aad8da4dbdaac0a6c836d58878955f1477"}, - {file = "aiohttp-3.10.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:444d1704e2af6b30766debed9be8a795958029e552fe77551355badb1944012c"}, - {file = "aiohttp-3.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40271a2a375812967401c9ca8077de9368e09a43a964f4dce0ff603301ec9358"}, - {file = "aiohttp-3.10.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f3af26f86863fad12e25395805bb0babbd49d512806af91ec9708a272b696248"}, - {file = "aiohttp-3.10.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4752df44df48fd42b80f51d6a97553b482cda1274d9dc5df214a3a1aa5d8f018"}, - {file = "aiohttp-3.10.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:2cd5290ab66cfca2f90045db2cc6434c1f4f9fbf97c9f1c316e785033782e7d2"}, - {file = "aiohttp-3.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3427031064b0d5c95647e6369c4aa3c556402f324a3e18107cb09517abe5f962"}, - {file = "aiohttp-3.10.6-cp38-cp38-win32.whl", hash = "sha256:614fc21e86adc28e4165a6391f851a6da6e9cbd7bb232d0df7718b453a89ee98"}, - {file = "aiohttp-3.10.6-cp38-cp38-win_amd64.whl", hash = "sha256:58c5d7318a136a3874c78717dd6de57519bc64f6363c5827c2b1cb775bea71dd"}, - {file = "aiohttp-3.10.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5db26bbca8e7968c4c977a0c640e0b9ce7224e1f4dcafa57870dc6ee28e27de6"}, - {file = "aiohttp-3.10.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3fb4216e3ec0dbc01db5ba802f02ed78ad8f07121be54eb9e918448cc3f61b7c"}, - {file = "aiohttp-3.10.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a976ef488f26e224079deb3d424f29144c6d5ba4ded313198169a8af8f47fb82"}, - {file = "aiohttp-3.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a86610174de8a85a920e956e2d4f9945e7da89f29a00e95ac62a4a414c4ef4e"}, - {file = "aiohttp-3.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:217791c6a399cc4f2e6577bb44344cba1f5714a2aebf6a0bea04cfa956658284"}, - {file = "aiohttp-3.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ba3662d41abe2eab0eeec7ee56f33ef4e0b34858f38abf24377687f9e1fb00a5"}, - {file = "aiohttp-3.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4dfa5ad4bce9ca30a76117fbaa1c1decf41ebb6c18a4e098df44298941566f9"}, - {file = "aiohttp-3.10.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e0009258e97502936d3bd5bf2ced15769629097d0abb81e6495fba1047824fe0"}, - {file = "aiohttp-3.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0a75d5c9fb4f06c41d029ae70ad943c3a844c40c0a769d12be4b99b04f473d3d"}, - {file = "aiohttp-3.10.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8198b7c002aae2b40b2d16bfe724b9a90bcbc9b78b2566fc96131ef4e382574d"}, - {file = "aiohttp-3.10.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4611db8c907f90fe86be112efdc2398cd7b4c8eeded5a4f0314b70fdea8feab0"}, - {file = "aiohttp-3.10.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ff99ae06eef85c7a565854826114ced72765832ee16c7e3e766c5e4c5b98d20e"}, - {file = "aiohttp-3.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7641920bdcc7cd2d3ddfb8bb9133a6c9536b09dbd49490b79e125180b2d25b93"}, - {file = "aiohttp-3.10.6-cp39-cp39-win32.whl", hash = "sha256:e2e7d5591ea868d5ec82b90bbeb366a198715672841d46281b623e23079593db"}, - {file = "aiohttp-3.10.6-cp39-cp39-win_amd64.whl", hash = "sha256:b504c08c45623bf5c7ca41be380156d925f00199b3970efd758aef4a77645feb"}, - {file = "aiohttp-3.10.6.tar.gz", hash = "sha256:d2578ef941be0c2ba58f6f421a703527d08427237ed45ecb091fed6f83305336"}, -] - -[package.dependencies] -aiohappyeyeballs = ">=2.3.0" -aiosignal = ">=1.1.2" -async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} -attrs = ">=17.3.0" -frozenlist = ">=1.1.1" -multidict = ">=4.5,<7.0" -yarl = ">=1.12.0,<2.0" - -[package.extras] -speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] - -[[package]] -name = "aiosignal" -version = "1.3.1" -description = "aiosignal: a list of registered asynchronous callbacks" -optional = false -python-versions = ">=3.7" -files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, -] - -[package.dependencies] -frozenlist = ">=1.1.0" - -[[package]] -name = "annotated-types" -version = "0.7.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -files = [ - {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, - {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, -] - -[[package]] -name = "anyio" -version = "4.6.0" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.9" -files = [ - {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, - {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, -] - -[package.dependencies] -exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" -typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} - -[package.extras] -doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] -trio = ["trio (>=0.26.1)"] - -[[package]] -name = "appnope" -version = "0.1.4" -description = "Disable App Nap 
on macOS >= 10.9" -optional = false -python-versions = ">=3.6" -files = [ - {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"}, - {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, -] - -[[package]] -name = "argon2-cffi" -version = "23.1.0" -description = "Argon2 for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "argon2_cffi-23.1.0-py3-none-any.whl", hash = "sha256:c670642b78ba29641818ab2e68bd4e6a78ba53b7eff7b4c3815ae16abf91c7ea"}, - {file = "argon2_cffi-23.1.0.tar.gz", hash = "sha256:879c3e79a2729ce768ebb7d36d4609e3a78a4ca2ec3a9f12286ca057e3d0db08"}, -] - -[package.dependencies] -argon2-cffi-bindings = "*" - -[package.extras] -dev = ["argon2-cffi[tests,typing]", "tox (>4)"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-copybutton", "sphinx-notfound-page"] -tests = ["hypothesis", "pytest"] -typing = ["mypy"] - -[[package]] -name = "argon2-cffi-bindings" -version = "21.2.0" -description = "Low-level CFFI bindings for Argon2" -optional = false -python-versions = ">=3.6" -files = [ - {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, - {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, - {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, - {file = 
"argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, - {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, - {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, -] - -[package.dependencies] -cffi = ">=1.0.1" - -[package.extras] -dev = ["cogapp", "pre-commit", "pytest", "wheel"] -tests = ["pytest"] - -[[package]] -name = "arrow" -version = "1.3.0" -description = "Better dates & times for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"}, - {file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"}, -] - -[package.dependencies] -python-dateutil = ">=2.7.0" -types-python-dateutil = ">=2.8.10" - -[package.extras] -doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"] -test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"] - -[[package]] -name = "asttokens" -version = "2.4.1" -description = "Annotate AST trees with source code positions" -optional = false -python-versions = "*" -files = [ - {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"}, - {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"}, -] - -[package.dependencies] -six = ">=1.12.0" - -[package.extras] -astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] -test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] - -[[package]] -name = "async-lru" -version = "2.0.4" -description = "Simple LRU cache for asyncio" -optional = false -python-versions = ">=3.8" -files = [ - {file = "async-lru-2.0.4.tar.gz", hash = "sha256:b8a59a5df60805ff63220b2a0c5b5393da5521b113cd5465a44eb037d81a5627"}, - {file = "async_lru-2.0.4-py3-none-any.whl", hash = "sha256:ff02944ce3c288c5be660c42dbcca0742b32c3b279d6dceda655190240b99224"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} - -[[package]] -name = "async-timeout" -version = "4.0.3" -description = "Timeout context manager for asyncio programs" -optional = false -python-versions = ">=3.7" -files = [ - {file = "async-timeout-4.0.3.tar.gz", hash = 
"sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, - {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, -] - -[[package]] -name = "attrs" -version = "24.2.0" -description = "Classes Without Boilerplate" -optional = false -python-versions = ">=3.7" -files = [ - {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, - {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, -] - -[package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] - -[[package]] -name = "babel" -version = "2.16.0" -description = "Internationalization utilities" -optional = false -python-versions = ">=3.8" -files = [ - {file = "babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b"}, - {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, -] - -[package.extras] -dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] - -[[package]] -name = "beautifulsoup4" -version = "4.12.3" -description = "Screen-scraping library" -optional = false -python-versions = ">=3.6.0" -files = [ - {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, - {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, -] - -[package.dependencies] -soupsieve = ">1.2" - -[package.extras] -cchardet = ["cchardet"] -chardet = ["chardet"] -charset-normalizer = ["charset-normalizer"] -html5lib = ["html5lib"] -lxml = ["lxml"] - -[[package]] -name = "bleach" -version = "6.1.0" -description = "An easy safelist-based HTML-sanitizing tool." -optional = false -python-versions = ">=3.8" -files = [ - {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, - {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, -] - -[package.dependencies] -six = ">=1.9.0" -webencodings = "*" - -[package.extras] -css = ["tinycss2 (>=1.1.0,<1.3)"] - -[[package]] -name = "certifi" -version = "2024.8.30" -description = "Python package for providing Mozilla's CA Bundle." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, - {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, -] - -[[package]] -name = "cffi" -version = "1.17.1" -description = "Foreign Function Interface for Python calling C code." -optional = false -python-versions = ">=3.8" -files = [ - {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, - {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, - {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, - {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, - {file = 
"cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, - {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, - {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, - {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, - {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, - {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, - {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, - {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, - {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, - {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, - {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = 
"sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, - {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, - {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, -] - -[package.dependencies] -pycparser = "*" - -[[package]] -name = "charset-normalizer" -version = "3.3.2" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "comm" -version = "0.2.2" -description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." -optional = false -python-versions = ">=3.8" -files = [ - {file = "comm-0.2.2-py3-none-any.whl", hash = "sha256:e6fb86cb70ff661ee8c9c14e7d36d6de3b4066f1441be4063df9c5009f0a64d3"}, - {file = "comm-0.2.2.tar.gz", hash = "sha256:3fd7a84065306e07bea1773df6eb8282de51ba82f77c72f9c85716ab11fe980e"}, -] - -[package.dependencies] -traitlets = ">=4" - -[package.extras] -test = ["pytest"] - -[[package]] -name = "dataclasses-json" -version = "0.6.7" -description = "Easily serialize dataclasses to and from JSON." 
-optional = false -python-versions = "<4.0,>=3.7" -files = [ - {file = "dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a"}, - {file = "dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0"}, -] - -[package.dependencies] -marshmallow = ">=3.18.0,<4.0.0" -typing-inspect = ">=0.4.0,<1" - -[[package]] -name = "debugpy" -version = "1.8.6" -description = "An implementation of the Debug Adapter Protocol for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "debugpy-1.8.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:30f467c5345d9dfdcc0afdb10e018e47f092e383447500f125b4e013236bf14b"}, - {file = "debugpy-1.8.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d73d8c52614432f4215d0fe79a7e595d0dd162b5c15233762565be2f014803b"}, - {file = "debugpy-1.8.6-cp310-cp310-win32.whl", hash = "sha256:e3e182cd98eac20ee23a00653503315085b29ab44ed66269482349d307b08df9"}, - {file = "debugpy-1.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:e3a82da039cfe717b6fb1886cbbe5c4a3f15d7df4765af857f4307585121c2dd"}, - {file = "debugpy-1.8.6-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:67479a94cf5fd2c2d88f9615e087fcb4fec169ec780464a3f2ba4a9a2bb79955"}, - {file = "debugpy-1.8.6-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb8653f6cbf1dd0a305ac1aa66ec246002145074ea57933978346ea5afdf70b"}, - {file = "debugpy-1.8.6-cp311-cp311-win32.whl", hash = "sha256:cdaf0b9691879da2d13fa39b61c01887c34558d1ff6e5c30e2eb698f5384cd43"}, - {file = "debugpy-1.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:43996632bee7435583952155c06881074b9a742a86cee74e701d87ca532fe833"}, - {file = "debugpy-1.8.6-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:db891b141fc6ee4b5fc6d1cc8035ec329cabc64bdd2ae672b4550c87d4ecb128"}, - {file = "debugpy-1.8.6-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:567419081ff67da766c898ccf21e79f1adad0e321381b0dfc7a9c8f7a9347972"}, - {file = "debugpy-1.8.6-cp312-cp312-win32.whl", hash = "sha256:c9834dfd701a1f6bf0f7f0b8b1573970ae99ebbeee68314116e0ccc5c78eea3c"}, - {file = "debugpy-1.8.6-cp312-cp312-win_amd64.whl", hash = "sha256:e4ce0570aa4aca87137890d23b86faeadf184924ad892d20c54237bcaab75d8f"}, - {file = "debugpy-1.8.6-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:df5dc9eb4ca050273b8e374a4cd967c43be1327eeb42bfe2f58b3cdfe7c68dcb"}, - {file = "debugpy-1.8.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a85707c6a84b0c5b3db92a2df685b5230dd8fb8c108298ba4f11dba157a615a"}, - {file = "debugpy-1.8.6-cp38-cp38-win32.whl", hash = "sha256:538c6cdcdcdad310bbefd96d7850be1cd46e703079cc9e67d42a9ca776cdc8a8"}, - {file = "debugpy-1.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:22140bc02c66cda6053b6eb56dfe01bbe22a4447846581ba1dd6df2c9f97982d"}, - {file = "debugpy-1.8.6-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:c1cef65cffbc96e7b392d9178dbfd524ab0750da6c0023c027ddcac968fd1caa"}, - {file = "debugpy-1.8.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1e60bd06bb3cc5c0e957df748d1fab501e01416c43a7bdc756d2a992ea1b881"}, - {file = "debugpy-1.8.6-cp39-cp39-win32.whl", hash = 
"sha256:f7158252803d0752ed5398d291dee4c553bb12d14547c0e1843ab74ee9c31123"}, - {file = "debugpy-1.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3358aa619a073b620cd0d51d8a6176590af24abcc3fe2e479929a154bf591b51"}, - {file = "debugpy-1.8.6-py2.py3-none-any.whl", hash = "sha256:b48892df4d810eff21d3ef37274f4c60d32cdcafc462ad5647239036b0f0649f"}, - {file = "debugpy-1.8.6.zip", hash = "sha256:c931a9371a86784cee25dec8d65bc2dc7a21f3f1552e3833d9ef8f919d22280a"}, -] - -[[package]] -name = "decorator" -version = "5.1.1" -description = "Decorators for Humans" -optional = false -python-versions = ">=3.5" -files = [ - {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, - {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, -] - -[[package]] -name = "defusedxml" -version = "0.7.1" -description = "XML bomb protection for Python stdlib modules" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, - {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, -] - -[[package]] -name = "distro" -version = "1.9.0" -description = "Distro - an OS platform information API" -optional = false -python-versions = ">=3.6" -files = [ - {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, - {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, -] - -[[package]] -name = "exceptiongroup" -version = "1.2.2" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "executing" -version = "2.1.0" -description = "Get the currently executing AST node of a frame, and other information" -optional = false -python-versions = ">=3.8" -files = [ - {file = "executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"}, - {file = "executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab"}, -] - -[package.extras] -tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] - -[[package]] -name = "fastjsonschema" -version = "2.20.0" -description = "Fastest Python implementation of JSON schema" -optional = false -python-versions = "*" -files = [ - {file = "fastjsonschema-2.20.0-py3-none-any.whl", hash = "sha256:5875f0b0fa7a0043a91e93a9b8f793bcbbba9691e7fd83dca95c28ba26d21f0a"}, - {file = "fastjsonschema-2.20.0.tar.gz", hash = "sha256:3d48fc5300ee96f5d116f10fe6f28d938e6008f59a6a025c2649475b87f76a23"}, -] - -[package.extras] -devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] - -[[package]] -name = "fqdn" -version = "1.5.1" -description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern 
bowsers" -optional = false -python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, !=3.4, <4" -files = [ - {file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"}, - {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, -] - -[[package]] -name = "frozenlist" -version = "1.4.1" -description = "A list-like structure which implements collections.abc.MutableSequence" -optional = false -python-versions = ">=3.8" -files = [ - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, - {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, - {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, - {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, - {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, - {file = 
"frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, - {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, - {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, - {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, - {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, - {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, - {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, - {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, - {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, - {file = 
"frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, - {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, - {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, - {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, - {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, - {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, - {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, -] - -[[package]] -name = "greenlet" -version = "3.1.1" -description = "Lightweight in-process concurrent programming" -optional = false -python-versions = ">=3.7" -files = [ - {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, - {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, - {file = 
"greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, - {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, - {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, - {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, - {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, - {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, - {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, - {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, - {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, - {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, - {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, - {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, - {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, - {file = 
"greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, - {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, - {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, - {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, - {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, - {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, - {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, - {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, - {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, - {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, - {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, - {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, - {file = 
"greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, - {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, - {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, - {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, - {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, - {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, - {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, - {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, - {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, - {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, - {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, - {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, - {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, - {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, -] - -[package.extras] -docs = ["Sphinx", "furo"] -test = ["objgraph", "psutil"] - -[[package]] -name = "h11" -version = "0.14.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" 
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
-    {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
-]
-
-[[package]]
-name = "httpcore"
-version = "1.0.5"
-description = "A minimal low-level HTTP client."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
-    {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
-]
-
-[package.dependencies]
-certifi = "*"
-h11 = ">=0.13,<0.15"
-
-[package.extras]
-asyncio = ["anyio (>=4.0,<5.0)"]
-http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (==1.*)"]
-trio = ["trio (>=0.22.0,<0.26.0)"]
-
-[[package]]
-name = "httpx"
-version = "0.27.2"
-description = "The next generation HTTP client."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"},
-    {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"},
-]
-
-[package.dependencies]
-anyio = "*"
-certifi = "*"
-httpcore = "==1.*"
-idna = "*"
-sniffio = "*"
-
-[package.extras]
-brotli = ["brotli", "brotlicffi"]
-cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
-http2 = ["h2 (>=3,<5)"]
-socks = ["socksio (==1.*)"]
-zstd = ["zstandard (>=0.18.0)"]
-
-[[package]]
-name = "idna"
-version = "3.10"
-description = "Internationalized Domain Names in Applications (IDNA)"
-optional = false
-python-versions = ">=3.6"
-files = [
-    {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
-    {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
-]
-
-[package.extras]
-all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
-
-[[package]]
-name = "importlib-metadata"
-version = "8.5.0"
-description = "Read metadata from Python packages"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"},
-    {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"},
-]
-
-[package.dependencies]
-zipp = ">=3.20"
-
-[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-enabler = ["pytest-enabler (>=2.2)"]
-perf = ["ipython"]
-test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
-type = ["pytest-mypy"]
-
-[[package]]
-name = "iniconfig"
-version = "2.0.0"
-description = "brain-dead simple config-ini parsing"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
-    {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
-]
-
-[[package]]
-name = "ipykernel"
-version = "6.29.5"
-description = "IPython Kernel for Jupyter"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "ipykernel-6.29.5-py3-none-any.whl", hash = "sha256:afdb66ba5aa354b09b91379bac28ae4afebbb30e8b39510c9690afb7a10421b5"},
-    {file = "ipykernel-6.29.5.tar.gz", hash = "sha256:f093a22c4a40f8828f8e330a9c297cb93dcab13bd9678ded6de8e5cf81c56215"},
-]
-
-[package.dependencies]
-appnope = {version = "*", markers = "platform_system == \"Darwin\""}
-comm = ">=0.1.1"
-debugpy = ">=1.6.5"
-ipython = ">=7.23.1"
-jupyter-client = ">=6.1.12"
-jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
-matplotlib-inline = ">=0.1"
-nest-asyncio = "*"
-packaging = "*"
-psutil = "*"
-pyzmq = ">=24"
-tornado = ">=6.1"
-traitlets = ">=5.4.0"
-
-[package.extras]
-cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"]
-docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"]
-pyqt5 = ["pyqt5"]
-pyside6 = ["pyside6"]
-test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"]
-
-[[package]]
-name = "ipython"
-version = "8.18.1"
-description = "IPython: Productive Interactive Computing"
-optional = false
-python-versions = ">=3.9"
-files = [
-    {file = "ipython-8.18.1-py3-none-any.whl", hash = "sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397"},
-    {file = "ipython-8.18.1.tar.gz", hash = "sha256:ca6f079bb33457c66e233e4580ebfc4128855b4cf6370dddd73842a9563e8a27"},
-]
-
-[package.dependencies]
-colorama = {version = "*", markers = "sys_platform == \"win32\""}
-decorator = "*"
-exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
-jedi = ">=0.16"
-matplotlib-inline = "*"
-pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""}
-prompt-toolkit = ">=3.0.41,<3.1.0"
-pygments = ">=2.4.0"
-stack-data = "*"
-traitlets = ">=5"
-typing-extensions = {version = "*", markers = "python_version < \"3.10\""}
-
-[package.extras]
-all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"]
-black = ["black"]
-doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"]
-kernel = ["ipykernel"]
-nbconvert = ["nbconvert"]
-nbformat = ["nbformat"]
-notebook = ["ipywidgets", "notebook"]
-parallel = ["ipyparallel"]
-qtconsole = ["qtconsole"]
-test = ["pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath"]
-test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath", "trio"]
-
-[[package]]
-name = "ipywidgets"
-version = "8.1.5"
-description = "Jupyter interactive widgets"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "ipywidgets-8.1.5-py3-none-any.whl", hash = "sha256:3290f526f87ae6e77655555baba4f36681c555b8bdbbff430b70e52c34c86245"},
-    {file = "ipywidgets-8.1.5.tar.gz", hash = "sha256:870e43b1a35656a80c18c9503bbf2d16802db1cb487eec6fab27d683381dde17"},
-]
-
-[package.dependencies]
-comm = ">=0.1.3"
-ipython = ">=6.1.0"
-jupyterlab-widgets = ">=3.0.12,<3.1.0"
-traitlets = ">=4.3.1"
-widgetsnbextension = ">=4.0.12,<4.1.0"
-
-[package.extras]
-test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"]
-
-[[package]]
-name = "isoduration"
-version = "20.11.0"
-description = "Operations with ISO 8601 durations"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"},
-    {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"},
-]
-
-[package.dependencies]
-arrow = ">=0.15.0"
-
-[[package]]
-name = "jedi"
-version = "0.19.1"
-description = "An autocompletion tool for Python that can be used for text editors."
-optional = false
-python-versions = ">=3.6"
-files = [
-    {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"},
-    {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"},
-]
-
-[package.dependencies]
-parso = ">=0.8.3,<0.9.0"
-
-[package.extras]
-docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"]
-qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"]
-testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
-
-[[package]]
-name = "jinja2"
-version = "3.1.4"
-description = "A very fast and expressive template engine."
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"},
-    {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"},
-]
-
-[package.dependencies]
-MarkupSafe = ">=2.0"
-
-[package.extras]
-i18n = ["Babel (>=2.7)"]
-
-[[package]]
-name = "jiter"
-version = "0.5.0"
-description = "Fast iterable JSON parser."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jiter-0.5.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b599f4e89b3def9a94091e6ee52e1d7ad7bc33e238ebb9c4c63f211d74822c3f"},
-    {file = "jiter-0.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a063f71c4b06225543dddadbe09d203dc0c95ba352d8b85f1221173480a71d5"},
-    {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acc0d5b8b3dd12e91dd184b87273f864b363dfabc90ef29a1092d269f18c7e28"},
-    {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c22541f0b672f4d741382a97c65609332a783501551445ab2df137ada01e019e"},
-    {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63314832e302cc10d8dfbda0333a384bf4bcfce80d65fe99b0f3c0da8945a91a"},
-    {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a25fbd8a5a58061e433d6fae6d5298777c0814a8bcefa1e5ecfff20c594bd749"},
-    {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:503b2c27d87dfff5ab717a8200fbbcf4714516c9d85558048b1fc14d2de7d8dc"},
-    {file = "jiter-0.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d1f3d27cce923713933a844872d213d244e09b53ec99b7a7fdf73d543529d6d"},
-    {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c95980207b3998f2c3b3098f357994d3fd7661121f30669ca7cb945f09510a87"},
-    {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:afa66939d834b0ce063f57d9895e8036ffc41c4bd90e4a99631e5f261d9b518e"},
-    {file = "jiter-0.5.0-cp310-none-win32.whl", hash = "sha256:f16ca8f10e62f25fd81d5310e852df6649af17824146ca74647a018424ddeccf"},
-    {file = "jiter-0.5.0-cp310-none-win_amd64.whl", hash = "sha256:b2950e4798e82dd9176935ef6a55cf6a448b5c71515a556da3f6b811a7844f1e"},
-    {file = "jiter-0.5.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4c8e1ed0ef31ad29cae5ea16b9e41529eb50a7fba70600008e9f8de6376d553"},
-    {file = "jiter-0.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c6f16e21276074a12d8421692515b3fd6d2ea9c94fd0734c39a12960a20e85f3"},
-    {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5280e68e7740c8c128d3ae5ab63335ce6d1fb6603d3b809637b11713487af9e6"},
-    {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:583c57fc30cc1fec360e66323aadd7fc3edeec01289bfafc35d3b9dcb29495e4"},
-    {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26351cc14507bdf466b5f99aba3df3143a59da75799bf64a53a3ad3155ecded9"},
-    {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829df14d656b3fb87e50ae8b48253a8851c707da9f30d45aacab2aa2ba2d614"},
-    {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42a4bdcf7307b86cb863b2fb9bb55029b422d8f86276a50487982d99eed7c6e"},
-    {file = "jiter-0.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04d461ad0aebf696f8da13c99bc1b3e06f66ecf6cfd56254cc402f6385231c06"},
-    {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6375923c5f19888c9226582a124b77b622f8fd0018b843c45eeb19d9701c403"},
-    {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cec323a853c24fd0472517113768c92ae0be8f8c384ef4441d3632da8baa646"},
-    {file = "jiter-0.5.0-cp311-none-win32.whl", hash = "sha256:aa1db0967130b5cab63dfe4d6ff547c88b2a394c3410db64744d491df7f069bb"},
-    {file = "jiter-0.5.0-cp311-none-win_amd64.whl", hash = "sha256:aa9d2b85b2ed7dc7697597dcfaac66e63c1b3028652f751c81c65a9f220899ae"},
-    {file = "jiter-0.5.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9f664e7351604f91dcdd557603c57fc0d551bc65cc0a732fdacbf73ad335049a"},
-    {file = "jiter-0.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:044f2f1148b5248ad2c8c3afb43430dccf676c5a5834d2f5089a4e6c5bbd64df"},
-    {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:702e3520384c88b6e270c55c772d4bd6d7b150608dcc94dea87ceba1b6391248"},
-    {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:528d742dcde73fad9d63e8242c036ab4a84389a56e04efd854062b660f559544"},
-    {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cf80e5fe6ab582c82f0c3331df27a7e1565e2dcf06265afd5173d809cdbf9ba"},
-    {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:44dfc9ddfb9b51a5626568ef4e55ada462b7328996294fe4d36de02fce42721f"},
-    {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c451f7922992751a936b96c5f5b9bb9312243d9b754c34b33d0cb72c84669f4e"},
-    {file = "jiter-0.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:308fce789a2f093dca1ff91ac391f11a9f99c35369117ad5a5c6c4903e1b3e3a"},
-    {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7f5ad4a7c6b0d90776fdefa294f662e8a86871e601309643de30bf94bb93a64e"},
-    {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ea189db75f8eca08807d02ae27929e890c7d47599ce3d0a6a5d41f2419ecf338"},
-    {file = "jiter-0.5.0-cp312-none-win32.whl", hash = "sha256:e3bbe3910c724b877846186c25fe3c802e105a2c1fc2b57d6688b9f8772026e4"},
-    {file = "jiter-0.5.0-cp312-none-win_amd64.whl", hash = "sha256:a586832f70c3f1481732919215f36d41c59ca080fa27a65cf23d9490e75b2ef5"},
-    {file = "jiter-0.5.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f04bc2fc50dc77be9d10f73fcc4e39346402ffe21726ff41028f36e179b587e6"},
-    {file = "jiter-0.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6f433a4169ad22fcb550b11179bb2b4fd405de9b982601914ef448390b2954f3"},
-    {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad4a6398c85d3a20067e6c69890ca01f68659da94d74c800298581724e426c7e"},
-    {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6baa88334e7af3f4d7a5c66c3a63808e5efbc3698a1c57626541ddd22f8e4fbf"},
-    {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ece0a115c05efca597c6d938f88c9357c843f8c245dbbb53361a1c01afd7148"},
-    {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:335942557162ad372cc367ffaf93217117401bf930483b4b3ebdb1223dbddfa7"},
-    {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649b0ee97a6e6da174bffcb3c8c051a5935d7d4f2f52ea1583b5b3e7822fbf14"},
-    {file = "jiter-0.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4be354c5de82157886ca7f5925dbda369b77344b4b4adf2723079715f823989"},
-    {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5206144578831a6de278a38896864ded4ed96af66e1e63ec5dd7f4a1fce38a3a"},
-    {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8120c60f8121ac3d6f072b97ef0e71770cc72b3c23084c72c4189428b1b1d3b6"},
-    {file = "jiter-0.5.0-cp38-none-win32.whl", hash = "sha256:6f1223f88b6d76b519cb033a4d3687ca157c272ec5d6015c322fc5b3074d8a5e"},
-    {file = "jiter-0.5.0-cp38-none-win_amd64.whl", hash = "sha256:c59614b225d9f434ea8fc0d0bec51ef5fa8c83679afedc0433905994fb36d631"},
-    {file = "jiter-0.5.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0af3838cfb7e6afee3f00dc66fa24695199e20ba87df26e942820345b0afc566"},
-    {file = "jiter-0.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:550b11d669600dbc342364fd4adbe987f14d0bbedaf06feb1b983383dcc4b961"},
-    {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:489875bf1a0ffb3cb38a727b01e6673f0f2e395b2aad3c9387f94187cb214bbf"},
-    {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b250ca2594f5599ca82ba7e68785a669b352156260c5362ea1b4e04a0f3e2389"},
-    {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ea18e01f785c6667ca15407cd6dabbe029d77474d53595a189bdc813347218e"},
-    {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:462a52be85b53cd9bffd94e2d788a09984274fe6cebb893d6287e1c296d50653"},
-    {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92cc68b48d50fa472c79c93965e19bd48f40f207cb557a8346daa020d6ba973b"},
-    {file = "jiter-0.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1c834133e59a8521bc87ebcad773608c6fa6ab5c7a022df24a45030826cf10bc"},
-    {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab3a71ff31cf2d45cb216dc37af522d335211f3a972d2fe14ea99073de6cb104"},
-    {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cccd3af9c48ac500c95e1bcbc498020c87e1781ff0345dd371462d67b76643eb"},
-    {file = "jiter-0.5.0-cp39-none-win32.whl", hash = "sha256:368084d8d5c4fc40ff7c3cc513c4f73e02c85f6009217922d0823a48ee7adf61"},
-    {file = "jiter-0.5.0-cp39-none-win_amd64.whl", hash = "sha256:ce03f7b4129eb72f1687fa11300fbf677b02990618428934662406d2a76742a1"},
-    {file = "jiter-0.5.0.tar.gz", hash = "sha256:1d916ba875bcab5c5f7d927df998c4cb694d27dceddf3392e58beaf10563368a"},
-]
-
-[[package]]
-name = "json5"
-version = "0.9.25"
-description = "A Python implementation of the JSON5 data format."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "json5-0.9.25-py3-none-any.whl", hash = "sha256:34ed7d834b1341a86987ed52f3f76cd8ee184394906b6e22a1e0deb9ab294e8f"},
-    {file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"},
-]
-
-[[package]]
-name = "jsonpatch"
-version = "1.33"
-description = "Apply JSON-Patches (RFC 6902)"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*"
-files = [
-    {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"},
-    {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"},
-]
-
-[package.dependencies]
-jsonpointer = ">=1.9"
-
-[[package]]
-name = "jsonpointer"
-version = "3.0.0"
-description = "Identify specific nodes in a JSON document (RFC 6901)"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"},
-    {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"},
-]
-
-[[package]]
-name = "jsonschema"
-version = "4.23.0"
-description = "An implementation of JSON Schema validation for Python"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
-    {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
-]
-
-[package.dependencies]
-attrs = ">=22.2.0"
-fqdn = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
-idna = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
-isoduration = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
-jsonpointer = {version = ">1.13", optional = true, markers = "extra == \"format-nongpl\""}
-jsonschema-specifications = ">=2023.03.6"
-referencing = ">=0.28.4"
-rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
-rfc3986-validator = {version = ">0.1.0", optional = true, markers = "extra == \"format-nongpl\""}
-rpds-py = ">=0.7.1"
-uri-template = {version = "*", optional = true, markers = "extra == \"format-nongpl\""}
-webcolors = {version = ">=24.6.0", optional = true, markers = "extra == \"format-nongpl\""}
-
-[package.extras]
-format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
-format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"]
-
-[[package]]
-name = "jsonschema-specifications"
-version = "2023.12.1"
-description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"},
-    {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"},
-]
-
-[package.dependencies]
-referencing = ">=0.31.0"
-
-[[package]]
-name = "jupyter"
-version = "1.1.1"
-description = "Jupyter metapackage. Install all the Jupyter components in one go."
-optional = false
-python-versions = "*"
-files = [
-    {file = "jupyter-1.1.1-py2.py3-none-any.whl", hash = "sha256:7a59533c22af65439b24bbe60373a4e95af8f16ac65a6c00820ad378e3f7cc83"},
-    {file = "jupyter-1.1.1.tar.gz", hash = "sha256:d55467bceabdea49d7e3624af7e33d59c37fff53ed3a350e1ac957bed731de7a"},
-]
-
-[package.dependencies]
-ipykernel = "*"
-ipywidgets = "*"
-jupyter-console = "*"
-jupyterlab = "*"
-nbconvert = "*"
-notebook = "*"
-
-[[package]]
-name = "jupyter-client"
-version = "8.6.3"
-description = "Jupyter protocol implementation and client libraries"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"},
-    {file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"},
-]
-
-[package.dependencies]
-importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""}
-jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
-python-dateutil = ">=2.8.2"
-pyzmq = ">=23.0"
-tornado = ">=6.2"
-traitlets = ">=5.3"
-
-[package.extras]
-docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"]
-test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest (<8.2.0)", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"]
-
-[[package]]
-name = "jupyter-console"
-version = "6.6.3"
-description = "Jupyter terminal console"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485"},
-    {file = "jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539"},
-]
-
-[package.dependencies]
-ipykernel = ">=6.14"
-ipython = "*"
-jupyter-client = ">=7.0.0"
-jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
-prompt-toolkit = ">=3.0.30"
-pygments = "*"
-pyzmq = ">=17"
-traitlets = ">=5.4"
-
-[package.extras]
-test = ["flaky", "pexpect", "pytest"]
-
-[[package]]
-name = "jupyter-core"
-version = "5.7.2"
-description = "Jupyter core package. A base package on which Jupyter projects rely."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jupyter_core-5.7.2-py3-none-any.whl", hash = "sha256:4f7315d2f6b4bcf2e3e7cb6e46772eba760ae459cd1f59d29eb57b0a01bd7409"},
-    {file = "jupyter_core-5.7.2.tar.gz", hash = "sha256:aa5f8d32bbf6b431ac830496da7392035d6f61b4f54872f15c4bd2a9c3f536d9"},
-]
-
-[package.dependencies]
-platformdirs = ">=2.5"
-pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""}
-traitlets = ">=5.3"
-
-[package.extras]
-docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"]
-test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout"]
-
-[[package]]
-name = "jupyter-events"
-version = "0.10.0"
-description = "Jupyter Event System library"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jupyter_events-0.10.0-py3-none-any.whl", hash = "sha256:4b72130875e59d57716d327ea70d3ebc3af1944d3717e5a498b8a06c6c159960"},
-    {file = "jupyter_events-0.10.0.tar.gz", hash = "sha256:670b8229d3cc882ec782144ed22e0d29e1c2d639263f92ca8383e66682845e22"},
-]
-
-[package.dependencies]
-jsonschema = {version = ">=4.18.0", extras = ["format-nongpl"]}
-python-json-logger = ">=2.0.4"
-pyyaml = ">=5.3"
-referencing = "*"
-rfc3339-validator = "*"
-rfc3986-validator = ">=0.1.1"
-traitlets = ">=5.3"
-
-[package.extras]
-cli = ["click", "rich"]
-docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme", "sphinxcontrib-spelling"]
-test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "rich"]
-
-[[package]]
-name = "jupyter-lsp"
-version = "2.2.5"
-description = "Multi-Language Server WebSocket proxy for Jupyter Notebook/Lab server"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jupyter-lsp-2.2.5.tar.gz", hash = "sha256:793147a05ad446f809fd53ef1cd19a9f5256fd0a2d6b7ce943a982cb4f545001"},
-    {file = "jupyter_lsp-2.2.5-py3-none-any.whl", hash = "sha256:45fbddbd505f3fbfb0b6cb2f1bc5e15e83ab7c79cd6e89416b248cb3c00c11da"},
-]
-
-[package.dependencies]
-importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""}
-jupyter-server = ">=1.1.2"
-
-[[package]]
-name = "jupyter-server"
-version = "2.14.2"
-description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jupyter_server-2.14.2-py3-none-any.whl", hash = "sha256:47ff506127c2f7851a17bf4713434208fc490955d0e8632e95014a9a9afbeefd"},
-    {file = "jupyter_server-2.14.2.tar.gz", hash = "sha256:66095021aa9638ced276c248b1d81862e4c50f292d575920bbe960de1c56b12b"},
-]
-
-[package.dependencies]
-anyio = ">=3.1.0"
-argon2-cffi = ">=21.1"
-jinja2 = ">=3.0.3"
-jupyter-client = ">=7.4.4"
-jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0"
-jupyter-events = ">=0.9.0"
-jupyter-server-terminals = ">=0.4.4"
-nbconvert = ">=6.4.4"
-nbformat = ">=5.3.0"
-overrides = ">=5.0"
-packaging = ">=22.0"
-prometheus-client = ">=0.9"
-pywinpty = {version = ">=2.0.1", markers = "os_name == \"nt\""}
-pyzmq = ">=24"
-send2trash = ">=1.8.2"
-terminado = ">=0.8.3"
-tornado = ">=6.2.0"
-traitlets = ">=5.6.0"
-websocket-client = ">=1.7"
-
-[package.extras]
-docs = ["ipykernel", "jinja2", "jupyter-client", "myst-parser", "nbformat", "prometheus-client", "pydata-sphinx-theme", "send2trash", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-openapi (>=0.8.0)", "sphinxcontrib-spelling", "sphinxemoji", "tornado", "typing-extensions"]
-test = ["flaky", "ipykernel", "pre-commit", "pytest (>=7.0,<9)", "pytest-console-scripts", "pytest-jupyter[server] (>=0.7)", "pytest-timeout", "requests"]
-
-[[package]]
-name = "jupyter-server-terminals"
-version = "0.5.3"
-description = "A Jupyter Server Extension Providing Terminals."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jupyter_server_terminals-0.5.3-py3-none-any.whl", hash = "sha256:41ee0d7dc0ebf2809c668e0fc726dfaf258fcd3e769568996ca731b6194ae9aa"},
-    {file = "jupyter_server_terminals-0.5.3.tar.gz", hash = "sha256:5ae0295167220e9ace0edcfdb212afd2b01ee8d179fe6f23c899590e9b8a5269"},
-]
-
-[package.dependencies]
-pywinpty = {version = ">=2.0.3", markers = "os_name == \"nt\""}
-terminado = ">=0.8.3"
-
-[package.extras]
-docs = ["jinja2", "jupyter-server", "mistune (<4.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxcontrib-spelling", "sphinxemoji", "tornado"]
-test = ["jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout"]
-
-[[package]]
-name = "jupyterlab"
-version = "4.2.5"
-description = "JupyterLab computational environment"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jupyterlab-4.2.5-py3-none-any.whl", hash = "sha256:73b6e0775d41a9fee7ee756c80f58a6bed4040869ccc21411dc559818874d321"},
-    {file = "jupyterlab-4.2.5.tar.gz", hash = "sha256:ae7f3a1b8cb88b4f55009ce79fa7c06f99d70cd63601ee4aa91815d054f46f75"},
-]
-
-[package.dependencies]
-async-lru = ">=1.0.0"
-httpx = ">=0.25.0"
-importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""}
-ipykernel = ">=6.5.0"
-jinja2 = ">=3.0.3"
-jupyter-core = "*"
-jupyter-lsp = ">=2.0.0"
-jupyter-server = ">=2.4.0,<3"
-jupyterlab-server = ">=2.27.1,<3"
-notebook-shim = ">=0.2"
-packaging = "*"
-setuptools = ">=40.1.0"
-tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""}
-tornado = ">=6.2.0"
-traitlets = "*"
-
-[package.extras]
-dev = ["build", "bump2version", "coverage", "hatch", "pre-commit", "pytest-cov", "ruff (==0.3.5)"]
-docs = ["jsx-lexer", "myst-parser", "pydata-sphinx-theme (>=0.13.0)", "pytest", "pytest-check-links", "pytest-jupyter", "sphinx (>=1.8,<7.3.0)", "sphinx-copybutton"]
-docs-screenshots = ["altair (==5.3.0)", "ipython (==8.16.1)", "ipywidgets (==8.1.2)", "jupyterlab-geojson (==3.4.0)", "jupyterlab-language-pack-zh-cn (==4.1.post2)", "matplotlib (==3.8.3)", "nbconvert (>=7.0.0)", "pandas (==2.2.1)", "scipy (==1.12.0)", "vega-datasets (==0.9.0)"]
-test = ["coverage", "pytest (>=7.0)", "pytest-check-links (>=0.7)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter (>=0.5.3)", "pytest-timeout", "pytest-tornasync", "requests", "requests-cache", "virtualenv"]
-upgrade-extension = ["copier (>=9,<10)", "jinja2-time (<0.3)", "pydantic (<3.0)", "pyyaml-include (<3.0)", "tomli-w (<2.0)"]
-
-[[package]]
-name = "jupyterlab-pygments"
-version = "0.3.0"
-description = "Pygments theme using JupyterLab CSS variables"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780"},
-    {file = "jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d"},
-]
-
-[[package]]
-name = "jupyterlab-server"
-version = "2.27.3"
-description = "A set of server components for JupyterLab and JupyterLab like applications."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "jupyterlab_server-2.27.3-py3-none-any.whl", hash = "sha256:e697488f66c3db49df675158a77b3b017520d772c6e1548c7d9bcc5df7944ee4"},
-    {file = "jupyterlab_server-2.27.3.tar.gz", hash = "sha256:eb36caca59e74471988f0ae25c77945610b887f777255aa21f8065def9e51ed4"},
-]
-
-[package.dependencies]
-babel = ">=2.10"
-importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""}
-jinja2 = ">=3.0.3"
-json5 = ">=0.9.0"
-jsonschema = ">=4.18.0"
-jupyter-server = ">=1.21,<3"
-packaging = ">=21.3"
-requests = ">=2.31"
-
-[package.extras]
-docs = ["autodoc-traits", "jinja2 (<3.2.0)", "mistune (<4)", "myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinxcontrib-openapi (>0.8)"]
-openapi = ["openapi-core (>=0.18.0,<0.19.0)", "ruamel-yaml"]
-test = ["hatch", "ipykernel", "openapi-core (>=0.18.0,<0.19.0)", "openapi-spec-validator (>=0.6.0,<0.8.0)", "pytest (>=7.0,<8)", "pytest-console-scripts", "pytest-cov", "pytest-jupyter[server] (>=0.6.2)", "pytest-timeout", "requests-mock", "ruamel-yaml", "sphinxcontrib-spelling", "strict-rfc3339", "werkzeug"]
-
-[[package]]
-name = "jupyterlab-widgets"
-version = "3.0.13"
-description = "Jupyter interactive widgets for JupyterLab"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "jupyterlab_widgets-3.0.13-py3-none-any.whl", hash = "sha256:e3cda2c233ce144192f1e29914ad522b2f4c40e77214b0cc97377ca3d323db54"},
-    {file = "jupyterlab_widgets-3.0.13.tar.gz", hash = "sha256:a2966d385328c1942b683a8cd96b89b8dd82c8b8f81dda902bb2bc06d46f5bed"},
-]
-
-[[package]]
-name = "langchain"
-version = "0.3.0"
-description = "Building applications with LLMs through composability"
-optional = false
-python-versions = ">=3.9,<4.0"
-files = []
-develop = true
-
-[package.dependencies]
-aiohttp = "^3.8.3"
-async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""}
-langchain-core = "^0.3.0"
-langchain-text-splitters = "^0.3.0"
-langsmith = "^0.1.17"
-numpy = [
-    {version = ">=1,<2", markers = "python_version < \"3.12\""},
-    {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""},
-]
-pydantic = "^2.7.4"
-PyYAML = ">=5.3"
-requests = "^2"
-SQLAlchemy = ">=1.4,<3"
-tenacity = "^8.1.0,!=8.4.0"
-
-[package.source]
-type = "directory"
-url = "../langchain"
-
-[[package]]
-name = "langchain-community"
-version = "0.3.0"
-description = "Community contributed LangChain integrations." -optional = false -python-versions = ">=3.9,<4.0" -files = [] -develop = true - -[package.dependencies] -aiohttp = "^3.8.3" -dataclasses-json = ">= 0.5.7, < 0.7" -langchain = "^0.3.0" -langchain-core = "^0.3.0" -langsmith = "^0.1.125" -numpy = [ - {version = ">=1,<2", markers = "python_version < \"3.12\""}, - {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, -] -pydantic-settings = "^2.4.0" -PyYAML = ">=5.3" -requests = "^2" -SQLAlchemy = ">=1.4,<3" -tenacity = "^8.1.0,!=8.4.0" - -[package.source] -type = "directory" -url = "../community" - -[[package]] -name = "langchain-core" -version = "0.3.5" -description = "Building applications with LLMs through composability" -optional = false -python-versions = ">=3.9,<4.0" -files = [] -develop = true - -[package.dependencies] -jsonpatch = "^1.33" -langsmith = "^0.1.125" -packaging = ">=23.2,<25" -pydantic = [ - {version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""}, - {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, -] -PyYAML = ">=5.3" -tenacity = "^8.1.0,!=8.4.0" -typing-extensions = ">=4.7" - -[package.source] -type = "directory" -url = "../core" - -[[package]] -name = "langchain-openai" -version = "0.2.0" -description = "An integration package connecting OpenAI and LangChain" -optional = false -python-versions = ">=3.9,<4.0" -files = [] -develop = true - -[package.dependencies] -langchain-core = "^0.3" -openai = "^1.40.0" -tiktoken = ">=0.7,<1" - -[package.source] -type = "directory" -url = "../partners/openai" - -[[package]] -name = "langchain-text-splitters" -version = "0.3.0" -description = "LangChain text splitting utilities" -optional = false -python-versions = ">=3.9,<4.0" -files = [] -develop = true - -[package.dependencies] -langchain-core = "^0.3.0" - -[package.source] -type = "directory" -url = "../text-splitters" - -[[package]] -name = "langsmith" -version = "0.1.127" -description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." -optional = false -python-versions = "<4.0,>=3.8.1" -files = [ - {file = "langsmith-0.1.127-py3-none-any.whl", hash = "sha256:b4e8058d16ee0c814b16fae135c1e8817d1f43a38462d7f55e0eb1f87b9526aa"}, - {file = "langsmith-0.1.127.tar.gz", hash = "sha256:19c6f95d5558180c600455781e6faacc7798d0e1c54d6eb50ffb744d56f02bc9"}, -] - -[package.dependencies] -httpx = ">=0.23.0,<1" -orjson = ">=3.9.14,<4.0.0" -pydantic = [ - {version = ">=1,<3", markers = "python_full_version < \"3.12.4\""}, - {version = ">=2.7.4,<3.0.0", markers = "python_full_version >= \"3.12.4\""}, -] -requests = ">=2,<3" - -[[package]] -name = "markupsafe" -version = "2.1.5" -description = "Safely add untrusted strings to HTML/XML markup." 
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"},
-    {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"},
-    {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"},
-    {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"},
-    {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"},
-    {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"},
-    {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"},
-    {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"},
-    {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"},
-    {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"},
-    {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"},
-    {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"},
-    {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"},
-    {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"},
-    {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"},
-    {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"},
-    {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"},
-    {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"},
-    {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"},
-    {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"},
-    {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"},
-    {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"},
-    {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"},
-    {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"},
-    {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"},
-    {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"},
-    {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"},
-    {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"},
-    {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"},
-    {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"},
-    {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"},
-    {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"},
-    {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"},
-    {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"},
-    {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"},
-    {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"},
-    {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"},
-    {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"},
-    {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"},
-    {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"},
-    {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"},
-    {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"},
-    {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"},
-    {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"},
-    {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"},
-    {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"},
-    {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"},
-    {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"},
-    {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"},
-    {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"},
-    {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"},
-    {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"},
-    {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"},
-    {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"},
-    {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"},
-    {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"},
-    {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"},
-    {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"},
-    {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"},
-    {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"},
-]
-
-[[package]]
-name = "marshmallow"
-version = "3.22.0"
-description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"},
-    {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"},
-]
-
-[package.dependencies]
-packaging = ">=17.0"
-
-[package.extras]
-dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"]
-docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"]
-tests = ["pytest", "pytz", "simplejson"]
-
-[[package]]
-name = "matplotlib-inline"
-version = "0.1.7"
-description = "Inline Matplotlib backend for Jupyter"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"},
-    {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"},
-]
-
-[package.dependencies]
-traitlets = "*"
-
-[[package]]
-name = "mistune"
-version = "3.0.2"
-description = "A sane and fast Markdown parser with useful plugins and renderers"
-optional = false
-python-versions = ">=3.7"
-files = [
-    {file = "mistune-3.0.2-py3-none-any.whl", hash = "sha256:71481854c30fdbc938963d3605b72501f5c10a9320ecd412c121c163a1c7d205"},
-    {file = "mistune-3.0.2.tar.gz", hash = "sha256:fc7f93ded930c92394ef2cb6f04a8aabab4117a91449e72dcc8dfa646a508be8"},
-]
-
-[[package]]
-name = "multidict"
-version = "6.1.0"
-description = "multidict implementation"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"},
-    {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"},
-    {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"},
-    {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"},
-    {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"},
-    {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"},
-    {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"},
-    {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"},
-    {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"},
-    {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"},
-    {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"},
-    {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"},
-    {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"},
-    {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"},
-    {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"},
-    {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"},
-    {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"},
-    {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"},
-    {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e2b90b43e696f25c62656389d32236e049568b39320e2735d51f08fd362761b"},
-    {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d83a047959d38a7ff552ff94be767b7fd79b831ad1cd9920662db05fec24fe72"},
-    {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d1a9dd711d0877a1ece3d2e4fea11a8e75741ca21954c919406b44e7cf971304"},
-    {file = "multidict-6.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec2abea24d98246b94913b76a125e855eb5c434f7c46546046372fe60f666351"},
-    {file = "multidict-6.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4867cafcbc6585e4b678876c489b9273b13e9fff9f6d6d66add5e15d11d926cb"},
-    {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b48204e8d955c47c55b72779802b219a39acc3ee3d0116d5080c388970b76e3"},
-    {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:d8fff389528cad1618fb4b26b95550327495462cd745d879a8c7c2115248e399"},
-    {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a7a9541cd308eed5e30318430a9c74d2132e9a8cb46b901326272d780bf2d423"},
-    {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:da1758c76f50c39a2efd5e9859ce7d776317eb1dd34317c8152ac9251fc574a3"},
-    {file = "multidict-6.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c943a53e9186688b45b323602298ab727d8865d8c9ee0b17f8d62d14b56f0753"},
-    {file = "multidict-6.1.0-cp311-cp311-win32.whl", hash = "sha256:90f8717cb649eea3504091e640a1b8568faad18bd4b9fcd692853a04475a4b80"},
-    {file = "multidict-6.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:82176036e65644a6cc5bd619f65f6f19781e8ec2e5330f51aa9ada7504cc1926"},
-    {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"},
-    {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"},
-    {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"},
-    {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"},
-    {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"},
-    {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"},
-    {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"},
-    {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"},
-    {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"},
-    {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"},
-    {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"},
-    {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"},
-    {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"},
-    {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"},
-    {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"},
-    {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d569388c381b24671589335a3be6e1d45546c2988c2ebe30fdcada8457a31008"},
-    {file = "multidict-6.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:052e10d2d37810b99cc170b785945421141bf7bb7d2f8799d431e7db229c385f"},
-    {file = "multidict-6.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f90c822a402cb865e396a504f9fc8173ef34212a342d92e362ca498cad308e28"},
-    {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b225d95519a5bf73860323e633a664b0d85ad3d5bede6d30d95b35d4dfe8805b"},
-    {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:23bfd518810af7de1116313ebd9092cb9aa629beb12f6ed631ad53356ed6b86c"},
-    {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c09fcfdccdd0b57867577b719c69e347a436b86cd83747f179dbf0cc0d4c1f3"},
-    {file = "multidict-6.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf6bea52ec97e95560af5ae576bdac3aa3aae0b6758c6efa115236d9e07dae44"},
-    {file = "multidict-6.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57feec87371dbb3520da6192213c7d6fc892d5589a93db548331954de8248fd2"},
-    {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0c3f390dc53279cbc8ba976e5f8035eab997829066756d811616b652b00a23a3"},
-    {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:59bfeae4b25ec05b34f1956eaa1cb38032282cd4dfabc5056d0a1ec4d696d3aa"},
-    {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b2f59caeaf7632cc633b5cf6fc449372b83bbdf0da4ae04d5be36118e46cc0aa"},
-    {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_s390x.whl", hash =
"sha256:37bb93b2178e02b7b618893990941900fd25b6b9ac0fa49931a40aecdf083fe4"}, - {file = "multidict-6.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4e9f48f58c2c523d5a06faea47866cd35b32655c46b443f163d08c6d0ddb17d6"}, - {file = "multidict-6.1.0-cp313-cp313-win32.whl", hash = "sha256:3a37ffb35399029b45c6cc33640a92bef403c9fd388acce75cdc88f58bd19a81"}, - {file = "multidict-6.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9aa71e15d9d9beaad2c6b9319edcdc0a49a43ef5c0a4c8265ca9ee7d6c67774"}, - {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:db7457bac39421addd0c8449933ac32d8042aae84a14911a757ae6ca3eef1392"}, - {file = "multidict-6.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d094ddec350a2fb899fec68d8353c78233debde9b7d8b4beeafa70825f1c281a"}, - {file = "multidict-6.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5845c1fd4866bb5dd3125d89b90e57ed3138241540897de748cdf19de8a2fca2"}, - {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9079dfc6a70abe341f521f78405b8949f96db48da98aeb43f9907f342f627cdc"}, - {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3914f5aaa0f36d5d60e8ece6a308ee1c9784cd75ec8151062614657a114c4478"}, - {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c08be4f460903e5a9d0f76818db3250f12e9c344e79314d1d570fc69d7f4eae4"}, - {file = "multidict-6.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d093be959277cb7dee84b801eb1af388b6ad3ca6a6b6bf1ed7585895789d027d"}, - {file = "multidict-6.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3702ea6872c5a2a4eeefa6ffd36b042e9773f05b1f37ae3ef7264b1163c2dcf6"}, - {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2090f6a85cafc5b2db085124d752757c9d251548cedabe9bd31afe6363e0aff2"}, - {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f67f217af4b1ff66c68a87318012de788dd95fcfeb24cc889011f4e1c7454dfd"}, - {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:189f652a87e876098bbc67b4da1049afb5f5dfbaa310dd67c594b01c10388db6"}, - {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:6bb5992037f7a9eff7991ebe4273ea7f51f1c1c511e6a2ce511d0e7bdb754492"}, - {file = "multidict-6.1.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f4c2b9e770c4e393876e35a7046879d195cd123b4f116d299d442b335bcd"}, - {file = "multidict-6.1.0-cp38-cp38-win32.whl", hash = "sha256:e27bbb6d14416713a8bd7aaa1313c0fc8d44ee48d74497a0ff4c3a1b6ccb5167"}, - {file = "multidict-6.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:22f3105d4fb15c8f57ff3959a58fcab6ce36814486500cd7485651230ad4d4ef"}, - {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4e18b656c5e844539d506a0a06432274d7bd52a7487e6828c63a63d69185626c"}, - {file = "multidict-6.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a185f876e69897a6f3325c3f19f26a297fa058c5e456bfcff8015e9a27e83ae1"}, - {file = "multidict-6.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab7c4ceb38d91570a650dba194e1ca87c2b543488fe9309b4212694174fd539c"}, - {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e617fb6b0b6953fffd762669610c1c4ffd05632c138d61ac7e14ad187870669c"}, - {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:16e5f4bf4e603eb1fdd5d8180f1a25f30056f22e55ce51fb3d6ad4ab29f7d96f"}, - {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c035da3f544b1882bac24115f3e2e8760f10a0107614fc9839fd232200b875"}, - {file = "multidict-6.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:957cf8e4b6e123a9eea554fa7ebc85674674b713551de587eb318a2df3e00255"}, - {file = "multidict-6.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:483a6aea59cb89904e1ceabd2b47368b5600fb7de78a6e4a2c2987b2d256cf30"}, - {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:87701f25a2352e5bf7454caa64757642734da9f6b11384c1f9d1a8e699758057"}, - {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:682b987361e5fd7a139ed565e30d81fd81e9629acc7d925a205366877d8c8657"}, - {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce2186a7df133a9c895dea3331ddc5ddad42cdd0d1ea2f0a51e5d161e4762f28"}, - {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9f636b730f7e8cb19feb87094949ba54ee5357440b9658b2a32a5ce4bce53972"}, - {file = "multidict-6.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:73eae06aa53af2ea5270cc066dcaf02cc60d2994bbb2c4ef5764949257d10f43"}, - {file = "multidict-6.1.0-cp39-cp39-win32.whl", hash = "sha256:1ca0083e80e791cffc6efce7660ad24af66c8d4079d2a750b29001b53ff59ada"}, - {file = "multidict-6.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:aa466da5b15ccea564bdab9c89175c762bc12825f4659c11227f515cee76fa4a"}, - {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, - {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.11\""} - -[[package]] -name = "mypy" -version = "1.11.2" -description = "Optional static typing for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, - {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, - {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, - {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, - {file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, - {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, - {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, - {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", 
hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, - {file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, - {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, - {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, - {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, - {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, - {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, - {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, - {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, - {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, - {file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, - {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, -] - -[package.dependencies] -mypy-extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.6.0" - -[package.extras] -dmypy = ["psutil (>=4.0)"] -install-types = ["pip"] -mypyc = ["setuptools (>=50)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." -optional = false -python-versions = ">=3.5" -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "nbclient" -version = "0.10.0" -description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." 
-optional = false -python-versions = ">=3.8.0" -files = [ - {file = "nbclient-0.10.0-py3-none-any.whl", hash = "sha256:f13e3529332a1f1f81d82a53210322476a168bb7090a0289c795fe9cc11c9d3f"}, - {file = "nbclient-0.10.0.tar.gz", hash = "sha256:4b3f1b7dba531e498449c4db4f53da339c91d449dc11e9af3a43b4eb5c5abb09"}, -] - -[package.dependencies] -jupyter-client = ">=6.1.12" -jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -nbformat = ">=5.1" -traitlets = ">=5.4" - -[package.extras] -dev = ["pre-commit"] -docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient[test]", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling"] -test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0,<8)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] - -[[package]] -name = "nbconvert" -version = "7.16.4" -description = "Converting Jupyter Notebooks (.ipynb files) to other formats. Output formats include asciidoc, html, latex, markdown, pdf, py, rst, script. nbconvert can be used both as a Python library (`import nbconvert`) or as a command line tool (invoked as `jupyter nbconvert ...`)." -optional = false -python-versions = ">=3.8" -files = [ - {file = "nbconvert-7.16.4-py3-none-any.whl", hash = "sha256:05873c620fe520b6322bf8a5ad562692343fe3452abda5765c7a34b7d1aa3eb3"}, - {file = "nbconvert-7.16.4.tar.gz", hash = "sha256:86ca91ba266b0a448dc96fa6c5b9d98affabde2867b363258703536807f9f7f4"}, -] - -[package.dependencies] -beautifulsoup4 = "*" -bleach = "!=5.0.0" -defusedxml = "*" -importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""} -jinja2 = ">=3.0" -jupyter-core = ">=4.7" -jupyterlab-pygments = "*" -markupsafe = ">=2.0" -mistune = ">=2.0.3,<4" -nbclient = ">=0.5.0" -nbformat = ">=5.7" -packaging = "*" -pandocfilters = ">=1.4.1" -pygments = ">=2.4.1" -tinycss2 = "*" -traitlets = ">=5.1" - -[package.extras] -all = ["flaky", "ipykernel", "ipython", "ipywidgets (>=7.5)", "myst-parser", "nbsphinx (>=0.2.12)", "playwright", "pydata-sphinx-theme", "pyqtwebengine (>=5.15)", "pytest (>=7)", "sphinx (==5.0.2)", "sphinxcontrib-spelling", "tornado (>=6.1)"] -docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sphinx-theme", "sphinx (==5.0.2)", "sphinxcontrib-spelling"] -qtpdf = ["pyqtwebengine (>=5.15)"] -qtpng = ["pyqtwebengine (>=5.15)"] -serve = ["tornado (>=6.1)"] -test = ["flaky", "ipykernel", "ipywidgets (>=7.5)", "pytest (>=7)"] -webpdf = ["playwright"] - -[[package]] -name = "nbformat" -version = "5.10.4" -description = "The Jupyter Notebook format" -optional = false -python-versions = ">=3.8" -files = [ - {file = "nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b"}, - {file = "nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a"}, -] - -[package.dependencies] -fastjsonschema = ">=2.15" -jsonschema = ">=2.6" -jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" -traitlets = ">=5.1" - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] -test = ["pep440", "pre-commit", "pytest", "testpath"] - -[[package]] -name = "nest-asyncio" -version = "1.6.0" -description = "Patch asyncio to allow nested event loops" -optional = false -python-versions = ">=3.5" -files = [ - {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, - {file = 
"nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, -] - -[[package]] -name = "notebook" -version = "7.2.2" -description = "Jupyter Notebook - A web-based notebook environment for interactive computing" -optional = false -python-versions = ">=3.8" -files = [ - {file = "notebook-7.2.2-py3-none-any.whl", hash = "sha256:c89264081f671bc02eec0ed470a627ed791b9156cad9285226b31611d3e9fe1c"}, - {file = "notebook-7.2.2.tar.gz", hash = "sha256:2ef07d4220421623ad3fe88118d687bc0450055570cdd160814a59cf3a1c516e"}, -] - -[package.dependencies] -jupyter-server = ">=2.4.0,<3" -jupyterlab = ">=4.2.0,<4.3" -jupyterlab-server = ">=2.27.1,<3" -notebook-shim = ">=0.2,<0.3" -tornado = ">=6.2.0" - -[package.extras] -dev = ["hatch", "pre-commit"] -docs = ["myst-parser", "nbsphinx", "pydata-sphinx-theme", "sphinx (>=1.3.6)", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] -test = ["importlib-resources (>=5.0)", "ipykernel", "jupyter-server[test] (>=2.4.0,<3)", "jupyterlab-server[test] (>=2.27.1,<3)", "nbval", "pytest (>=7.0)", "pytest-console-scripts", "pytest-timeout", "pytest-tornasync", "requests"] - -[[package]] -name = "notebook-shim" -version = "0.2.4" -description = "A shim layer for notebook traits and config" -optional = false -python-versions = ">=3.7" -files = [ - {file = "notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef"}, - {file = "notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb"}, -] - -[package.dependencies] -jupyter-server = ">=1.8,<3" - -[package.extras] -test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"] - -[[package]] -name = "numpy" -version = "1.26.4" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = ">=3.9" -files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, - {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, - {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, - {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, - {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, - {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, -] - -[[package]] -name = "openai" -version = "1.47.1" -description = "The official Python library for the openai API" -optional = false -python-versions = ">=3.7.1" -files = [ - {file = "openai-1.47.1-py3-none-any.whl", hash = "sha256:34277583bf268bb2494bc03f48ac123788c5e2a914db1d5a23d5edc29d35c825"}, - {file = "openai-1.47.1.tar.gz", hash = "sha256:62c8f5f478f82ffafc93b33040f8bb16a45948306198bd0cba2da2ecd9cf7323"}, -] - -[package.dependencies] -anyio = ">=3.5.0,<5" -distro = ">=1.7.0,<2" -httpx = ">=0.23.0,<1" -jiter = ">=0.4.0,<1" -pydantic = ">=1.9.0,<3" -sniffio = "*" -tqdm = ">4" -typing-extensions = ">=4.11,<5" - -[package.extras] -datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] - -[[package]] -name = "orjson" -version = "3.10.7" -description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" -optional = false -python-versions = ">=3.8" -files = [ - {file = "orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bf6ba8ebc8ef5792e2337fb0419f8009729335bb400ece005606336b7fd7bab7"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7cf6222b29fbda9e3a472b41e6a5538b48f2c8f99261eecd60aafbdb60690c"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de817e2f5fc75a9e7dd350c4b0f54617b280e26d1631811a43e7e968fa71e3e9"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:348bdd16b32556cf8d7257b17cf2bdb7ab7976af4af41ebe79f9796c218f7e91"}, - {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:479fd0844ddc3ca77e0fd99644c7fe2de8e8be1efcd57705b5c92e5186e8a250"}, - {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fdf5197a21dd660cf19dfd2a3ce79574588f8f5e2dbf21bda9ee2d2b46924d84"}, - {file = "orjson-3.10.7-cp310-none-win32.whl", hash = "sha256:d374d36726746c81a49f3ff8daa2898dccab6596864ebe43d50733275c629175"}, - {file = "orjson-3.10.7-cp310-none-win_amd64.whl", hash = "sha256:cb61938aec8b0ffb6eef484d480188a1777e67b05d58e41b435c74b9d84e0b9c"}, - {file = "orjson-3.10.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7db8539039698ddfb9a524b4dd19508256107568cdad24f3682d5773e60504a2"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:480f455222cb7a1dea35c57a67578848537d2602b46c464472c995297117fa09"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a9c9b168b3a19e37fe2778c0003359f07822c90fdff8f98d9d2a91b3144d8e0"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8de062de550f63185e4c1c54151bdddfc5625e37daf0aa1e75d2a1293e3b7d9a"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:6b0dd04483499d1de9c8f6203f8975caf17a6000b9c0c54630cef02e44ee624e"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b58d3795dafa334fc8fd46f7c5dc013e6ad06fd5b9a4cc98cb1456e7d3558bd6"}, - {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33cfb96c24034a878d83d1a9415799a73dc77480e6c40417e5dda0710d559ee6"}, - {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e724cebe1fadc2b23c6f7415bad5ee6239e00a69f30ee423f319c6af70e2a5c0"}, - {file = "orjson-3.10.7-cp311-none-win32.whl", hash = "sha256:82763b46053727a7168d29c772ed5c870fdae2f61aa8a25994c7984a19b1021f"}, - {file = "orjson-3.10.7-cp311-none-win_amd64.whl", hash = "sha256:eb8d384a24778abf29afb8e41d68fdd9a156cf6e5390c04cc07bbc24b89e98b5"}, - {file = "orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bdbb61dcc365dd9be94e8f7df91975edc9364d6a78c8f7adb69c1cdff318ec93"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b48b3db6bb6e0a08fa8c83b47bc169623f801e5cc4f24442ab2b6617da3b5313"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23820a1563a1d386414fef15c249040042b8e5d07b40ab3fe3efbfbbcbcb8864"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09"}, - {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d352ee8ac1926d6193f602cbe36b1643bbd1bbcb25e3c1a657a4390f3000c9a5"}, - {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d2d9f990623f15c0ae7ac608103c33dfe1486d2ed974ac3f40b693bad1a22a7b"}, - {file = "orjson-3.10.7-cp312-none-win32.whl", hash = "sha256:7c4c17f8157bd520cdb7195f75ddbd31671997cbe10aee559c2d613592e7d7eb"}, - {file = "orjson-3.10.7-cp312-none-win_amd64.whl", hash = "sha256:1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1"}, - {file = "orjson-3.10.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:77d325ed866876c0fa6492598ec01fe30e803272a6e8b10e992288b009cbe149"}, - {file = "orjson-3.10.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ea2c232deedcb605e853ae1db2cc94f7390ac776743b699b50b071b02bea6fe"}, - {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3dcfbede6737fdbef3ce9c37af3fb6142e8e1ebc10336daa05872bfb1d87839c"}, - {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11748c135f281203f4ee695b7f80bb1358a82a63905f9f0b794769483ea854ad"}, - {file = "orjson-3.10.7-cp313-none-win32.whl", hash = "sha256:a7e19150d215c7a13f39eb787d84db274298d3f83d85463e61d277bbd7f401d2"}, - {file = "orjson-3.10.7-cp313-none-win_amd64.whl", hash = "sha256:eef44224729e9525d5261cc8d28d6b11cafc90e6bd0be2157bde69a52ec83024"}, - {file = "orjson-3.10.7-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6ea2b2258eff652c82652d5e0f02bd5e0463a6a52abb78e49ac288827aaa1469"}, - {file = 
"orjson-3.10.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:430ee4d85841e1483d487e7b81401785a5dfd69db5de01314538f31f8fbf7ee1"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4b6146e439af4c2472c56f8540d799a67a81226e11992008cb47e1267a9b3225"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:084e537806b458911137f76097e53ce7bf5806dda33ddf6aaa66a028f8d43a23"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829cf2195838e3f93b70fd3b4292156fc5e097aac3739859ac0dcc722b27ac0"}, - {file = "orjson-3.10.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1193b2416cbad1a769f868b1749535d5da47626ac29445803dae7cc64b3f5c98"}, - {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4e6c3da13e5a57e4b3dca2de059f243ebec705857522f188f0180ae88badd354"}, - {file = "orjson-3.10.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c31008598424dfbe52ce8c5b47e0752dca918a4fdc4a2a32004efd9fab41d866"}, - {file = "orjson-3.10.7-cp38-none-win32.whl", hash = "sha256:7122a99831f9e7fe977dc45784d3b2edc821c172d545e6420c375e5a935f5a1c"}, - {file = "orjson-3.10.7-cp38-none-win_amd64.whl", hash = "sha256:a763bc0e58504cc803739e7df040685816145a6f3c8a589787084b54ebc9f16e"}, - {file = "orjson-3.10.7-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e76be12658a6fa376fcd331b1ea4e58f5a06fd0220653450f0d415b8fd0fbe20"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed350d6978d28b92939bfeb1a0570c523f6170efc3f0a0ef1f1df287cd4f4960"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144888c76f8520e39bfa121b31fd637e18d4cc2f115727865fdf9fa325b10412"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09b2d92fd95ad2402188cf51573acde57eb269eddabaa60f69ea0d733e789fe9"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5b24a579123fa884f3a3caadaed7b75eb5715ee2b17ab5c66ac97d29b18fe57f"}, - {file = "orjson-3.10.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72591bcfe7512353bd609875ab38050efe3d55e18934e2f18950c108334b4ff"}, - {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f4db56635b58cd1a200b0a23744ff44206ee6aa428185e2b6c4a65b3197abdcd"}, - {file = "orjson-3.10.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0fa5886854673222618638c6df7718ea7fe2f3f2384c452c9ccedc70b4a510a5"}, - {file = "orjson-3.10.7-cp39-none-win32.whl", hash = "sha256:8272527d08450ab16eb405f47e0f4ef0e5ff5981c3d82afe0efd25dcbef2bcd2"}, - {file = "orjson-3.10.7-cp39-none-win_amd64.whl", hash = "sha256:974683d4618c0c7dbf4f69c95a979734bf183d0658611760017f6e70a145af58"}, - {file = "orjson-3.10.7.tar.gz", hash = "sha256:75ef0640403f945f3a1f9f6400686560dbfb0fb5b16589ad62cd477043c4eee3"}, -] - -[[package]] -name = "overrides" -version = "7.7.0" -description = "A decorator to automatically detect mismatch when overriding a method." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, - {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, -] - -[[package]] -name = "packaging" -version = "24.1" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, -] - -[[package]] -name = "pandocfilters" -version = "1.5.1" -description = "Utilities for writing pandoc filters in python" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc"}, - {file = "pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e"}, -] - -[[package]] -name = "parso" -version = "0.8.4" -description = "A Python Parser" -optional = false -python-versions = ">=3.6" -files = [ - {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, - {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, -] - -[package.extras] -qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] -testing = ["docopt", "pytest"] - -[[package]] -name = "pexpect" -version = "4.9.0" -description = "Pexpect allows easy control of interactive console applications." -optional = false -python-versions = "*" -files = [ - {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, - {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, -] - -[package.dependencies] -ptyprocess = ">=0.5" - -[[package]] -name = "platformdirs" -version = "4.3.6" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, - {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, -] - -[package.extras] -docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] -type = ["mypy (>=1.11.2)"] - -[[package]] -name = "pluggy" -version = "1.5.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "prometheus-client" -version = "0.21.0" -description = "Python client for the Prometheus monitoring system." -optional = false -python-versions = ">=3.8" -files = [ - {file = "prometheus_client-0.21.0-py3-none-any.whl", hash = "sha256:4fa6b4dd0ac16d58bb587c04b1caae65b8c5043e85f778f42f5f632f6af2e166"}, - {file = "prometheus_client-0.21.0.tar.gz", hash = "sha256:96c83c606b71ff2b0a433c98889d275f51ffec6c5e267de37c7a2b5c9aa9233e"}, -] - -[package.extras] -twisted = ["twisted"] - -[[package]] -name = "prompt-toolkit" -version = "3.0.47" -description = "Library for building powerful interactive command lines in Python" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, - {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, -] - -[package.dependencies] -wcwidth = "*" - -[[package]] -name = "psutil" -version = "6.0.0" -description = "Cross-platform lib for process and system monitoring in Python." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" -files = [ - {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, - {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, - {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"}, - {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"}, - {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"}, - {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"}, - {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"}, - {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, - {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"}, - {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"}, - {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, - {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, - {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, - {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, -] - -[package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] - -[[package]] -name = "ptyprocess" -version = "0.7.0" -description = "Run a subprocess in a pseudo terminal" -optional = false -python-versions = "*" -files = [ - {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, - {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, -] - -[[package]] -name = "pure-eval" -version = "0.2.3" -description = "Safely evaluate AST nodes without side effects" -optional = false -python-versions = "*" -files = [ - {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, - {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, -] - -[package.extras] -tests = ["pytest"] - 
-[[package]] -name = "pycparser" -version = "2.22" -description = "C parser in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, - {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, -] - -[[package]] -name = "pydantic" -version = "2.9.2" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, - {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, -] - -[package.dependencies] -annotated-types = ">=0.6.0" -pydantic-core = "2.23.4" -typing-extensions = [ - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, -] - -[package.extras] -email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] - -[[package]] -name = "pydantic-core" -version = "2.23.4" -description = "Core functionality for Pydantic validation and serialization" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, - {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, - {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, - {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, - {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, - {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, - {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, - {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, - {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, - {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, - {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, - {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, - {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, - {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, - {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, - {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, - {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, - {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, - {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, - {file = 
"pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, - {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, - {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, - {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, - {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, - {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, - {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, - {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, - {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, - {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, - {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, - {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, - {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, - {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, - {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, - {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, - {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, - {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, - {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, - {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, - {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, - {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, - {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, - {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - -[[package]] -name = "pydantic-settings" -version = "2.5.2" -description = "Settings management using Pydantic" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pydantic_settings-2.5.2-py3-none-any.whl", hash = "sha256:2c912e55fd5794a59bf8c832b9de832dcfdf4778d79ff79b708744eed499a907"}, - {file = "pydantic_settings-2.5.2.tar.gz", hash = "sha256:f90b139682bee4d2065273d5185d71d37ea46cfe57e1b5ae184fc6a0b2484ca0"}, -] - -[package.dependencies] -pydantic = ">=2.7.0" -python-dotenv = ">=0.21.0" - -[package.extras] -azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] -toml = ["tomli (>=2.0.1)"] -yaml = ["pyyaml (>=6.0.1)"] - -[[package]] -name = "pygments" -version = "2.18.0" -description = "Pygments is a syntax highlighting package written in Python." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, - {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, -] - -[package.extras] -windows-terminal = ["colorama (>=0.4.6)"] - -[[package]] -name = "pytest" -version = "7.4.4" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, - {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} - -[package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-asyncio" -version = "0.20.3" -description = "Pytest support for asyncio" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pytest-asyncio-0.20.3.tar.gz", hash = "sha256:83cbf01169ce3e8eb71c6c278ccb0574d1a7a3bb8eaaf5e50e0ad342afb33b36"}, - {file = "pytest_asyncio-0.20.3-py3-none-any.whl", hash = "sha256:f129998b209d04fcc65c96fc85c11e5316738358909a8399e93be553d7656442"}, -] - -[package.dependencies] -pytest = ">=6.1.0" - -[package.extras] -docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] -testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] - -[[package]] -name = "python-dateutil" -version = "2.9.0.post0" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -files = [ - {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, - {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "python-dotenv" -version = "1.0.1" -description = "Read key-value pairs from a .env file and set them as environment variables" -optional = false -python-versions = ">=3.8" -files = [ - {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, - {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, -] - -[package.extras] -cli = ["click (>=5.0)"] - -[[package]] -name = "python-json-logger" -version = "2.0.7" -description = "A python library adding a json log formatter" -optional = false -python-versions = ">=3.6" -files = [ - {file = "python-json-logger-2.0.7.tar.gz", hash = "sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c"}, - {file = "python_json_logger-2.0.7-py3-none-any.whl", hash = "sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd"}, -] - -[[package]] -name = "pywin32" -version = "306" -description = "Python for Window Extensions" -optional = false -python-versions = "*" -files = 
[ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, -] - -[[package]] -name = "pywinpty" -version = "2.0.13" -description = "Pseudo terminal support for Windows from Python." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "pywinpty-2.0.13-cp310-none-win_amd64.whl", hash = "sha256:697bff211fb5a6508fee2dc6ff174ce03f34a9a233df9d8b5fe9c8ce4d5eaf56"}, - {file = "pywinpty-2.0.13-cp311-none-win_amd64.whl", hash = "sha256:b96fb14698db1284db84ca38c79f15b4cfdc3172065b5137383910567591fa99"}, - {file = "pywinpty-2.0.13-cp312-none-win_amd64.whl", hash = "sha256:2fd876b82ca750bb1333236ce98488c1be96b08f4f7647cfdf4129dfad83c2d4"}, - {file = "pywinpty-2.0.13-cp38-none-win_amd64.whl", hash = "sha256:61d420c2116c0212808d31625611b51caf621fe67f8a6377e2e8b617ea1c1f7d"}, - {file = "pywinpty-2.0.13-cp39-none-win_amd64.whl", hash = "sha256:71cb613a9ee24174730ac7ae439fd179ca34ccb8c5349e8d7b72ab5dea2c6f4b"}, - {file = "pywinpty-2.0.13.tar.gz", hash = "sha256:c34e32351a3313ddd0d7da23d27f835c860d32fe4ac814d372a3ea9594f41dde"}, -] - -[[package]] -name = "pyyaml" -version = "6.0.2" -description = "YAML parser and emitter for Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, - {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, - {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, - {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, - {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = 
"sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, - {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, - {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, - {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, - {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, - {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, - {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, - 
{file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, - {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, - {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, - {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, - {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, - {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, - {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, - {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, -] - -[[package]] -name = "pyzmq" -version = "26.2.0" -description = "Python bindings for 0MQ" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:ddf33d97d2f52d89f6e6e7ae66ee35a4d9ca6f36eda89c24591b0c40205a3629"}, - {file = "pyzmq-26.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dacd995031a01d16eec825bf30802fceb2c3791ef24bcce48fa98ce40918c27b"}, - {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89289a5ee32ef6c439086184529ae060c741334b8970a6855ec0b6ad3ff28764"}, - {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5506f06d7dc6ecf1efacb4a013b1f05071bb24b76350832c96449f4a2d95091c"}, - {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ea039387c10202ce304af74def5021e9adc6297067f3441d348d2b633e8166a"}, - {file = "pyzmq-26.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a2224fa4a4c2ee872886ed00a571f5e967c85e078e8e8c2530a2fb01b3309b88"}, - {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:28ad5233e9c3b52d76196c696e362508959741e1a005fb8fa03b51aea156088f"}, - {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1c17211bc037c7d88e85ed8b7d8f7e52db6dc8eca5590d162717c654550f7282"}, - {file = "pyzmq-26.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b8f86dd868d41bea9a5f873ee13bf5551c94cf6bc51baebc6f85075971fe6eea"}, - {file = "pyzmq-26.2.0-cp310-cp310-win32.whl", hash = "sha256:46a446c212e58456b23af260f3d9fb785054f3e3653dbf7279d8f2b5546b21c2"}, - {file = 
"pyzmq-26.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:49d34ab71db5a9c292a7644ce74190b1dd5a3475612eefb1f8be1d6961441971"}, - {file = "pyzmq-26.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:bfa832bfa540e5b5c27dcf5de5d82ebc431b82c453a43d141afb1e5d2de025fa"}, - {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:8f7e66c7113c684c2b3f1c83cdd3376103ee0ce4c49ff80a648643e57fb22218"}, - {file = "pyzmq-26.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3a495b30fc91db2db25120df5847d9833af237546fd59170701acd816ccc01c4"}, - {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77eb0968da535cba0470a5165468b2cac7772cfb569977cff92e240f57e31bef"}, - {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ace4f71f1900a548f48407fc9be59c6ba9d9aaf658c2eea6cf2779e72f9f317"}, - {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92a78853d7280bffb93df0a4a6a2498cba10ee793cc8076ef797ef2f74d107cf"}, - {file = "pyzmq-26.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:689c5d781014956a4a6de61d74ba97b23547e431e9e7d64f27d4922ba96e9d6e"}, - {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0aca98bc423eb7d153214b2df397c6421ba6373d3397b26c057af3c904452e37"}, - {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f3496d76b89d9429a656293744ceca4d2ac2a10ae59b84c1da9b5165f429ad3"}, - {file = "pyzmq-26.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5c2b3bfd4b9689919db068ac6c9911f3fcb231c39f7dd30e3138be94896d18e6"}, - {file = "pyzmq-26.2.0-cp311-cp311-win32.whl", hash = "sha256:eac5174677da084abf378739dbf4ad245661635f1600edd1221f150b165343f4"}, - {file = "pyzmq-26.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:5a509df7d0a83a4b178d0f937ef14286659225ef4e8812e05580776c70e155d5"}, - {file = "pyzmq-26.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:c0e6091b157d48cbe37bd67233318dbb53e1e6327d6fc3bb284afd585d141003"}, - {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:ded0fc7d90fe93ae0b18059930086c51e640cdd3baebdc783a695c77f123dcd9"}, - {file = "pyzmq-26.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:17bf5a931c7f6618023cdacc7081f3f266aecb68ca692adac015c383a134ca52"}, - {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55cf66647e49d4621a7e20c8d13511ef1fe1efbbccf670811864452487007e08"}, - {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4661c88db4a9e0f958c8abc2b97472e23061f0bc737f6f6179d7a27024e1faa5"}, - {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea7f69de383cb47522c9c208aec6dd17697db7875a4674c4af3f8cfdac0bdeae"}, - {file = "pyzmq-26.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:7f98f6dfa8b8ccaf39163ce872bddacca38f6a67289116c8937a02e30bbe9711"}, - {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e3e0210287329272539eea617830a6a28161fbbd8a3271bf4150ae3e58c5d0e6"}, - {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6b274e0762c33c7471f1a7471d1a2085b1a35eba5cdc48d2ae319f28b6fc4de3"}, - {file = "pyzmq-26.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:29c6a4635eef69d68a00321e12a7d2559fe2dfccfa8efae3ffb8e91cd0b36a8b"}, - {file = "pyzmq-26.2.0-cp312-cp312-win32.whl", hash = "sha256:989d842dc06dc59feea09e58c74ca3e1678c812a4a8a2a419046d711031f69c7"}, - 
{file = "pyzmq-26.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:2a50625acdc7801bc6f74698c5c583a491c61d73c6b7ea4dee3901bb99adb27a"}, - {file = "pyzmq-26.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:4d29ab8592b6ad12ebbf92ac2ed2bedcfd1cec192d8e559e2e099f648570e19b"}, - {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9dd8cd1aeb00775f527ec60022004d030ddc51d783d056e3e23e74e623e33726"}, - {file = "pyzmq-26.2.0-cp313-cp313-macosx_10_15_universal2.whl", hash = "sha256:28c812d9757fe8acecc910c9ac9dafd2ce968c00f9e619db09e9f8f54c3a68a3"}, - {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d80b1dd99c1942f74ed608ddb38b181b87476c6a966a88a950c7dee118fdf50"}, - {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c997098cc65e3208eca09303630e84d42718620e83b733d0fd69543a9cab9cb"}, - {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ad1bc8d1b7a18497dda9600b12dc193c577beb391beae5cd2349184db40f187"}, - {file = "pyzmq-26.2.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:bea2acdd8ea4275e1278350ced63da0b166421928276c7c8e3f9729d7402a57b"}, - {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:23f4aad749d13698f3f7b64aad34f5fc02d6f20f05999eebc96b89b01262fb18"}, - {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a4f96f0d88accc3dbe4a9025f785ba830f968e21e3e2c6321ccdfc9aef755115"}, - {file = "pyzmq-26.2.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ced65e5a985398827cc9276b93ef6dfabe0273c23de8c7931339d7e141c2818e"}, - {file = "pyzmq-26.2.0-cp313-cp313-win32.whl", hash = "sha256:31507f7b47cc1ead1f6e86927f8ebb196a0bab043f6345ce070f412a59bf87b5"}, - {file = "pyzmq-26.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:70fc7fcf0410d16ebdda9b26cbd8bf8d803d220a7f3522e060a69a9c87bf7bad"}, - {file = "pyzmq-26.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:c3789bd5768ab5618ebf09cef6ec2b35fed88709b104351748a63045f0ff9797"}, - {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:034da5fc55d9f8da09015d368f519478a52675e558c989bfcb5cf6d4e16a7d2a"}, - {file = "pyzmq-26.2.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:c92d73464b886931308ccc45b2744e5968cbaade0b1d6aeb40d8ab537765f5bc"}, - {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:794a4562dcb374f7dbbfb3f51d28fb40123b5a2abadee7b4091f93054909add5"}, - {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aee22939bb6075e7afededabad1a56a905da0b3c4e3e0c45e75810ebe3a52672"}, - {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae90ff9dad33a1cfe947d2c40cb9cb5e600d759ac4f0fd22616ce6540f72797"}, - {file = "pyzmq-26.2.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:43a47408ac52647dfabbc66a25b05b6a61700b5165807e3fbd40063fcaf46386"}, - {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:25bf2374a2a8433633c65ccb9553350d5e17e60c8eb4de4d92cc6bd60f01d306"}, - {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_i686.whl", hash = "sha256:007137c9ac9ad5ea21e6ad97d3489af654381324d5d3ba614c323f60dab8fae6"}, - {file = "pyzmq-26.2.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:470d4a4f6d48fb34e92d768b4e8a5cc3780db0d69107abf1cd7ff734b9766eb0"}, - {file = "pyzmq-26.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:3b55a4229ce5da9497dd0452b914556ae58e96a4381bb6f59f1305dfd7e53fc8"}, - {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9cb3a6460cdea8fe8194a76de8895707e61ded10ad0be97188cc8463ffa7e3a8"}, - {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8ab5cad923cc95c87bffee098a27856c859bd5d0af31bd346035aa816b081fe1"}, - {file = "pyzmq-26.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ed69074a610fad1c2fda66180e7b2edd4d31c53f2d1872bc2d1211563904cd9"}, - {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:cccba051221b916a4f5e538997c45d7d136a5646442b1231b916d0164067ea27"}, - {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0eaa83fc4c1e271c24eaf8fb083cbccef8fde77ec8cd45f3c35a9a123e6da097"}, - {file = "pyzmq-26.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9edda2df81daa129b25a39b86cb57dfdfe16f7ec15b42b19bfac503360d27a93"}, - {file = "pyzmq-26.2.0-cp37-cp37m-win32.whl", hash = "sha256:ea0eb6af8a17fa272f7b98d7bebfab7836a0d62738e16ba380f440fceca2d951"}, - {file = "pyzmq-26.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4ff9dc6bc1664bb9eec25cd17506ef6672d506115095411e237d571e92a58231"}, - {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:2eb7735ee73ca1b0d71e0e67c3739c689067f055c764f73aac4cc8ecf958ee3f"}, - {file = "pyzmq-26.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a534f43bc738181aa7cbbaf48e3eca62c76453a40a746ab95d4b27b1111a7d2"}, - {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:aedd5dd8692635813368e558a05266b995d3d020b23e49581ddd5bbe197a8ab6"}, - {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8be4700cd8bb02cc454f630dcdf7cfa99de96788b80c51b60fe2fe1dac480289"}, - {file = "pyzmq-26.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fcc03fa4997c447dce58264e93b5aa2d57714fbe0f06c07b7785ae131512732"}, - {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:402b190912935d3db15b03e8f7485812db350d271b284ded2b80d2e5704be780"}, - {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8685fa9c25ff00f550c1fec650430c4b71e4e48e8d852f7ddcf2e48308038640"}, - {file = "pyzmq-26.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:76589c020680778f06b7e0b193f4b6dd66d470234a16e1df90329f5e14a171cd"}, - {file = "pyzmq-26.2.0-cp38-cp38-win32.whl", hash = "sha256:8423c1877d72c041f2c263b1ec6e34360448decfb323fa8b94e85883043ef988"}, - {file = "pyzmq-26.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:76589f2cd6b77b5bdea4fca5992dc1c23389d68b18ccc26a53680ba2dc80ff2f"}, - {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:b1d464cb8d72bfc1a3adc53305a63a8e0cac6bc8c5a07e8ca190ab8d3faa43c2"}, - {file = "pyzmq-26.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4da04c48873a6abdd71811c5e163bd656ee1b957971db7f35140a2d573f6949c"}, - {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d049df610ac811dcffdc147153b414147428567fbbc8be43bb8885f04db39d98"}, - {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:05590cdbc6b902101d0e65d6a4780af14dc22914cc6ab995d99b85af45362cc9"}, - {file = "pyzmq-26.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c811cfcd6a9bf680236c40c6f617187515269ab2912f3d7e8c0174898e2519db"}, - {file = 
"pyzmq-26.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6835dd60355593de10350394242b5757fbbd88b25287314316f266e24c61d073"}, - {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc6bee759a6bddea5db78d7dcd609397449cb2d2d6587f48f3ca613b19410cfc"}, - {file = "pyzmq-26.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c530e1eecd036ecc83c3407f77bb86feb79916d4a33d11394b8234f3bd35b940"}, - {file = "pyzmq-26.2.0-cp39-cp39-win32.whl", hash = "sha256:367b4f689786fca726ef7a6c5ba606958b145b9340a5e4808132cc65759abd44"}, - {file = "pyzmq-26.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:e6fa2e3e683f34aea77de8112f6483803c96a44fd726d7358b9888ae5bb394ec"}, - {file = "pyzmq-26.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:7445be39143a8aa4faec43b076e06944b8f9d0701b669df4af200531b21e40bb"}, - {file = "pyzmq-26.2.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:706e794564bec25819d21a41c31d4df2d48e1cc4b061e8d345d7fb4dd3e94072"}, - {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b435f2753621cd36e7c1762156815e21c985c72b19135dac43a7f4f31d28dd1"}, - {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:160c7e0a5eb178011e72892f99f918c04a131f36056d10d9c1afb223fc952c2d"}, - {file = "pyzmq-26.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4a71d5d6e7b28a47a394c0471b7e77a0661e2d651e7ae91e0cab0a587859ca"}, - {file = "pyzmq-26.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:90412f2db8c02a3864cbfc67db0e3dcdbda336acf1c469526d3e869394fe001c"}, - {file = "pyzmq-26.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2ea4ad4e6a12e454de05f2949d4beddb52460f3de7c8b9d5c46fbb7d7222e02c"}, - {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fc4f7a173a5609631bb0c42c23d12c49df3966f89f496a51d3eb0ec81f4519d6"}, - {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:878206a45202247781472a2d99df12a176fef806ca175799e1c6ad263510d57c"}, - {file = "pyzmq-26.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17c412bad2eb9468e876f556eb4ee910e62d721d2c7a53c7fa31e643d35352e6"}, - {file = "pyzmq-26.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:0d987a3ae5a71c6226b203cfd298720e0086c7fe7c74f35fa8edddfbd6597eed"}, - {file = "pyzmq-26.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:39887ac397ff35b7b775db7201095fc6310a35fdbae85bac4523f7eb3b840e20"}, - {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fdb5b3e311d4d4b0eb8b3e8b4d1b0a512713ad7e6a68791d0923d1aec433d919"}, - {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:226af7dcb51fdb0109f0016449b357e182ea0ceb6b47dfb5999d569e5db161d5"}, - {file = "pyzmq-26.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bed0e799e6120b9c32756203fb9dfe8ca2fb8467fed830c34c877e25638c3fc"}, - {file = "pyzmq-26.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:29c7947c594e105cb9e6c466bace8532dc1ca02d498684128b339799f5248277"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cdeabcff45d1c219636ee2e54d852262e5c2e085d6cb476d938aee8d921356b3"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:35cffef589bcdc587d06f9149f8d5e9e8859920a071df5a2671de2213bef592a"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18c8dc3b7468d8b4bdf60ce9d7141897da103c7a4690157b32b60acb45e333e6"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7133d0a1677aec369d67dd78520d3fa96dd7f3dcec99d66c1762870e5ea1a50a"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6a96179a24b14fa6428cbfc08641c779a53f8fcec43644030328f44034c7f1f4"}, - {file = "pyzmq-26.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4f78c88905461a9203eac9faac157a2a0dbba84a0fd09fd29315db27be40af9f"}, - {file = "pyzmq-26.2.0.tar.gz", hash = "sha256:070672c258581c8e4f640b5159297580a9974b026043bd4ab0470be9ed324f1f"}, -] - -[package.dependencies] -cffi = {version = "*", markers = "implementation_name == \"pypy\""} - -[[package]] -name = "referencing" -version = "0.35.1" -description = "JSON Referencing + Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"}, - {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"}, -] - -[package.dependencies] -attrs = ">=22.2.0" -rpds-py = ">=0.7.0" - -[[package]] -name = "regex" -version = "2024.9.11" -description = "Alternative regular expression module, to replace re." -optional = false -python-versions = ">=3.8" -files = [ - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"}, - {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"}, - {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"}, - {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"}, - {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"}, - {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"}, - {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"}, - {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"}, - {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"}, - {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"}, - {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"}, - {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"}, - {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"}, - {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"}, - {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"}, - {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"}, - {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"}, - {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"}, - {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"}, - {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, -] - -[[package]] -name = "requests" -version = "2.32.3" -description = "Python HTTP for Humans." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "rfc3339-validator" -version = "0.1.4" -description = "A pure python RFC3339 validator" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, - {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, -] - -[package.dependencies] -six = "*" - -[[package]] -name = "rfc3986-validator" -version = "0.1.1" -description = "Pure python rfc3986 validator" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"}, - {file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"}, -] - -[[package]] -name = "rpds-py" -version = "0.20.0" -description = "Python bindings to Rust's persistent data structures (rpds)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2"}, - {file = "rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf"}, - {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140"}, - {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f"}, - {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce"}, - {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94"}, - {file = "rpds_py-0.20.0-cp310-none-win32.whl", hash = 
"sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee"}, - {file = "rpds_py-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399"}, - {file = "rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489"}, - {file = "rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209"}, - {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3"}, - {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272"}, - {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad"}, - {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58"}, - {file = "rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0"}, - {file = "rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c"}, - {file = "rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6"}, - {file = "rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4"}, - {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef"}, - {file = 
"rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821"}, - {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940"}, - {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174"}, - {file = "rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139"}, - {file = "rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585"}, - {file = "rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29"}, - {file = "rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879"}, - {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f"}, - {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c"}, - {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2"}, - {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57"}, - {file = "rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a"}, - {file = "rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2"}, - {file = "rpds_py-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f2fbf7db2012d4876fb0d66b5b9ba6591197b0f165db8d99371d976546472a24"}, - {file = "rpds_py-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e5f3cd7397c8f86c8cc72d5a791071431c108edd79872cdd96e00abd8497d29"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce9845054c13696f7af7f2b353e6b4f676dab1b4b215d7fe5e05c6f8bb06f965"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c3e130fd0ec56cb76eb49ef52faead8ff09d13f4527e9b0c400307ff72b408e1"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b16aa0107ecb512b568244ef461f27697164d9a68d8b35090e9b0c1c8b27752"}, - {file = 
"rpds_py-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7f429242aae2947246587d2964fad750b79e8c233a2367f71b554e9447949c"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0fc424a5842a11e28956e69395fbbeab2c97c42253169d87e90aac2886d751"}, - {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8c00a3b1e70c1d3891f0db1b05292747f0dbcfb49c43f9244d04c70fbc40eb8"}, - {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40ce74fc86ee4645d0a225498d091d8bc61f39b709ebef8204cb8b5a464d3c0e"}, - {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4fe84294c7019456e56d93e8ababdad5a329cd25975be749c3f5f558abb48253"}, - {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:338ca4539aad4ce70a656e5187a3a31c5204f261aef9f6ab50e50bcdffaf050a"}, - {file = "rpds_py-0.20.0-cp38-none-win32.whl", hash = "sha256:54b43a2b07db18314669092bb2de584524d1ef414588780261e31e85846c26a5"}, - {file = "rpds_py-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:a1862d2d7ce1674cffa6d186d53ca95c6e17ed2b06b3f4c476173565c862d232"}, - {file = "rpds_py-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3fde368e9140312b6e8b6c09fb9f8c8c2f00999d1823403ae90cc00480221b22"}, - {file = "rpds_py-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9824fb430c9cf9af743cf7aaf6707bf14323fb51ee74425c380f4c846ea70789"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11ef6ce74616342888b69878d45e9f779b95d4bd48b382a229fe624a409b72c5"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c52d3f2f82b763a24ef52f5d24358553e8403ce05f893b5347098014f2d9eff2"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d35cef91e59ebbeaa45214861874bc6f19eb35de96db73e467a8358d701a96c"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d72278a30111e5b5525c1dd96120d9e958464316f55adb030433ea905866f4de"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c29cbbba378759ac5786730d1c3cb4ec6f8ababf5c42a9ce303dc4b3d08cda"}, - {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6632f2d04f15d1bd6fe0eedd3b86d9061b836ddca4c03d5cf5c7e9e6b7c14580"}, - {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d0b67d87bb45ed1cd020e8fbf2307d449b68abc45402fe1a4ac9e46c3c8b192b"}, - {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec31a99ca63bf3cd7f1a5ac9fe95c5e2d060d3c768a09bc1d16e235840861420"}, - {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e6c9976e38f4d8c4a63bd8a8edac5307dffd3ee7e6026d97f3cc3a2dc02a0b"}, - {file = "rpds_py-0.20.0-cp39-none-win32.whl", hash = "sha256:569b3ea770c2717b730b61998b6c54996adee3cef69fc28d444f3e7920313cf7"}, - {file = "rpds_py-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:e6900ecdd50ce0facf703f7a00df12374b74bbc8ad9fe0f6559947fb20f82364"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc"}, - {file = 
"rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344"}, - {file = "rpds_py-0.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f918a1a130a6dfe1d7fe0f105064141342e7dd1611f2e6a21cd2f5c8cb1cfb3e"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f60012a73aa396be721558caa3a6fd49b3dd0033d1675c6d59c4502e870fcf0c"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d2b1ad682a3dfda2a4e8ad8572f3100f95fad98cb99faf37ff0ddfe9cbf9d03"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:614fdafe9f5f19c63ea02817fa4861c606a59a604a77c8cdef5aa01d28b97921"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa518bcd7600c584bf42e6617ee8132869e877db2f76bcdc281ec6a4113a53ab"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0475242f447cc6cb8a9dd486d68b2ef7fbee84427124c232bff5f63b1fe11e5"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90a4cd061914a60bd51c68bcb4357086991bd0bb93d8aa66a6da7701370708f"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:def7400461c3a3f26e49078302e1c1b38f6752342c77e3cf72ce91ca69fb1bc1"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:65794e4048ee837494aea3c21a28ad5fc080994dfba5b036cf84de37f7ad5074"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:faefcc78f53a88f3076b7f8be0a8f8d35133a3ecf7f3770895c25f8813460f08"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5b4f105deeffa28bbcdff6c49b34e74903139afa690e35d2d9e3c2c2fba18cec"}, - {file = "rpds_py-0.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:fdfc3a892927458d98f3d55428ae46b921d1f7543b89382fdb483f5640daaec8"}, - {file = "rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121"}, -] - -[[package]] -name = "ruff" -version = "0.5.7" -description = "An extremely fast Python linter and code formatter, written in Rust." -optional = false -python-versions = ">=3.7" -files = [ - {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"}, - {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"}, - {file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"}, - {file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"}, - {file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"}, - {file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"}, - {file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"}, - {file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"}, - {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"}, -] - -[[package]] -name = "send2trash" -version = "1.8.3" -description = "Send file to trash natively under Mac OS X, Windows and Linux" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" -files = [ - {file = "Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9"}, - {file = "Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf"}, -] - -[package.extras] -nativelib = ["pyobjc-framework-Cocoa", 
"pywin32"] -objc = ["pyobjc-framework-Cocoa"] -win32 = ["pywin32"] - -[[package]] -name = "setuptools" -version = "67.8.0" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "setuptools-67.8.0-py3-none-any.whl", hash = "sha256:5df61bf30bb10c6f756eb19e7c9f3b473051f48db77fddbe06ff2ca307df9a6f"}, - {file = "setuptools-67.8.0.tar.gz", hash = "sha256:62642358adc77ffa87233bc4d2354c4b2682d214048f500964dbe760ccedf102"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -files = [ - {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, - {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, -] - -[[package]] -name = "soupsieve" -version = "2.6" -description = "A modern CSS selector implementation for Beautiful Soup." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, - {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, -] - -[[package]] -name = "sqlalchemy" -version = "2.0.35" -description = "Database Abstraction Library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, 
- {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, - {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, - {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, - {file = 
"SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, - {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, - {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, - {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, -] - -[package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} -typing-extensions = ">=4.6.0" - -[package.extras] -aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] -aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] -aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] -mssql = ["pyodbc"] -mssql-pymssql = ["pymssql"] -mssql-pyodbc = ["pyodbc"] -mypy = ["mypy (>=0.910)"] -mysql = ["mysqlclient (>=1.4.0)"] -mysql-connector = ["mysql-connector-python"] -oracle = ["cx_oracle (>=8)"] -oracle-oracledb = ["oracledb (>=1.0.1)"] -postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] -postgresql-pg8000 = ["pg8000 (>=1.29.1)"] -postgresql-psycopg = ["psycopg (>=3.0.7)"] -postgresql-psycopg2binary = ["psycopg2-binary"] -postgresql-psycopg2cffi = ["psycopg2cffi"] -postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] -pymysql = ["pymysql"] -sqlcipher = ["sqlcipher3_binary"] - -[[package]] -name = "stack-data" -version = "0.6.3" -description = "Extract data from python stack frames and tracebacks for informative displays" -optional = false -python-versions = "*" -files = [ - {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, - {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, -] - -[package.dependencies] -asttokens = ">=2.1.0" -executing = ">=1.2.0" -pure-eval = "*" - -[package.extras] -tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] - -[[package]] -name = "tenacity" -version = "8.5.0" -description = "Retry code until it succeeds" -optional = false -python-versions = ">=3.8" -files = [ - {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, - {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, -] - -[package.extras] -doc = ["reno", "sphinx"] -test = ["pytest", "tornado (>=4.5)", "typeguard"] - -[[package]] -name = "terminado" -version = "0.18.1" -description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0"}, - {file = "terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e"}, -] - -[package.dependencies] -ptyprocess = {version = "*", markers = "os_name != \"nt\""} -pywinpty = {version = ">=1.1.0", markers = "os_name == \"nt\""} -tornado = ">=6.1.0" - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"] -typing = ["mypy (>=1.6,<2.0)", "traitlets (>=5.11.1)"] - -[[package]] -name = "tiktoken" -version = "0.7.0" -description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" -optional = false -python-versions = ">=3.8" -files = [ - {file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"}, - {file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"}, - {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79383a6e2c654c6040e5f8506f3750db9ddd71b550c724e673203b4f6b4b4590"}, - {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c"}, - {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13c94efacdd3de9aff824a788353aa5749c0faee1fbe3816df365ea450b82311"}, - {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8e58c7eb29d2ab35a7a8929cbeea60216a4ccdf42efa8974d8e176d50c9a3df5"}, - {file = "tiktoken-0.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:21a20c3bd1dd3e55b91c1331bf25f4af522c525e771691adbc9a69336fa7f702"}, - {file = "tiktoken-0.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10c7674f81e6e350fcbed7c09a65bca9356eaab27fb2dac65a1e440f2bcfe30f"}, - {file = "tiktoken-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:084cec29713bc9d4189a937f8a35dbdfa785bd1235a34c1124fe2323821ee93f"}, - {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811229fde1652fedcca7c6dfe76724d0908775b353556d8a71ed74d866f73f7b"}, - {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b6e7dc2e7ad1b3757e8a24597415bafcfb454cebf9a33a01f2e6ba2e663992"}, - {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1063c5748be36344c7e18c7913c53e2cca116764c2080177e57d62c7ad4576d1"}, - {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:20295d21419bfcca092644f7e2f2138ff947a6eb8cfc732c09cc7d76988d4a89"}, - {file = "tiktoken-0.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:959d993749b083acc57a317cbc643fb85c014d055b2119b739487288f4e5d1cb"}, - {file = "tiktoken-0.7.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:71c55d066388c55a9c00f61d2c456a6086673ab7dec22dd739c23f77195b1908"}, - {file = "tiktoken-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:09ed925bccaa8043e34c519fbb2f99110bd07c6fd67714793c21ac298e449410"}, - {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03c6c40ff1db0f48a7b4d2dafeae73a5607aacb472fa11f125e7baf9dce73704"}, - {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d20b5c6af30e621b4aca094ee61777a44118f52d886dbe4f02b70dfe05c15350"}, - {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d427614c3e074004efa2f2411e16c826f9df427d3c70a54725cae860f09e4bf4"}, - {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c46d7af7b8c6987fac9b9f61041b452afe92eb087d29c9ce54951280f899a97"}, - {file = "tiktoken-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0bc603c30b9e371e7c4c7935aba02af5994a909fc3c0fe66e7004070858d3f8f"}, - {file = "tiktoken-0.7.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2398fecd38c921bcd68418675a6d155fad5f5e14c2e92fcf5fe566fa5485a858"}, - {file = "tiktoken-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f5f6afb52fb8a7ea1c811e435e4188f2bef81b5e0f7a8635cc79b0eef0193d6"}, - {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:861f9ee616766d736be4147abac500732b505bf7013cfaf019b85892637f235e"}, - {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54031f95c6939f6b78122c0aa03a93273a96365103793a22e1793ee86da31685"}, - {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fffdcb319b614cf14f04d02a52e26b1d1ae14a570f90e9b55461a72672f7b13d"}, - {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c72baaeaefa03ff9ba9688624143c858d1f6b755bb85d456d59e529e17234769"}, - {file = "tiktoken-0.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:131b8aeb043a8f112aad9f46011dced25d62629091e51d9dc1adbf4a1cc6aa98"}, - {file = "tiktoken-0.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cabc6dc77460df44ec5b879e68692c63551ae4fae7460dd4ff17181df75f1db7"}, - {file = "tiktoken-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8d57f29171255f74c0aeacd0651e29aa47dff6f070cb9f35ebc14c82278f3b25"}, - {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ee92776fdbb3efa02a83f968c19d4997a55c8e9ce7be821ceee04a1d1ee149c"}, - {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e215292e99cb41fbc96988ef62ea63bb0ce1e15f2c147a61acc319f8b4cbe5bf"}, - {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a81bac94769cab437dd3ab0b8a4bc4e0f9cf6835bcaa88de71f39af1791727a"}, - {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6d73ea93e91d5ca771256dfc9d1d29f5a554b83821a1dc0891987636e0ae226"}, - {file = "tiktoken-0.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:2bcb28ddf79ffa424f171dfeef9a4daff61a94c631ca6813f43967cb263b83b9"}, - {file = "tiktoken-0.7.0.tar.gz", hash = "sha256:1077266e949c24e0291f6c350433c6f0971365ece2b173a23bc3b9f9defef6b6"}, -] - -[package.dependencies] -regex = ">=2022.1.18" -requests = ">=2.26.0" - -[package.extras] -blobfile = ["blobfile (>=2)"] - -[[package]] -name = "tinycss2" -version = "1.3.0" -description = "A tiny CSS parser" -optional = false -python-versions = ">=3.8" -files = [ - {file = "tinycss2-1.3.0-py3-none-any.whl", hash = "sha256:54a8dbdffb334d536851be0226030e9505965bb2f30f21a4a82c55fb2a80fae7"}, - {file = "tinycss2-1.3.0.tar.gz", hash = "sha256:152f9acabd296a8375fbca5b84c961ff95971fcfc32e79550c8df8e29118c54d"}, -] - -[package.dependencies] -webencodings = ">=0.4" - -[package.extras] -doc = ["sphinx", "sphinx_rtd_theme"] -test = ["pytest", "ruff"] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -files = [ - {file = 
"tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "tornado" -version = "6.4.1" -description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." -optional = false -python-versions = ">=3.8" -files = [ - {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"}, - {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"}, - {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"}, - {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"}, - {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"}, - {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"}, - {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"}, - {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"}, - {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"}, - {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"}, - {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"}, -] - -[[package]] -name = "tqdm" -version = "4.66.5" -description = "Fast, Extensible Progress Meter" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, - {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] -notebook = ["ipywidgets (>=6)"] -slack = ["slack-sdk"] -telegram = ["requests"] - -[[package]] -name = "traitlets" -version = "5.14.3" -description = "Traitlets Python configuration system" -optional = false -python-versions = ">=3.8" -files = [ - {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, - {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, -] - -[package.extras] -docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] -test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] - -[[package]] -name = "types-python-dateutil" -version = "2.9.0.20240906" -description = "Typing stubs for 
python-dateutil" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-python-dateutil-2.9.0.20240906.tar.gz", hash = "sha256:9706c3b68284c25adffc47319ecc7947e5bb86b3773f843c73906fd598bc176e"}, - {file = "types_python_dateutil-2.9.0.20240906-py3-none-any.whl", hash = "sha256:27c8cc2d058ccb14946eebcaaa503088f4f6dbc4fb6093d3d456a49aef2753f6"}, -] - -[[package]] -name = "types-pyyaml" -version = "6.0.12.20240917" -description = "Typing stubs for PyYAML" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"}, - {file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"}, -] - -[[package]] -name = "types-requests" -version = "2.32.0.20240914" -description = "Typing stubs for requests" -optional = false -python-versions = ">=3.8" -files = [ - {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, - {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, -] - -[package.dependencies] -urllib3 = ">=2" - -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - -[[package]] -name = "typing-inspect" -version = "0.9.0" -description = "Runtime inspection utilities for typing module." -optional = false -python-versions = "*" -files = [ - {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, - {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, -] - -[package.dependencies] -mypy-extensions = ">=0.3.0" -typing-extensions = ">=3.7.4" - -[[package]] -name = "uri-template" -version = "1.3.0" -description = "RFC 6570 URI Template Processor" -optional = false -python-versions = ">=3.7" -files = [ - {file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"}, - {file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"}, -] - -[package.extras] -dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"] - -[[package]] -name = "urllib3" -version = "2.2.3" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, - {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "wcwidth" -version = "0.2.13" -description = "Measures the displayed width of unicode strings in a terminal" -optional = false -python-versions = "*" -files = [ - {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, - {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, -] - -[[package]] -name = "webcolors" -version = "24.8.0" -description = "A library for working with the color formats defined by HTML and CSS." -optional = false -python-versions = ">=3.8" -files = [ - {file = "webcolors-24.8.0-py3-none-any.whl", hash = "sha256:fc4c3b59358ada164552084a8ebee637c221e4059267d0f8325b3b560f6c7f0a"}, - {file = "webcolors-24.8.0.tar.gz", hash = "sha256:08b07af286a01bcd30d583a7acadf629583d1f79bfef27dd2c2c5c263817277d"}, -] - -[package.extras] -docs = ["furo", "sphinx", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-notfound-page", "sphinxext-opengraph"] -tests = ["coverage[toml]"] - -[[package]] -name = "webencodings" -version = "0.5.1" -description = "Character encoding aliases for legacy web content" -optional = false -python-versions = "*" -files = [ - {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, - {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, -] - -[[package]] -name = "websocket-client" -version = "1.8.0" -description = "WebSocket client for Python with low level API options" -optional = false -python-versions = ">=3.8" -files = [ - {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, - {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, -] - -[package.extras] -docs = ["Sphinx (>=6.0)", "myst-parser (>=2.0.0)", "sphinx-rtd-theme (>=1.1.0)"] -optional = ["python-socks", "wsaccel"] -test = ["websockets"] - -[[package]] -name = "widgetsnbextension" -version = "4.0.13" -description = "Jupyter interactive widgets for Jupyter Notebook" -optional = false -python-versions = ">=3.7" -files = [ - {file = "widgetsnbextension-4.0.13-py3-none-any.whl", hash = "sha256:74b2692e8500525cc38c2b877236ba51d34541e6385eeed5aec15a70f88a6c71"}, - {file = "widgetsnbextension-4.0.13.tar.gz", hash = "sha256:ffcb67bc9febd10234a362795f643927f4e0c05d9342c727b65d2384f8feacb6"}, -] - -[[package]] -name = "yarl" -version = "1.12.1" -description = "Yet another URL library" -optional = false -python-versions = ">=3.8" -files = [ - {file = "yarl-1.12.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:64c5b0f2b937fe40d0967516eee5504b23cb247b8b7ffeba7213a467d9646fdc"}, - {file = "yarl-1.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2e430ac432f969ef21770645743611c1618362309e3ad7cab45acd1ad1a540ff"}, - {file = "yarl-1.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:3e26e64f42bce5ddf9002092b2c37b13071c2e6413d5c05f9fa9de58ed2f7749"}, - {file = "yarl-1.12.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0103c52f8dfe5d573c856322149ddcd6d28f51b4d4a3ee5c4b3c1b0a05c3d034"}, - {file = "yarl-1.12.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b63465b53baeaf2122a337d4ab57d6bbdd09fcadceb17a974cfa8a0300ad9c67"}, - {file = "yarl-1.12.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17d4dc4ff47893a06737b8788ed2ba2f5ac4e8bb40281c8603920f7d011d5bdd"}, - {file = "yarl-1.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8b54949267bd5704324397efe9fbb6aa306466dee067550964e994d309db5f1"}, - {file = "yarl-1.12.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10b690cd78cbaca2f96a7462f303fdd2b596d3978b49892e4b05a7567c591572"}, - {file = "yarl-1.12.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c85ab016e96a975afbdb9d49ca90f3bca9920ef27c64300843fe91c3d59d8d20"}, - {file = "yarl-1.12.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c1caa5763d1770216596e0a71b5567f27aac28c95992110212c108ec74589a48"}, - {file = "yarl-1.12.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:595bbcdbfc4a9c6989d7489dca8510cba053ff46b16c84ffd95ac8e90711d419"}, - {file = "yarl-1.12.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:e64f0421892a207d3780903085c1b04efeb53b16803b23d947de5a7261b71355"}, - {file = "yarl-1.12.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:319c206e83e46ec2421b25b300c8482b6fe8a018baca246be308c736d9dab267"}, - {file = "yarl-1.12.1-cp310-cp310-win32.whl", hash = "sha256:da045bd1147d12bd43fb032296640a7cc17a7f2eaba67495988362e99db24fd2"}, - {file = "yarl-1.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:aebbd47df77190ada603157f0b3670d578c110c31746ecc5875c394fdcc59a99"}, - {file = "yarl-1.12.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:28389a68981676bf74e2e199fe42f35d1aa27a9c98e3a03e6f58d2d3d054afe1"}, - {file = "yarl-1.12.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f736f54565f8dd7e3ab664fef2bc461d7593a389a7f28d4904af8d55a91bd55f"}, - {file = "yarl-1.12.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dee0496d5f1a8f57f0f28a16f81a2033fc057a2cf9cd710742d11828f8c80e2"}, - {file = "yarl-1.12.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8981a94a27ac520a398302afb74ae2c0be1c3d2d215c75c582186a006c9e7b0"}, - {file = "yarl-1.12.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff54340fc1129e8e181827e2234af3ff659b4f17d9bbe77f43bc19e6577fadec"}, - {file = "yarl-1.12.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:54c8cee662b5f8c30ad7eedfc26123f845f007798e4ff1001d9528fe959fd23c"}, - {file = "yarl-1.12.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e97a29b37830ba1262d8dfd48ddb5b28ad4d3ebecc5d93a9c7591d98641ec737"}, - {file = "yarl-1.12.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c89894cc6f6ddd993813e79244b36b215c14f65f9e4f1660b1f2ba9e5594b95"}, - {file = "yarl-1.12.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:712ba8722c0699daf186de089ddc4677651eb9875ed7447b2ad50697522cbdd9"}, - {file = "yarl-1.12.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6e9a9f50892153bad5046c2a6df153224aa6f0573a5a8ab44fc54a1e886f6e21"}, - {file = 
"yarl-1.12.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1d4017e78fb22bc797c089b746230ad78ecd3cdb215bc0bd61cb72b5867da57e"}, - {file = "yarl-1.12.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f494c01b28645c431239863cb17af8b8d15b93b0d697a0320d5dd34cd9d7c2fa"}, - {file = "yarl-1.12.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:de4544b1fb29cf14870c4e2b8a897c0242449f5dcebd3e0366aa0aa3cf58a23a"}, - {file = "yarl-1.12.1-cp311-cp311-win32.whl", hash = "sha256:7564525a4673fde53dee7d4c307a961c0951918f0b8c7f09b2c9e02067cf6504"}, - {file = "yarl-1.12.1-cp311-cp311-win_amd64.whl", hash = "sha256:f23bb1a7a6e8e8b612a164fdd08e683bcc16c76f928d6dbb7bdbee2374fbfee6"}, - {file = "yarl-1.12.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a3e2aff8b822ab0e0bdbed9f50494b3a35629c4b9488ae391659973a37a9f53f"}, - {file = "yarl-1.12.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:22dda2799c8d39041d731e02bf7690f0ef34f1691d9ac9dfcb98dd1e94c8b058"}, - {file = "yarl-1.12.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:18c2a7757561f05439c243f517dbbb174cadfae3a72dee4ae7c693f5b336570f"}, - {file = "yarl-1.12.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:835010cc17d0020e7931d39e487d72c8e01c98e669b6896a8b8c9aa8ca69a949"}, - {file = "yarl-1.12.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2254fe137c4a360b0a13173a56444f756252c9283ba4d267ca8e9081cd140ea"}, - {file = "yarl-1.12.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6a071d2c3d39b4104f94fc08ab349e9b19b951ad4b8e3b6d7ea92d6ef7ccaf8"}, - {file = "yarl-1.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73a183042ae0918c82ce2df38c3db2409b0eeae88e3afdfc80fb67471a95b33b"}, - {file = "yarl-1.12.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:326b8a079a9afcac0575971e56dabdf7abb2ea89a893e6949b77adfeb058b50e"}, - {file = "yarl-1.12.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:126309c0f52a2219b3d1048aca00766429a1346596b186d51d9fa5d2070b7b13"}, - {file = "yarl-1.12.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ba1c779b45a399cc25f511c681016626f69e51e45b9d350d7581998722825af9"}, - {file = "yarl-1.12.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:af1107299cef049ad00a93df4809517be432283a0847bcae48343ebe5ea340dc"}, - {file = "yarl-1.12.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:20d817c0893191b2ab0ba30b45b77761e8dfec30a029b7c7063055ca71157f84"}, - {file = "yarl-1.12.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d4f818f6371970d6a5d1e42878389bbfb69dcde631e4bbac5ec1cb11158565ca"}, - {file = "yarl-1.12.1-cp312-cp312-win32.whl", hash = "sha256:0ac33d22b2604b020569a82d5f8a03ba637ba42cc1adf31f616af70baf81710b"}, - {file = "yarl-1.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:fd24996e12e1ba7c397c44be75ca299da14cde34d74bc5508cce233676cc68d0"}, - {file = "yarl-1.12.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dea360778e0668a7ad25d7727d03364de8a45bfd5d808f81253516b9f2217765"}, - {file = "yarl-1.12.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1f50a37aeeb5179d293465e522fd686080928c4d89e0ff215e1f963405ec4def"}, - {file = "yarl-1.12.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0274b1b7a9c9c32b7bf250583e673ff99fb9fccb389215841e2652d9982de740"}, - {file = "yarl-1.12.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:a4f3ab9eb8ab2d585ece959c48d234f7b39ac0ca1954a34d8b8e58a52064bdb3"}, - {file = "yarl-1.12.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8d31dd0245d88cf7239e96e8f2a99f815b06e458a5854150f8e6f0e61618d41b"}, - {file = "yarl-1.12.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a96198d5d26f40557d986c1253bfe0e02d18c9d9b93cf389daf1a3c9f7c755fa"}, - {file = "yarl-1.12.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddae504cfb556fe220efae65e35be63cd11e3c314b202723fc2119ce19f0ca2e"}, - {file = "yarl-1.12.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bce00f3b1f7f644faae89677ca68645ed5365f1c7f874fdd5ebf730a69640d38"}, - {file = "yarl-1.12.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eee5ff934b0c9f4537ff9596169d56cab1890918004791a7a06b879b3ba2a7ef"}, - {file = "yarl-1.12.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4ea99e64b2ad2635e0f0597b63f5ea6c374791ff2fa81cdd4bad8ed9f047f56f"}, - {file = "yarl-1.12.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:5c667b383529520b8dd6bd496fc318678320cb2a6062fdfe6d3618da6b8790f6"}, - {file = "yarl-1.12.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d920401941cb898ef089422e889759dd403309eb370d0e54f1bdf6ca07fef603"}, - {file = "yarl-1.12.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:501a1576716032cc6d48c7c47bcdc42d682273415a8f2908e7e72cb4625801f3"}, - {file = "yarl-1.12.1-cp313-cp313-win32.whl", hash = "sha256:24416bb5e221e29ddf8aac5b97e94e635ca2c5be44a1617ad6fe32556df44294"}, - {file = "yarl-1.12.1-cp313-cp313-win_amd64.whl", hash = "sha256:71af3766bb46738d12cc288d9b8de7ef6f79c31fd62757e2b8a505fe3680b27f"}, - {file = "yarl-1.12.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c924deab8105f86980983eced740433fb7554a7f66db73991affa4eda99d5402"}, - {file = "yarl-1.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5fb475a4cdde582c9528bb412b98f899680492daaba318231e96f1a0a1bb0d53"}, - {file = "yarl-1.12.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:36ee0115b9edca904153a66bb74a9ff1ce38caff015de94eadfb9ba8e6ecd317"}, - {file = "yarl-1.12.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2631c9d7386bd2d4ce24ecc6ebf9ae90b3efd713d588d90504eaa77fec4dba01"}, - {file = "yarl-1.12.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2376d8cf506dffd0e5f2391025ae8675b09711016656590cb03b55894161fcfa"}, - {file = "yarl-1.12.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24197ba3114cc85ddd4091e19b2ddc62650f2e4a899e51b074dfd52d56cf8c72"}, - {file = "yarl-1.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfdf419bf5d3644f94cd7052954fc233522f5a1b371fc0b00219ebd9c14d5798"}, - {file = "yarl-1.12.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8112f640a4f7e7bf59f7cabf0d47a29b8977528c521d73a64d5cc9e99e48a174"}, - {file = "yarl-1.12.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:607d12f0901f6419a8adceb139847c42c83864b85371f58270e42753f9780fa6"}, - {file = "yarl-1.12.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:664380c7ed524a280b6a2d5d9126389c3e96cd6e88986cdb42ca72baa27421d6"}, - {file = "yarl-1.12.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:0d0a5e87bc48d76dfcfc16295201e9812d5f33d55b4a0b7cad1025b92bf8b91b"}, - {file = "yarl-1.12.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = 
"sha256:eff6bac402719c14e17efe845d6b98593c56c843aca6def72080fbede755fd1f"}, - {file = "yarl-1.12.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:22839d1d1eab9e4b427828a88a22beb86f67c14d8ff81175505f1cc8493f3500"}, - {file = "yarl-1.12.1-cp38-cp38-win32.whl", hash = "sha256:717f185086bb9d817d4537dd18d5df5d657598cd00e6fc22e4d54d84de266c1d"}, - {file = "yarl-1.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:71978ba778948760cff528235c951ea0ef7a4f9c84ac5a49975f8540f76c3f73"}, - {file = "yarl-1.12.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:30ffc046ebddccb3c4cac72c1a3e1bc343492336f3ca86d24672e90ccc5e788a"}, - {file = "yarl-1.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f10954b233d4df5cc3137ffa5ced97f8894152df817e5d149bf05a0ef2ab8134"}, - {file = "yarl-1.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2e912b282466444023610e4498e3795c10e7cfd641744524876239fcf01d538d"}, - {file = "yarl-1.12.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6af871f70cfd5b528bd322c65793b5fd5659858cdfaa35fbe563fb99b667ed1f"}, - {file = "yarl-1.12.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3e4e1f7b08d1ec6b685ccd3e2d762219c550164fbf524498532e39f9413436e"}, - {file = "yarl-1.12.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9a7ee79183f0b17dcede8b6723e7da2ded529cf159a878214be9a5d3098f5b1e"}, - {file = "yarl-1.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96c8ff1e1dd680e38af0887927cab407a4e51d84a5f02ae3d6eb87233036c763"}, - {file = "yarl-1.12.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e9905fc2dc1319e4c39837b906a024cf71b1261cc66b0cd89678f779c0c61f5"}, - {file = "yarl-1.12.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:01549468858b87d36f967c97d02e6e54106f444aeb947ed76f8f71f85ed07cec"}, - {file = "yarl-1.12.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:96b34830bd6825ca0220bf005ea99ac83eb9ce51301ddb882dcf613ae6cd95fb"}, - {file = "yarl-1.12.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2aee7594d2c2221c717a8e394bbed4740029df4c0211ceb0f04815686e99c795"}, - {file = "yarl-1.12.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:15871130439ad10abb25a4631120d60391aa762b85fcab971411e556247210a0"}, - {file = "yarl-1.12.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:838dde2cb570cfbb4cab8a876a0974e8b90973ea40b3ac27a79b8a74c8a2db15"}, - {file = "yarl-1.12.1-cp39-cp39-win32.whl", hash = "sha256:eacbcf30efaca7dc5cb264228ffecdb95fdb1e715b1ec937c0ce6b734161e0c8"}, - {file = "yarl-1.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:76a59d1b63de859398bc7764c860a769499511463c1232155061fe0147f13e01"}, - {file = "yarl-1.12.1-py3-none-any.whl", hash = "sha256:dc3192a81ecd5ff954cecd690327badd5a84d00b877e1573f7c9097ce13e5bfb"}, - {file = "yarl-1.12.1.tar.gz", hash = "sha256:5b860055199aec8d6fe4dcee3c5196ce506ca198a50aab0059ffd26e8e815828"}, -] - -[package.dependencies] -idna = ">=2.0" -multidict = ">=4.0" - -[[package]] -name = "zipp" -version = "3.20.2" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, - {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, -] - -[package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] 
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
-type = ["pytest-mypy"]
-
-[metadata]
-lock-version = "2.0"
-python-versions = ">=3.9,<4.0"
-content-hash = "11e504bc7837cd1afaa9c589b6cd093be0a4fba8e398e785094aa51b268700b0"
diff --git a/libs/experimental/poetry.toml b/libs/experimental/poetry.toml
deleted file mode 100644
index ab1033bd37224..0000000000000
--- a/libs/experimental/poetry.toml
+++ /dev/null
@@ -1,2 +0,0 @@
-[virtualenvs]
-in-project = true
diff --git a/libs/experimental/pyproject.toml b/libs/experimental/pyproject.toml
deleted file mode 100644
index a5bbe8bcbdda0..0000000000000
--- a/libs/experimental/pyproject.toml
+++ /dev/null
@@ -1,135 +0,0 @@
-[build-system]
-requires = ["poetry-core>=1.0.0"]
-build-backend = "poetry.core.masonry.api"
-
-[tool.poetry]
-name = "langchain-experimental"
-version = "0.3.1"
-description = "Building applications with LLMs through composability"
-authors = []
-license = "MIT"
-readme = "README.md"
-repository = "https://github.com/langchain-ai/langchain"
-
-[tool.mypy]
-ignore_missing_imports = "True"
-disallow_untyped_defs = "True"
-exclude = ["notebooks", "examples", "example_data"]
-
-[tool.poetry.urls]
-"Source Code" = "https://github.com/langchain-ai/langchain/tree/master/libs/experimental"
-"Release Notes" = "https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-experimental%3D%3D0%22&expanded=true"
-
-[tool.poetry.dependencies]
-python = ">=3.9,<4.0"
-langchain-core = "^0.3.5"
-langchain-community = "^0.3.0"
-
-[tool.ruff.lint]
-select = ["E", "F", "I", "T201"]
-
-[tool.coverage.run]
-omit = ["tests/*"]
-
-[tool.pytest.ini_options]
-addopts = "--strict-markers --strict-config --durations=5"
-markers = [
-    "requires: mark tests as requiring a specific library",
-    "asyncio: mark tests as requiring asyncio",
-    "compile: mark placeholder test used to compile integration tests without running them",
-]
-asyncio_mode = "auto"
-
-[tool.poetry.group.lint]
-optional = true
-
-[tool.poetry.group.typing]
-optional = true
-
-[tool.poetry.group.dev]
-optional = true
-
-[tool.poetry.group.test]
-optional = true
-
-[tool.poetry.group.test_integration]
-optional = true
-
-[tool.poetry.group.lint.dependencies]
-ruff = "^0.5"
-
-[tool.poetry.group.typing.dependencies]
-mypy = "^1.10"
-types-pyyaml = "^6.0.12.2"
-types-requests = "^2.28.11.5"
-
-[tool.poetry.group.dev.dependencies]
-jupyter = "^1.0.0"
-setuptools = "^67.6.1"
-
-[tool.poetry.group.test.dependencies]
-pytest = "^7.3.0"
-pytest-asyncio = "^0.20.3"
-[[tool.poetry.group.test.dependencies.numpy]]
-version = "^1.24.0"
-python = "<3.12"
-
-[[tool.poetry.group.test.dependencies.numpy]]
-version = "^1.26.0"
-python = ">=3.12"
-
-[tool.poetry.group.typing.dependencies.langchain]
-path = "../langchain"
-develop = true
-
-[tool.poetry.group.typing.dependencies.langchain-core]
-path = "../core"
-develop = true
-
-[tool.poetry.group.typing.dependencies.langchain-community]
-path = "../community"
-develop = true
-
-[tool.poetry.group.dev.dependencies.langchain]
-path = "../langchain"
-develop = true
-
-[tool.poetry.group.dev.dependencies.langchain-core]
-path = "../core"
-develop = true
-
-[tool.poetry.group.dev.dependencies.langchain-community]
-path = "../community"
-develop = true
-
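Editor's note on the deletion above: the final in-tree release of `langchain-experimental` is 0.3.1, depending on `langchain-core` ^0.3.5 and `langchain-community` ^0.3.0. Downstream code that still imports the package now has to take it from PyPI; a minimal, hypothetical guard (the message and exit behavior are illustrative, not part of this PR):

```python
# Hypothetical guard for downstream code now that the in-tree copy is gone.
try:
    import langchain_experimental  # noqa: F401  # pip install langchain-experimental
except ImportError as err:
    raise SystemExit(
        "langchain-experimental is no longer vendored in this repository; "
        "install it from PyPI (the last in-tree version was 0.3.1)."
    ) from err
```

The remaining dependency-group tables of the deleted pyproject.toml continue below.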
-[tool.poetry.group.test.dependencies.langchain] -path = "../langchain" -develop = true - -[tool.poetry.group.test.dependencies.langchain-core] -path = "../core" -develop = true - -[tool.poetry.group.test.dependencies.langchain-community] -path = "../community" -develop = true - -[tool.poetry.group.test.dependencies.langchain-text-splitters] -path = "../text-splitters" -develop = true - -[tool.poetry.group.test_integration.dependencies.langchain] -path = "../langchain" -develop = true - -[tool.poetry.group.test_integration.dependencies.langchain-core] -path = "../core" -develop = true - -[tool.poetry.group.test_integration.dependencies.langchain-community] -path = "../community" -develop = true - -[tool.poetry.group.test_integration.dependencies.langchain-openai] -path = "../partners/openai" -develop = true diff --git a/libs/experimental/scripts/check_imports.py b/libs/experimental/scripts/check_imports.py deleted file mode 100644 index 825bea5b48737..0000000000000 --- a/libs/experimental/scripts/check_imports.py +++ /dev/null @@ -1,22 +0,0 @@ -import random -import string -import sys -import traceback -from importlib.machinery import SourceFileLoader - -if __name__ == "__main__": - files = sys.argv[1:] - has_failure = False - for file in files: - try: - module_name = "".join( - random.choice(string.ascii_letters) for _ in range(20) - ) - SourceFileLoader(module_name, file).load_module() - except Exception: - has_failure = True - print(file) # noqa: T201 - traceback.print_exc() - print() # noqa: T201 - - sys.exit(1 if has_failure else 0) diff --git a/libs/experimental/tests/__init__.py b/libs/experimental/tests/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/tests/integration_tests/__init__.py b/libs/experimental/tests/integration_tests/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/tests/integration_tests/chains/__init__.py b/libs/experimental/tests/integration_tests/chains/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/tests/integration_tests/chains/test_cpal.py b/libs/experimental/tests/integration_tests/chains/test_cpal.py deleted file mode 100644 index caa337875647c..0000000000000 --- a/libs/experimental/tests/integration_tests/chains/test_cpal.py +++ /dev/null @@ -1,554 +0,0 @@ -"""Test CPAL chain.""" - -import json -import unittest -from typing import Type -from unittest import mock - -import pydantic -import pytest -from langchain.output_parsers import PydanticOutputParser -from langchain_community.llms import OpenAI -from langchain_core.prompts.prompt import PromptTemplate - -from langchain_experimental.cpal.base import ( - CausalChain, - CPALChain, - InterventionChain, - NarrativeChain, - QueryChain, -) -from langchain_experimental.cpal.constants import Constant -from langchain_experimental.cpal.models import ( - CausalModel, - EntityModel, - EntitySettingModel, - InterventionModel, - NarrativeModel, - QueryModel, -) -from langchain_experimental.cpal.templates.univariate.causal import ( - template as causal_template, -) -from langchain_experimental.cpal.templates.univariate.intervention import ( - template as intervention_template, -) -from langchain_experimental.cpal.templates.univariate.narrative import ( - template as narrative_template, -) -from langchain_experimental.cpal.templates.univariate.query import ( - template as query_template, -) -from tests.unit_tests.fake_llm import FakeLLM - - -class 
TestUnitCPALChain_MathWordProblems(unittest.TestCase): - """Unit Test the CPAL chain and its component chains on math word problems. - - These tests can't run in the standard unit test directory because of - this issue, https://github.com/langchain-ai/langchain/issues/7451 - - """ - - def setUp(self) -> None: - self.fake_llm = self.make_fake_llm() - - def make_fake_llm(self) -> FakeLLM: - """ - Fake LLM service for testing CPAL chain and its components chains - on univariate math examples. - """ - - class LLMMockData(pydantic.BaseModel): - question: str - completion: str - template: str - data_model: Type[pydantic.BaseModel] - - @property - def prompt(self) -> str: - """Create LLM prompt with the question.""" - prompt_template = PromptTemplate( - input_variables=[Constant.narrative_input.value], - template=self.template, - partial_variables={ - "format_instructions": PydanticOutputParser( - pydantic_object=self.data_model - ).get_format_instructions() - }, - ) - prompt = prompt_template.format(narrative_input=self.question) - return prompt - - narrative = LLMMockData( - **{ # type: ignore[arg-type, arg-type] - "question": ( - "jan has three times the number of pets as marcia. " - "marcia has two more pets than cindy." - "if cindy has ten pets, how many pets does jan have? " - ), - "completion": json.dumps( - { - "story_outcome_question": "how many pets does jan have? ", - "story_hypothetical": "if cindy has ten pets", - "story_plot": "jan has three times the number of pets as marcia. marcia has two more pets than cindy.", # noqa: E501 - } - ), - "template": narrative_template, - "data_model": NarrativeModel, - } - ) - - causal_model = LLMMockData( - **{ # type: ignore[arg-type, arg-type] - "question": ( - "jan has three times the number of pets as marcia. " - "marcia has two more pets than cindy." - ), - "completion": ( - "\n" - "{\n" - ' "attribute": "pet_count",\n' - ' "entities": [\n' - " {\n" - ' "name": "cindy",\n' - ' "value": 0,\n' - ' "depends_on": [],\n' - ' "code": "pass"\n' - " },\n" - " {\n" - ' "name": "marcia",\n' - ' "value": 0,\n' - ' "depends_on": ["cindy"],\n' - ' "code": "marcia.value = cindy.value + 2"\n' - " },\n" - " {\n" - ' "name": "jan",\n' - ' "value": 0,\n' - ' "depends_on": ["marcia"],\n' - ' "code": "jan.value = marcia.value * 3"\n' - " }\n" - " ]\n" - "}" - ), - "template": causal_template, - "data_model": CausalModel, - } - ) - - intervention = LLMMockData( - **{ # type: ignore[arg-type, arg-type] - "question": ("if cindy has ten pets"), - "completion": ( - "{\n" - ' "entity_settings" : [\n' - ' { "name": "cindy", "attribute": "pet_count", "value": "10" }\n' # noqa: E501 - " ]\n" - "}" - ), - "template": intervention_template, - "data_model": InterventionModel, - } - ) - - query = LLMMockData( - **{ # type: ignore[arg-type, arg-type] - "question": ("how many pets does jan have? "), - "completion": ( - "{\n" - ' "narrative_input": "how many pets does jan have? ",\n' - ' "llm_error_msg": "",\n' - ' "expression": "SELECT name, value FROM df WHERE name = \'jan\'"\n' # noqa: E501 - "}" - ), - "template": query_template, - "data_model": QueryModel, - } - ) - - fake_llm = FakeLLM() - fake_llm.queries = {} - for mock_data in [narrative, causal_model, intervention, query]: - fake_llm.queries.update({mock_data.prompt: mock_data.completion}) - return fake_llm - - def test_narrative_chain(self) -> None: - """Test narrative chain returns the three main elements of the causal - narrative as a pydantic object. 
- """ - narrative_chain = NarrativeChain.from_univariate_prompt(llm=self.fake_llm) - output = narrative_chain( - ( - "jan has three times the number of pets as marcia. " - "marcia has two more pets than cindy." - "if cindy has ten pets, how many pets does jan have? " - ) - ) - expected_output = { - "chain_answer": None, - "chain_data": NarrativeModel( - story_outcome_question="how many pets does jan have? ", - story_hypothetical="if cindy has ten pets", - story_plot="jan has three times the number of pets as marcia. marcia has two more pets than cindy.", # noqa: E501 - ), - "narrative_input": "jan has three times the number of pets as marcia. marcia " # noqa: E501 - "has two more pets than cindy.if cindy has ten pets, how " - "many pets does jan have? ", - } - assert output == expected_output - - def test_causal_chain(self) -> None: - """ - Test causal chain returns a DAG as a pydantic object. - """ - causal_chain = CausalChain.from_univariate_prompt(llm=self.fake_llm) - output = causal_chain( - ( - "jan has three times the number of pets as " - "marcia. marcia has two more pets than cindy." - ) - ) - expected_output = { - "chain_answer": None, - "chain_data": CausalModel( - attribute="pet_count", - entities=[ - EntityModel(name="cindy", code="pass", value=0.0, depends_on=[]), - EntityModel( - name="marcia", - code="marcia.value = cindy.value + 2", - value=0.0, - depends_on=["cindy"], - ), - EntityModel( - name="jan", - code="jan.value = marcia.value * 3", - value=0.0, - depends_on=["marcia"], - ), - ], - ), - "narrative_input": "jan has three times the number of pets as marcia. marcia " # noqa: E501 - "has two more pets than cindy.", - } - assert output == expected_output - - def test_intervention_chain(self) -> None: - """ - Test intervention chain correctly transforms - the LLM's text completion into a setting-like object. - """ - intervention_chain = InterventionChain.from_univariate_prompt(llm=self.fake_llm) - output = intervention_chain("if cindy has ten pets") - expected_output = { - "chain_answer": None, - "chain_data": InterventionModel( - entity_settings=[ - EntitySettingModel(name="cindy", attribute="pet_count", value=10), - ] - ), - "narrative_input": "if cindy has ten pets", - } - assert output == expected_output - - def test_query_chain(self) -> None: - """ - Test query chain correctly transforms - the LLM's text completion into a query-like object. - """ - query_chain = QueryChain.from_univariate_prompt(llm=self.fake_llm) - output = query_chain("how many pets does jan have? ") - expected_output = { - "chain_answer": None, - "chain_data": QueryModel( - question="how many pets does jan have? ", - llm_error_msg="", - expression="SELECT name, value FROM df WHERE name = 'jan'", - ), - "narrative_input": "how many pets does jan have? ", - } - assert output == expected_output - - def test_cpal_chain(self) -> None: - """ - patch required since `networkx` package is not part of unit test environment - """ - with mock.patch( - "langchain_experimental.cpal.models.NetworkxEntityGraph" - ) as mock_networkx: - graph_instance = mock_networkx.return_value - graph_instance.get_topological_sort.return_value = [ - "cindy", - "marcia", - "jan", - ] - cpal_chain = CPALChain.from_univariate_prompt( - llm=self.fake_llm, verbose=True - ) - cpal_chain.run( - ( - "jan has three times the number of pets as " - "marcia. marcia has two more pets than cindy." - "if cindy has ten pets, how many pets does jan have? 
" - ) - ) - - -class TestCPALChain_MathWordProblems(unittest.TestCase): - """Test the CPAL chain and its component chains on math word problems.""" - - def test_causal_chain(self) -> None: - """Test CausalChain can translate a narrative's plot into a causal model - containing operations linked by a DAG.""" - - llm = OpenAI(temperature=0, max_tokens=512) - casual_chain = CausalChain.from_univariate_prompt(llm) - narrative_plot = ( - "Jan has three times the number of pets as Marcia. " - "Marcia has two more pets than Cindy. " - ) - output = casual_chain(narrative_plot) - expected_output = { - "chain_answer": None, - "chain_data": CausalModel( - attribute="pet_count", - entities=[ - EntityModel(name="cindy", code="pass", value=0.0, depends_on=[]), - EntityModel( - name="marcia", - code="marcia.value = cindy.value + 2", - value=0.0, - depends_on=["cindy"], - ), - EntityModel( - name="jan", - code="jan.value = marcia.value * 3", - value=0.0, - depends_on=["marcia"], - ), - ], - ), - "narrative_input": "Jan has three times the number of pets as Marcia. Marcia " # noqa: E501 - "has two more pets than Cindy. ", - } - self.assertDictEqual(output, expected_output) - self.assertEqual( - isinstance(output[Constant.chain_data.value], CausalModel), True - ) - - def test_intervention_chain(self) -> None: - """Test InterventionChain translates a hypothetical into a new value setting.""" - - llm = OpenAI(temperature=0, max_tokens=512) - story_conditions_chain = InterventionChain.from_univariate_prompt(llm) - question = "if cindy has ten pets" - data = story_conditions_chain(question)[Constant.chain_data.value] - self.assertEqual(type(data), InterventionModel) - - def test_intervention_chain_2(self) -> None: - """Test InterventionChain translates a hypothetical into a new value setting.""" - - llm = OpenAI(temperature=0, max_tokens=512) - story_conditions_chain = InterventionChain.from_univariate_prompt(llm) - narrative_condition = "What if Cindy has ten pets and Boris has 5 pets? " - data = story_conditions_chain(narrative_condition)[Constant.chain_data.value] - self.assertEqual(type(data), InterventionModel) - - def test_query_chain(self) -> None: - """Test QueryChain translates a question into a query expression.""" - llm = OpenAI(temperature=0, max_tokens=512) - query_chain = QueryChain.from_univariate_prompt(llm) - narrative_question = "How many pets will Marcia end up with? " - data = query_chain(narrative_question)[Constant.chain_data.value] - self.assertEqual(type(data), QueryModel) - - def test_narrative_chain(self) -> None: - """Test NarrativeChain decomposes a human's narrative into three story elements: - - - causal model - - intervention model - - query model - """ - - narrative = ( - "Jan has three times the number of pets as Marcia. " - "Marcia has two more pets than Cindy. " - "If Cindy has ten pets, how many pets does Jan have? " - ) - llm = OpenAI(temperature=0, max_tokens=512) - narrative_chain = NarrativeChain.from_univariate_prompt(llm) - data = narrative_chain(narrative)[Constant.chain_data.value] - self.assertEqual(type(data), NarrativeModel) - - out = narrative_chain(narrative) - expected_narrative_out = { - "chain_answer": None, - "chain_data": NarrativeModel( - story_outcome_question="how many pets does Jan have?", - story_hypothetical="If Cindy has ten pets", - story_plot="Jan has three times the number of pets as Marcia. Marcia has two more pets than Cindy.", # noqa: E501 - ), - "narrative_input": "Jan has three times the number of pets as Marcia. 
Marcia " # noqa: E501 - "has two more pets than Cindy. If Cindy has ten pets, how " - "many pets does Jan have? ", - } - self.assertDictEqual(out, expected_narrative_out) - - def test_against_pal_chain_doc(self) -> None: - """ - Test CPAL chain against the first example in the PAL chain notebook doc: - - https://github.com/langchain-ai/langchain/blob/master/docs/extras/modules/chains/additional/pal.ipynb - """ - - narrative_input = ( - "Jan has three times the number of pets as Marcia." - " Marcia has two more pets than Cindy." - " If Cindy has four pets, how many total pets do the three have?" - ) - - llm = OpenAI(temperature=0, max_tokens=512) - cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True) - answer = cpal_chain.run(narrative_input) - - """ - >>> story._outcome_table - name code value depends_on - 0 cindy pass 4.0 [] - 1 marcia marcia.value = cindy.value + 2 6.0 [cindy] - 2 jan jan.value = marcia.value * 3 18.0 [marcia] - - """ - self.assertEqual(answer, 28.0) - - def test_simple(self) -> None: - """ - Given a simple math word problem here we are test and illustrate the - the data structures that are produced by the CPAL chain. - """ - - narrative_input = ( - "jan has three times the number of pets as marcia." - "marcia has two more pets than cindy." - "If cindy has ten pets, how many pets does jan have?" - ) - llm = OpenAI(temperature=0, max_tokens=512) - cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True) - output = cpal_chain(narrative_input) - data = output[Constant.chain_data.value] - - expected_output = { - "causal_operations": { - "attribute": "pet_count", - "entities": [ - {"code": "pass", "depends_on": [], "name": "cindy", "value": 10.0}, - { - "code": "marcia.value = cindy.value + 2", - "depends_on": ["cindy"], - "name": "marcia", - "value": 12.0, - }, - { - "code": "jan.value = marcia.value * 3", - "depends_on": ["marcia"], - "name": "jan", - "value": 36.0, - }, - ], - }, - "intervention": { - "entity_settings": [ - {"attribute": "pet_count", "name": "cindy", "value": 10.0} - ], - "system_settings": None, - }, - "query": { - "expression": "SELECT name, value FROM df WHERE name = 'jan'", - "llm_error_msg": "", - "question": "how many pets does jan have?", - }, - } - self.assertDictEqual(data.dict(), expected_output) - - """ - Illustrate the query model's result table as a printed pandas dataframe - >>> data._outcome_table - name code value depends_on - 0 cindy pass 10.0 [] - 1 marcia marcia.value = cindy.value + 2 12.0 [cindy] - 2 jan jan.value = marcia.value * 3 36.0 [marcia] - """ - - expected_output = { - "code": { - 0: "pass", - 1: "marcia.value = cindy.value + 2", - 2: "jan.value = marcia.value * 3", - }, - "depends_on": {0: [], 1: ["cindy"], 2: ["marcia"]}, - "name": {0: "cindy", 1: "marcia", 2: "jan"}, - "value": {0: 10.0, 1: 12.0, 2: 36.0}, - } - self.assertDictEqual(data._outcome_table.to_dict(), expected_output) - - expected_output = {"name": {0: "jan"}, "value": {0: 36.0}} - self.assertDictEqual(data.query._result_table.to_dict(), expected_output) - - # TODO: use an LLM chain to translate numbers to words - df = data.query._result_table - expr = "name == 'jan'" - answer = df.query(expr).iloc[0]["value"] - self.assertEqual(float(answer), 36.0) - - def test_hallucinating(self) -> None: - """ - Test CPAL approach does not hallucinate when given - an invalid entity in the question. - - The PAL chain would hallucinates here! - """ - - narrative_input = ( - "Jan has three times the number of pets as Marcia." 
- "Marcia has two more pets than Cindy." - "If Cindy has ten pets, how many pets does Barak have?" - ) - llm = OpenAI(temperature=0, max_tokens=512) - cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True) - with pytest.raises(Exception) as e_info: - print(e_info) # noqa: T201 - cpal_chain.run(narrative_input) - - def test_causal_mediator(self) -> None: - """ - Test CPAL approach on causal mediator. - """ - - narrative_input = ( - "jan has three times the number of pets as marcia." - "marcia has two more pets than cindy." - "If marcia has ten pets, how many pets does jan have?" - ) - llm = OpenAI(temperature=0, max_tokens=512) - cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True) - answer = cpal_chain.run(narrative_input) - self.assertEqual(answer, 30.0) - - @pytest.mark.skip(reason="requires manual install of debian and py packages") - def test_draw(self) -> None: - """ - Test CPAL chain can draw its resulting DAG. - """ - import os - - narrative_input = ( - "Jan has three times the number of pets as Marcia." - "Marcia has two more pets than Cindy." - "If Marcia has ten pets, how many pets does Jan have?" - ) - llm = OpenAI(temperature=0, max_tokens=512) - cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True) - cpal_chain.run(narrative_input) - path = "graph.svg" - cpal_chain.draw(path=path) - self.assertTrue(os.path.exists(path)) diff --git a/libs/experimental/tests/integration_tests/chains/test_pal.py b/libs/experimental/tests/integration_tests/chains/test_pal.py deleted file mode 100644 index ce58e3606c644..0000000000000 --- a/libs/experimental/tests/integration_tests/chains/test_pal.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Test PAL chain.""" - -from langchain_community.llms import OpenAI - -from langchain_experimental.pal_chain.base import PALChain - - -def test_math_prompt() -> None: - """Test math prompt.""" - llm = OpenAI(temperature=0, max_tokens=512) - pal_chain = PALChain.from_math_prompt(llm, timeout=None, allow_dangerous_code=False) - question = ( - "Jan has three times the number of pets as Marcia. " - "Marcia has two more pets than Cindy. " - "If Cindy has four pets, how many total pets do the three have?" - ) - output = pal_chain.run(question) - assert output == "28" - - -def test_colored_object_prompt() -> None: - """Test colored object prompt.""" - llm = OpenAI(temperature=0, max_tokens=512) - pal_chain = PALChain.from_colored_object_prompt( - llm, timeout=None, allow_dangerous_code=False - ) - question = ( - "On the desk, you see two blue booklets, " - "two purple booklets, and two yellow pairs of sunglasses. " - "If I remove all the pairs of sunglasses from the desk, " - "how many purple items remain on it?" 
- ) - output = pal_chain.run(question) - assert output == "2" diff --git a/libs/experimental/tests/integration_tests/chains/test_sql_database.py b/libs/experimental/tests/integration_tests/chains/test_sql_database.py deleted file mode 100644 index 4693f9154ba34..0000000000000 --- a/libs/experimental/tests/integration_tests/chains/test_sql_database.py +++ /dev/null @@ -1,93 +0,0 @@ -"""Test SQL Database Chain.""" - -from langchain_community.llms.openai import OpenAI -from langchain_community.utilities.sql_database import SQLDatabase -from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine, insert - -from langchain_experimental.sql.base import ( - SQLDatabaseChain, - SQLDatabaseSequentialChain, -) - -metadata_obj = MetaData() - -user = Table( - "user", - metadata_obj, - Column("user_id", Integer, primary_key=True), - Column("user_name", String(16), nullable=False), - Column("user_company", String(16), nullable=False), -) - - -def test_sql_database_run() -> None: - """Test that commands can be run successfully and returned in correct format.""" - engine = create_engine("sqlite:///:memory:") - metadata_obj.create_all(engine) - stmt = insert(user).values(user_id=13, user_name="Harrison", user_company="Foo") - with engine.connect() as conn: - conn.execute(stmt) - db = SQLDatabase(engine) - db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db) - output = db_chain.run("What company does Harrison work at?") - expected_output = " Harrison works at Foo." - assert output == expected_output - - -def test_sql_database_run_update() -> None: - """Test that update commands run successfully and returned in correct format.""" - engine = create_engine("sqlite:///:memory:") - metadata_obj.create_all(engine) - stmt = insert(user).values(user_id=13, user_name="Harrison", user_company="Foo") - with engine.connect() as conn: - conn.execute(stmt) - db = SQLDatabase(engine) - db_chain = SQLDatabaseChain.from_llm(OpenAI(temperature=0), db) - output = db_chain.run("Update Harrison's workplace to Bar") - expected_output = " Harrison's workplace has been updated to Bar." - assert output == expected_output - output = db_chain.run("What company does Harrison work at?") - expected_output = " Harrison works at Bar." - assert output == expected_output - - -def test_sql_database_sequential_chain_run() -> None: - """Test that commands can be run successfully SEQUENTIALLY - and returned in correct format.""" - engine = create_engine("sqlite:///:memory:") - metadata_obj.create_all(engine) - stmt = insert(user).values(user_id=13, user_name="Harrison", user_company="Foo") - with engine.connect() as conn: - conn.execute(stmt) - db = SQLDatabase(engine) - db_chain = SQLDatabaseSequentialChain.from_llm(OpenAI(temperature=0), db) - output = db_chain.run("What company does Harrison work at?") - expected_output = " Harrison works at Foo." - assert output == expected_output - - -def test_sql_database_sequential_chain_intermediate_steps() -> None: - """Test that commands can be run successfully SEQUENTIALLY and returned - in correct format. 
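Every deleted SQL-chain test above shares one fixture idea: an in-memory SQLite database seeded with a single row, which is what makes the expected answers deterministic. Stripped of LangChain, that setup reduces to the following sketch (the `select` check at the end is added for illustration):

```python
from sqlalchemy import (Column, Integer, MetaData, String, Table,
                        create_engine, insert, select)

metadata_obj = MetaData()
user = Table(
    "user",
    metadata_obj,
    Column("user_id", Integer, primary_key=True),
    Column("user_name", String(16), nullable=False),
    Column("user_company", String(16), nullable=False),
)

engine = create_engine("sqlite:///:memory:")  # fresh database per test
metadata_obj.create_all(engine)
with engine.connect() as conn:
    conn.execute(insert(user).values(user_id=13, user_name="Harrison", user_company="Foo"))
    row = conn.execute(
        select(user.c.user_company).where(user.c.user_name == "Harrison")
    ).one()
assert row[0] == "Foo"
```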
switch Intermediate steps""" - engine = create_engine("sqlite:///:memory:") - metadata_obj.create_all(engine) - stmt = insert(user).values(user_id=13, user_name="Harrison", user_company="Foo") - with engine.connect() as conn: - conn.execute(stmt) - db = SQLDatabase(engine) - db_chain = SQLDatabaseSequentialChain.from_llm( - OpenAI(temperature=0), db, return_intermediate_steps=True - ) - output = db_chain("What company does Harrison work at?") - expected_output = " Harrison works at Foo." - assert output["result"] == expected_output - - query = output["intermediate_steps"][0] - expected_query = ( - " SELECT user_company FROM user WHERE user_name = 'Harrison' LIMIT 1;" - ) - assert query == expected_query - - query_results = output["intermediate_steps"][1] - expected_query_results = "[('Foo',)]" - assert query_results == expected_query_results diff --git a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py b/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py deleted file mode 100644 index 9570042dfc4e8..0000000000000 --- a/libs/experimental/tests/integration_tests/chains/test_synthetic_data_openai.py +++ /dev/null @@ -1,103 +0,0 @@ -import pytest -from langchain_community.chat_models import ChatOpenAI -from langchain_core.prompts.few_shot import FewShotPromptTemplate -from pydantic import BaseModel - -from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator -from langchain_experimental.tabular_synthetic_data.openai import ( - OPENAI_TEMPLATE, - create_openai_data_generator, -) -from langchain_experimental.tabular_synthetic_data.prompts import ( - SYNTHETIC_FEW_SHOT_PREFIX, - SYNTHETIC_FEW_SHOT_SUFFIX, -) - - -# Define the desired output schema for individual medical billing record -class MedicalBilling(BaseModel): - patient_id: int - patient_name: str - diagnosis_code: str - procedure_code: str - total_charge: float - insurance_claim_amount: float - - -examples = [ - { - "example": """Patient ID: 123456, Patient Name: John Doe, Diagnosis Code: - J20.9, Procedure Code: 99203, Total Charge: $500, Insurance Claim Amount: - $350""" - }, - { - "example": """Patient ID: 789012, Patient Name: Johnson Smith, Diagnosis - Code: M54.5, Procedure Code: 99213, Total Charge: $150, Insurance Claim - Amount: $120""" - }, - { - "example": """Patient ID: 345678, Patient Name: Emily Stone, Diagnosis Code: - E11.9, Procedure Code: 99214, Total Charge: $300, Insurance Claim Amount: - $250""" - }, - { - "example": """Patient ID: 901234, Patient Name: Robert Miles, Diagnosis Code: - B07.9, Procedure Code: 99204, Total Charge: $200, Insurance Claim Amount: - $160""" - }, - { - "example": """Patient ID: 567890, Patient Name: Clara Jensen, Diagnosis Code: - F41.9, Procedure Code: 99205, Total Charge: $450, Insurance Claim Amount: - $310""" - }, - { - "example": """Patient ID: 234567, Patient Name: Alan Turing, Diagnosis Code: - G40.909, Procedure Code: 99215, Total Charge: $220, Insurance Claim Amount: - $180""" - }, -] - -prompt_template = FewShotPromptTemplate( - prefix=SYNTHETIC_FEW_SHOT_PREFIX, - examples=examples, - suffix=SYNTHETIC_FEW_SHOT_SUFFIX, - input_variables=["subject", "extra"], - example_prompt=OPENAI_TEMPLATE, -) - - -@pytest.fixture(scope="function") -def synthetic_data_generator() -> SyntheticDataGenerator: - return create_openai_data_generator( - output_schema=MedicalBilling, - llm=ChatOpenAI(temperature=1), # replace with your LLM instance - prompt=prompt_template, - ) - - -@pytest.mark.requires("openai") -def 
test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator) -> None: - synthetic_results = synthetic_data_generator.generate( - subject="medical_billing", - extra="""the name must be chosen at random. Make it something you wouldn't - normally choose.""", - runs=10, - ) - assert len(synthetic_results) == 10 - for row in synthetic_results: - assert isinstance(row, MedicalBilling) - - -@pytest.mark.requires("openai") -async def test_agenerate_synthetic( - synthetic_data_generator: SyntheticDataGenerator, -) -> None: - synthetic_results = await synthetic_data_generator.agenerate( - subject="medical_billing", - extra="""the name must be chosen at random. Make it something you wouldn't - normally choose.""", - runs=10, - ) - assert len(synthetic_results) == 10 - for row in synthetic_results: - assert isinstance(row, MedicalBilling) diff --git a/libs/experimental/tests/integration_tests/llms/__init__.py b/libs/experimental/tests/integration_tests/llms/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/tests/integration_tests/llms/test_anthropic_functions.py b/libs/experimental/tests/integration_tests/llms/test_anthropic_functions.py deleted file mode 100644 index b83fc35d81fef..0000000000000 --- a/libs/experimental/tests/integration_tests/llms/test_anthropic_functions.py +++ /dev/null @@ -1,109 +0,0 @@ -"""Test AnthropicFunctions""" - -import unittest - -from langchain_community.chat_models.anthropic import ChatAnthropic -from langchain_community.chat_models.bedrock import BedrockChat - -from langchain_experimental.llms.anthropic_functions import AnthropicFunctions - - -class TestAnthropicFunctions(unittest.TestCase): - """ - Test AnthropicFunctions with default llm (ChatAnthropic) as well as a passed-in llm - """ - - def test_default_chat_anthropic(self) -> None: - base_model = AnthropicFunctions(model="claude-2") # type: ignore[call-arg] - self.assertIsInstance(base_model.model, ChatAnthropic) - - # bind functions - model = base_model.bind( - functions=[ - { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, " - "e.g. San Francisco, CA", - }, - "unit": { - "type": "string", - "enum": ["celsius", "fahrenheit"], - }, - }, - "required": ["location"], - }, - } - ], - function_call={"name": "get_current_weather"}, - ) - - res = model.invoke("What's the weather in San Francisco?") - - function_call = res.additional_kwargs.get("function_call") - assert function_call - self.assertEqual(function_call.get("name"), "get_current_weather") - self.assertEqual( - function_call.get("arguments"), - '{"location": "San Francisco, CA", "unit": "fahrenheit"}', - ) - - def test_bedrock_chat_anthropic(self) -> None: - """ - const chatBedrock = new ChatBedrock({ - region: process.env.BEDROCK_AWS_REGION ?? 
"us-east-1", - model: "anthropic.claude-v2", - temperature: 0.1, - credentials: { - secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!, - accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!, - }, - });""" - llm = BedrockChat( # type: ignore[call-arg] - model_id="anthropic.claude-v2", - model_kwargs={"temperature": 0.1}, - region_name="us-east-1", - ) - base_model = AnthropicFunctions(llm=llm) - assert isinstance(base_model.model, BedrockChat) - - # bind functions - model = base_model.bind( - functions=[ - { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, " - "e.g. San Francisco, CA", - }, - "unit": { - "type": "string", - "enum": ["celsius", "fahrenheit"], - }, - }, - "required": ["location"], - }, - } - ], - function_call={"name": "get_current_weather"}, - ) - - res = model.invoke("What's the weather in San Francisco?") - - function_call = res.additional_kwargs.get("function_call") - assert function_call - self.assertEqual(function_call.get("name"), "get_current_weather") - self.assertEqual( - function_call.get("arguments"), - '{"location": "San Francisco, CA", "unit": "fahrenheit"}', - ) diff --git a/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py b/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py deleted file mode 100644 index 58fbff7c13027..0000000000000 --- a/libs/experimental/tests/integration_tests/llms/test_ollama_functions.py +++ /dev/null @@ -1,142 +0,0 @@ -"""Test OllamaFunctions""" - -import unittest - -from langchain_community.tools import DuckDuckGoSearchResults -from langchain_community.tools.pubmed.tool import PubmedQueryRun -from langchain_core.messages import AIMessage -from pydantic import BaseModel, Field - -from langchain_experimental.llms.ollama_functions import ( - OllamaFunctions, - convert_to_ollama_tool, -) - - -class Joke(BaseModel): - setup: str = Field(description="The setup of the joke") - punchline: str = Field(description="The punchline to the joke") - - -class TestOllamaFunctions(unittest.TestCase): - """ - Test OllamaFunctions - """ - - def test_default_ollama_functions(self) -> None: - base_model = OllamaFunctions(model="phi3", format="json") - - # bind functions - model = base_model.bind_tools( - tools=[ - { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, " - "e.g. 
San Francisco, CA", - }, - "unit": { - "type": "string", - "enum": ["celsius", "fahrenheit"], - }, - }, - "required": ["location"], - }, - } - ], - function_call={"name": "get_current_weather"}, - ) - - res = model.invoke("What's the weather in San Francisco?") - - self.assertIsInstance(res, AIMessage) - res = AIMessage(**res.__dict__) - tool_calls = res.tool_calls - assert tool_calls - tool_call = tool_calls[0] - assert tool_call - self.assertEqual("get_current_weather", tool_call.get("name")) - - def test_ollama_functions_tools(self) -> None: - base_model = OllamaFunctions(model="phi3", format="json") - model = base_model.bind_tools( - tools=[PubmedQueryRun(), DuckDuckGoSearchResults(max_results=2)] # type: ignore[call-arg] - ) - res = model.invoke("What causes lung cancer?") - self.assertIsInstance(res, AIMessage) - res = AIMessage(**res.__dict__) - tool_calls = res.tool_calls - assert tool_calls - tool_call = tool_calls[0] - assert tool_call - self.assertEqual("pub_med", tool_call.get("name")) - - def test_default_ollama_functions_default_response(self) -> None: - base_model = OllamaFunctions(model="phi3", format="json") - - # bind functions - model = base_model.bind_tools( - tools=[ - { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, " - "e.g. San Francisco, CA", - }, - "unit": { - "type": "string", - "enum": ["celsius", "fahrenheit"], - }, - }, - "required": ["location"], - }, - } - ] - ) - - res = model.invoke("What is the capital of France?") - - self.assertIsInstance(res, AIMessage) - res = AIMessage(**res.__dict__) - tool_calls = res.tool_calls - if len(tool_calls) > 0: - tool_call = tool_calls[0] - assert tool_call - self.assertEqual("__conversational_response", tool_call.get("name")) - - def test_ollama_structured_output(self) -> None: - model = OllamaFunctions(model="phi3") - structured_llm = model.with_structured_output(Joke, include_raw=False) - - res = structured_llm.invoke("Tell me a joke about cats") - assert isinstance(res, Joke) - - def test_ollama_structured_output_with_json(self) -> None: - model = OllamaFunctions(model="phi3") - joke_schema = convert_to_ollama_tool(Joke) - structured_llm = model.with_structured_output(joke_schema, include_raw=False) - - res = structured_llm.invoke("Tell me a joke about cats") - assert "setup" in res - assert "punchline" in res - - def test_ollama_structured_output_raw(self) -> None: - model = OllamaFunctions(model="phi3") - structured_llm = model.with_structured_output(Joke, include_raw=True) - - res = structured_llm.invoke("Tell me a joke about cars") - assert isinstance(res, dict) - assert "raw" in res - assert "parsed" in res - assert isinstance(res["raw"], AIMessage) - assert isinstance(res["parsed"], Joke) diff --git a/libs/experimental/tests/integration_tests/test_compile.py b/libs/experimental/tests/integration_tests/test_compile.py deleted file mode 100644 index 33ecccdfa0fbd..0000000000000 --- a/libs/experimental/tests/integration_tests/test_compile.py +++ /dev/null @@ -1,7 +0,0 @@ -import pytest - - -@pytest.mark.compile -def test_placeholder() -> None: - """Used for compiling integration tests without running any real tests.""" - pass diff --git a/libs/experimental/tests/integration_tests/test_video_captioning.py b/libs/experimental/tests/integration_tests/test_video_captioning.py deleted file mode 100644 index d1a602dc40041..0000000000000 --- 
a/libs/experimental/tests/integration_tests/test_video_captioning.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Integration test for video captioning.""" - -from langchain_openai import ChatOpenAI - -from langchain_experimental.video_captioning.base import VideoCaptioningChain - - -def test_video_captioning_hard() -> None: - """Test input that is considered hard for this chain to process.""" - URL = """ - https://ia904700.us.archive.org/22/items/any-chibes/X2Download.com - -FXX%20USA%20%C2%ABPromo%20Noon%20-%204A%20Every%20Day%EF%BF%BD%EF - %BF%BD%C2%BB%20November%202021%EF%BF%BD%EF%BF%BD-%281080p60%29.mp4 - """ - chain = VideoCaptioningChain( # type: ignore[call-arg] - llm=ChatOpenAI( - model="gpt-4", - max_tokens=4000, - ) - ) - srt_content = chain.run(video_file_path=URL) - - assert ( - "mustache" in srt_content - and "Any chives?" in srt_content - and "How easy? A little tighter." in srt_content - and "it's a little tight in" in srt_content - and "every day" in srt_content - ) diff --git a/libs/experimental/tests/unit_tests/__init__.py b/libs/experimental/tests/unit_tests/__init__.py deleted file mode 100644 index f17799042153d..0000000000000 --- a/libs/experimental/tests/unit_tests/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -import ctypes - - -def is_libcublas_available() -> bool: - try: - ctypes.CDLL("libcublas.so") - return True - except OSError: - return False diff --git a/libs/experimental/tests/unit_tests/agents/__init__.py b/libs/experimental/tests/unit_tests/agents/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/tests/unit_tests/agents/agent_toolkits/__init__.py b/libs/experimental/tests/unit_tests/agents/agent_toolkits/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/tests/unit_tests/agents/agent_toolkits/pandas/__init__.py b/libs/experimental/tests/unit_tests/agents/agent_toolkits/pandas/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/tests/unit_tests/agents/agent_toolkits/pandas/test_base.py b/libs/experimental/tests/unit_tests/agents/agent_toolkits/pandas/test_base.py deleted file mode 100644 index 9e99e4a7b5496..0000000000000 --- a/libs/experimental/tests/unit_tests/agents/agent_toolkits/pandas/test_base.py +++ /dev/null @@ -1,22 +0,0 @@ -import sys - -import pytest - -from langchain_experimental.agents import create_pandas_dataframe_agent -from tests.unit_tests.fake_llm import FakeLLM - - -@pytest.mark.requires("pandas", "tabulate") -@pytest.mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher") -def test_create_pandas_dataframe_agent() -> None: - import pandas as pd - - with pytest.raises(ValueError): - create_pandas_dataframe_agent( - FakeLLM(), pd.DataFrame(), allow_dangerous_code=False - ) - - create_pandas_dataframe_agent(FakeLLM(), pd.DataFrame(), allow_dangerous_code=True) - create_pandas_dataframe_agent( - FakeLLM(), [pd.DataFrame(), pd.DataFrame()], allow_dangerous_code=True - ) diff --git a/libs/experimental/tests/unit_tests/chat_models/__init__.py b/libs/experimental/tests/unit_tests/chat_models/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py deleted file mode 100644 index 55501833373be..0000000000000 --- a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_llama2chat.py +++ 
/dev/null
@@ -1,156 +0,0 @@
-from typing import Any, List, Optional
-
-import pytest
-from langchain_core.callbacks.manager import (
-    AsyncCallbackManagerForLLMRun,
-    CallbackManagerForLLMRun,
-)
-from langchain_core.language_models import LLM
-from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
-
-from langchain_experimental.chat_models import Llama2Chat
-from langchain_experimental.chat_models.llm_wrapper import DEFAULT_SYSTEM_PROMPT
-
-
-class FakeLLM(LLM):
-    def _call(
-        self,
-        prompt: str,
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> str:
-        return prompt
-
-    async def _acall(
-        self,
-        prompt: str,
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> str:
-        return prompt
-
-    @property
-    def _llm_type(self) -> str:
-        return "fake-llm"
-
-
-@pytest.fixture
-def model() -> Llama2Chat:
-    return Llama2Chat(llm=FakeLLM())
-
-
-@pytest.fixture
-def model_cfg_sys_msg() -> Llama2Chat:
-    return Llama2Chat(llm=FakeLLM(), system_message=SystemMessage(content="sys-msg"))
-
-
-def test_default_system_message(model: Llama2Chat) -> None:
-    messages = [HumanMessage(content="usr-msg-1")]
-
-    actual = model.invoke(messages).content  # type: ignore
-    expected = (
-        f"[INST] <<SYS>>\n{DEFAULT_SYSTEM_PROMPT}\n<</SYS>>\n\nusr-msg-1 [/INST]"
-    )
-
-    assert actual == expected
-
-
-def test_configured_system_message(
-    model_cfg_sys_msg: Llama2Chat,
-) -> None:
-    messages = [HumanMessage(content="usr-msg-1")]
-
-    actual = model_cfg_sys_msg.invoke(messages).content  # type: ignore
-    expected = "[INST] <<SYS>>\nsys-msg\n<</SYS>>\n\nusr-msg-1 [/INST]"
-
-    assert actual == expected
-
-
-async def test_configured_system_message_async(
-    model_cfg_sys_msg: Llama2Chat,
-) -> None:
-    messages = [HumanMessage(content="usr-msg-1")]
-
-    actual = await model_cfg_sys_msg.ainvoke(messages)  # type: ignore
-    expected = "[INST] <<SYS>>\nsys-msg\n<</SYS>>\n\nusr-msg-1 [/INST]"
-
-    assert actual.content == expected
-
-
-def test_provided_system_message(
-    model_cfg_sys_msg: Llama2Chat,
-) -> None:
-    messages = [
-        SystemMessage(content="custom-sys-msg"),
-        HumanMessage(content="usr-msg-1"),
-    ]
-
-    actual = model_cfg_sys_msg.invoke(messages).content
-    expected = "[INST] <<SYS>>\ncustom-sys-msg\n<</SYS>>\n\nusr-msg-1 [/INST]"
-
-    assert actual == expected
-
-
-def test_human_ai_dialogue(model_cfg_sys_msg: Llama2Chat) -> None:
-    messages = [
-        HumanMessage(content="usr-msg-1"),
-        AIMessage(content="ai-msg-1"),
-        HumanMessage(content="usr-msg-2"),
-        AIMessage(content="ai-msg-2"),
-        HumanMessage(content="usr-msg-3"),
-    ]
-
-    actual = model_cfg_sys_msg.invoke(messages).content
-    expected = (
-        "[INST] <<SYS>>\nsys-msg\n<</SYS>>\n\nusr-msg-1 [/INST] ai-msg-1 "
-        "[INST] usr-msg-2 [/INST] ai-msg-2 [INST] usr-msg-3 [/INST]"
-    )
-
-    assert actual == expected
-
-
-def test_no_message(model: Llama2Chat) -> None:
-    with pytest.raises(ValueError) as info:
-        model.invoke([])
-
-    assert info.value.args[0] == "at least one HumanMessage must be provided"
-
-
-def test_ai_message_first(model: Llama2Chat) -> None:
-    with pytest.raises(ValueError) as info:
-        model.invoke([AIMessage(content="ai-msg-1")])
-
-    assert (
-        info.value.args[0]
-        == "messages list must start with a SystemMessage or UserMessage"
-    )
-
-
-def test_human_ai_messages_not_alternating(model: Llama2Chat) -> None:
-    messages = [
-        HumanMessage(content="usr-msg-1"),
-        HumanMessage(content="usr-msg-2"),
-        HumanMessage(content="ai-msg-1"),
-    ]
-
-    with
pytest.raises(ValueError) as info: - model.invoke(messages) # type: ignore - - assert info.value.args[0] == ( - "messages must be alternating human- and ai-messages, " - "optionally prepended by a system message" - ) - - -def test_last_message_not_human_message(model: Llama2Chat) -> None: - messages = [ - HumanMessage(content="usr-msg-1"), - AIMessage(content="ai-msg-1"), - ] - - with pytest.raises(ValueError) as info: - model.invoke(messages) - - assert info.value.args[0] == "last message must be a HumanMessage" diff --git a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py deleted file mode 100644 index 63871ea30e205..0000000000000 --- a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_mixtral.py +++ /dev/null @@ -1,31 +0,0 @@ -import pytest -from langchain.schema import AIMessage, HumanMessage, SystemMessage - -from langchain_experimental.chat_models import Mixtral -from tests.unit_tests.chat_models.test_llm_wrapper_llama2chat import FakeLLM - - -@pytest.fixture -def model() -> Mixtral: - return Mixtral(llm=FakeLLM()) - - -@pytest.fixture -def model_cfg_sys_msg() -> Mixtral: - return Mixtral(llm=FakeLLM(), system_message=SystemMessage(content="sys-msg")) - - -def test_prompt(model: Mixtral) -> None: - messages = [ - SystemMessage(content="sys-msg"), - HumanMessage(content="usr-msg-1"), - AIMessage(content="ai-msg-1"), - HumanMessage(content="usr-msg-2"), - ] - - actual = model.invoke(messages).content # type: ignore - expected = ( - "[INST] sys-msg\nusr-msg-1 [/INST] ai-msg-1 [INST] usr-msg-2 [/INST]" - ) - - assert actual == expected diff --git a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_orca.py b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_orca.py deleted file mode 100644 index c0ecb609877ed..0000000000000 --- a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_orca.py +++ /dev/null @@ -1,29 +0,0 @@ -import pytest -from langchain_core.messages import AIMessage, HumanMessage, SystemMessage - -from langchain_experimental.chat_models import Orca -from tests.unit_tests.chat_models.test_llm_wrapper_llama2chat import FakeLLM - - -@pytest.fixture -def model() -> Orca: - return Orca(llm=FakeLLM()) - - -@pytest.fixture -def model_cfg_sys_msg() -> Orca: - return Orca(llm=FakeLLM(), system_message=SystemMessage(content="sys-msg")) - - -def test_prompt(model: Orca) -> None: - messages = [ - SystemMessage(content="sys-msg"), - HumanMessage(content="usr-msg-1"), - AIMessage(content="ai-msg-1"), - HumanMessage(content="usr-msg-2"), - ] - - actual = model.invoke(messages).content # type: ignore - expected = "### System:\nsys-msg\n\n### User:\nusr-msg-1\n\n### Assistant:\nai-msg-1\n\n### User:\nusr-msg-2\n\n" # noqa: E501 - - assert actual == expected diff --git a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_vicuna.py b/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_vicuna.py deleted file mode 100644 index 4948c7a8deb83..0000000000000 --- a/libs/experimental/tests/unit_tests/chat_models/test_llm_wrapper_vicuna.py +++ /dev/null @@ -1,29 +0,0 @@ -import pytest -from langchain_core.messages import AIMessage, HumanMessage, SystemMessage - -from langchain_experimental.chat_models import Vicuna -from tests.unit_tests.chat_models.test_llm_wrapper_llama2chat import FakeLLM - - -@pytest.fixture -def model() -> Vicuna: - return Vicuna(llm=FakeLLM()) - - -@pytest.fixture -def model_cfg_sys_msg() -> Vicuna: 
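The expected strings in the Llama2Chat tests above spell out the Llama-2 chat transcript format. Reduced to a standalone sketch (this helper is illustrative, not the wrapper's implementation):

```python
def render_llama2(system, human_turns, ai_turns):
    # The system prompt rides inside <<SYS>> tags in the first [INST] block;
    # each later human turn opens a fresh [INST] ... [/INST] span.
    text = f"[INST] <<SYS>>\n{system}\n<</SYS>>\n\n{human_turns[0]} [/INST]"
    for reply, nxt in zip(ai_turns, human_turns[1:]):
        text += f" {reply} [INST] {nxt} [/INST]"
    return text

assert render_llama2("sys-msg", ["usr-msg-1"], []) == (
    "[INST] <<SYS>>\nsys-msg\n<</SYS>>\n\nusr-msg-1 [/INST]"
)
```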
- return Vicuna(llm=FakeLLM(), system_message=SystemMessage(content="sys-msg")) - - -def test_prompt(model: Vicuna) -> None: - messages = [ - SystemMessage(content="sys-msg"), - HumanMessage(content="usr-msg-1"), - AIMessage(content="ai-msg-1"), - HumanMessage(content="usr-msg-2"), - ] - - actual = model.invoke(messages).content # type: ignore - expected = "sys-msg USER: usr-msg-1 ASSISTANT: ai-msg-1 USER: usr-msg-2 " - - assert actual == expected diff --git a/libs/experimental/tests/unit_tests/conftest.py b/libs/experimental/tests/unit_tests/conftest.py deleted file mode 100644 index 4d9e78078261c..0000000000000 --- a/libs/experimental/tests/unit_tests/conftest.py +++ /dev/null @@ -1,84 +0,0 @@ -"""Configuration for unit tests.""" - -from importlib import util -from typing import Dict, Sequence - -import pytest -from pytest import Config, Function, Parser - - -def pytest_addoption(parser: Parser) -> None: - """Add custom command line options to pytest.""" - parser.addoption( - "--only-extended", - action="store_true", - help="Only run extended tests. Does not allow skipping any extended tests.", - ) - parser.addoption( - "--only-core", - action="store_true", - help="Only run core tests. Never runs any extended tests.", - ) - - -def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None: - """Add implementations for handling custom markers. - - At the moment, this adds support for a custom `requires` marker. - - The `requires` marker is used to denote tests that require one or more packages - to be installed to run. If the package is not installed, the test is skipped. - - The `requires` marker syntax is: - - .. code-block:: python - - @pytest.mark.requires("package1", "package2") - def test_something(): - ... - """ - # Mapping from the name of a package to whether it is installed or not. - # Used to avoid repeated calls to `util.find_spec` - required_pkgs_info: Dict[str, bool] = {} - - only_extended = config.getoption("--only-extended") or False - only_core = config.getoption("--only-core") or False - - if only_extended and only_core: - raise ValueError("Cannot specify both `--only-extended` and `--only-core`.") - - for item in items: - requires_marker = item.get_closest_marker("requires") - if requires_marker is not None: - if only_core: - item.add_marker(pytest.mark.skip(reason="Skipping not a core test.")) - continue - - # Iterate through the list of required packages - required_pkgs = requires_marker.args - for pkg in required_pkgs: - # If we haven't yet checked whether the pkg is installed - # let's check it and store the result. - if pkg not in required_pkgs_info: - required_pkgs_info[pkg] = util.find_spec(pkg) is not None - - if not required_pkgs_info[pkg]: - if only_extended: - pytest.fail( - f"Package `{pkg}` is not installed but is required for " - f"extended tests. Please install the given package and " - f"try again.", - ) - - else: - # If the package is not installed, we immediately break - # and mark the test as skipped. 
-                        item.add_marker(
-                            pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
-                        )
-                        break
-        else:
-            if only_extended:
-                item.add_marker(
-                    pytest.mark.skip(reason="Skipping not an extended test.")
-                )
diff --git a/libs/experimental/tests/unit_tests/fake_llm.py b/libs/experimental/tests/unit_tests/fake_llm.py
deleted file mode 100644
index f1c0652ab9c91..0000000000000
--- a/libs/experimental/tests/unit_tests/fake_llm.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""Fake LLM wrapper for testing purposes."""
-
-from typing import Any, Dict, List, Mapping, Optional, cast
-
-from langchain_core.callbacks.manager import CallbackManagerForLLMRun
-from langchain_core.language_models import LLM
-from pydantic import model_validator
-
-
-class FakeLLM(LLM):
-    """Fake LLM wrapper for testing purposes."""
-
-    queries: Optional[Mapping] = None
-    sequential_responses: Optional[bool] = False
-    response_index: int = 0
-
-    @model_validator(mode="before")
-    @classmethod
-    def check_queries_required(cls, values: dict) -> dict:
-        if values.get("sequential_response") and not values.get("queries"):
-            raise ValueError(
-                "queries is required when sequential_response is set to True"
-            )
-        return values
-
-    def get_num_tokens(self, text: str) -> int:
-        """Return number of tokens."""
-        return len(text.split())
-
-    @property
-    def _llm_type(self) -> str:
-        """Return type of llm."""
-        return "fake"
-
-    def _call(
-        self,
-        prompt: str,
-        stop: Optional[List[str]] = None,
-        run_manager: Optional[CallbackManagerForLLMRun] = None,
-        **kwargs: Any,
-    ) -> str:
-        if self.sequential_responses:
-            return self._get_next_response_in_sequence
-
-        if self.queries is not None:
-            return self.queries[prompt]
-        if stop is None:
-            return "foo"
-        else:
-            return "bar"
-
-    @property
-    def _identifying_params(self) -> Dict[str, Any]:
-        return {}
-
-    @property
-    def _get_next_response_in_sequence(self) -> str:
-        queries = cast(Mapping, self.queries)
-        response = queries[list(queries.keys())[self.response_index]]
-        self.response_index = self.response_index + 1
-        return response
diff --git a/libs/experimental/tests/unit_tests/python/__init__.py b/libs/experimental/tests/unit_tests/python/__init__.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/libs/experimental/tests/unit_tests/python/test_python_1.py b/libs/experimental/tests/unit_tests/python/test_python_1.py
deleted file mode 100644
index 4c961d84371b8..0000000000000
--- a/libs/experimental/tests/unit_tests/python/test_python_1.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""Test functionality of Python REPL."""
-
-import sys
-
-import pytest
-
-from langchain_experimental.tools.python.tool import PythonAstREPLTool, PythonREPLTool
-from langchain_experimental.utilities.python import PythonREPL
-
-_SAMPLE_CODE = """
-```
-def multiply():
-    print(5*6)  # noqa: T201
-multiply()
-```
-"""
-
-_AST_SAMPLE_CODE = """
-```
-def multiply():
-    return(5*6)
-multiply()
-```
-"""
-
-_AST_SAMPLE_CODE_EXECUTE = """
-```
-def multiply(a, b):
-    return(5*6)
-a = 5
-b = 6
-
-multiply(a, b)
-```
-"""
-
-
-def test_python_repl() -> None:
-    """Test functionality when globals/locals are not provided."""
-    repl = PythonREPL()
-
-    # Run a simple initial command.
-    repl.run("foo = 1")
-    assert repl.locals is not None
-    assert repl.locals["foo"] == 1
-
-    # Now run a command that accesses `foo` to make sure it still has it.
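A minimal usage sketch of the `FakeLLM` deleted above, with the behavior read directly off its `_call` method (assumes the class is importable from the deleted test package):

```python
llm = FakeLLM(queries={"ping": "pong"})
assert llm.invoke("ping") == "pong"  # exact-prompt lookup; unknown prompts raise KeyError

fallback = FakeLLM()  # no queries configured
assert fallback.invoke("anything") == "foo"               # without stop words
assert fallback.invoke("anything", stop=["\n"]) == "bar"  # with stop words
```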
- repl.run("bar = foo * 2") - assert repl.locals is not None - assert repl.locals["bar"] == 2 - - -def test_python_repl_no_previous_variables() -> None: - """Test that it does not have access to variables created outside the scope.""" - foo = 3 # noqa: F841 - repl = PythonREPL() - output = repl.run("print(foo)") - assert output == """NameError("name 'foo' is not defined")""" - - -def test_python_repl_pass_in_locals() -> None: - """Test functionality when passing in locals.""" - _locals = {"foo": 4} - repl = PythonREPL(_locals=_locals) - repl.run("bar = foo * 2") - assert repl.locals is not None - assert repl.locals["bar"] == 8 - - -def test_functionality() -> None: - """Test correct functionality.""" - chain = PythonREPL() - code = "print(1 + 1)" - output = chain.run(code) - assert output == "2\n" - - -def test_functionality_multiline() -> None: - """Test correct functionality for ChatGPT multiline commands.""" - chain = PythonREPL() - tool = PythonREPLTool(python_repl=chain) - output = tool.run(_SAMPLE_CODE) - assert output == "30\n" - - -def test_python_ast_repl_multiline() -> None: - """Test correct functionality for ChatGPT multiline commands.""" - if sys.version_info < (3, 9): - pytest.skip("Python 3.9+ is required for this test") - tool = PythonAstREPLTool() - output = tool.run(_AST_SAMPLE_CODE) - assert output == 30 - - -def test_python_ast_repl_multi_statement() -> None: - """Test correct functionality for ChatGPT multi statement commands.""" - if sys.version_info < (3, 9): - pytest.skip("Python 3.9+ is required for this test") - tool = PythonAstREPLTool() - output = tool.run(_AST_SAMPLE_CODE_EXECUTE) - assert output == 30 - - -def test_function() -> None: - """Test correct functionality.""" - chain = PythonREPL() - code = "def add(a, b): " " return a + b" - output = chain.run(code) - assert output == "" - - code = "print(add(1, 2))" - output = chain.run(code) - assert output == "3\n" diff --git a/libs/experimental/tests/unit_tests/python/test_python_2.py b/libs/experimental/tests/unit_tests/python/test_python_2.py deleted file mode 100644 index 56ebaaaf246fb..0000000000000 --- a/libs/experimental/tests/unit_tests/python/test_python_2.py +++ /dev/null @@ -1,165 +0,0 @@ -"""Test Python REPL Tools.""" - -import sys - -import numpy as np -import pytest - -from langchain_experimental.tools.python.tool import ( - PythonAstREPLTool, - PythonREPLTool, - sanitize_input, -) - - -def test_python_repl_tool_single_input() -> None: - """Test that the python REPL tool works with a single input.""" - tool = PythonREPLTool() - assert tool.is_single_input - assert int(tool.run("print(1 + 1)").strip()) == 2 - - -def test_python_repl_print() -> None: - program = """ -import numpy as np -v1 = np.array([1, 2, 3]) -v2 = np.array([4, 5, 6]) -dot_product = np.dot(v1, v2) -print("The dot product is {:d}.".format(dot_product)) # noqa: T201 - """ - tool = PythonREPLTool() - assert tool.run(program) == "The dot product is 32.\n" - - -@pytest.mark.skipif( - sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." -) -def test_python_ast_repl_tool_single_input() -> None: - """Test that the python REPL tool works with a single input.""" - tool = PythonAstREPLTool() - assert tool.is_single_input - assert tool.run("1 + 1") == 2 - - -@pytest.mark.skipif( - sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." 
-) -def test_python_ast_repl_return() -> None: - program = """ -``` -import numpy as np -v1 = np.array([1, 2, 3]) -v2 = np.array([4, 5, 6]) -dot_product = np.dot(v1, v2) -int(dot_product) -``` - """ - tool = PythonAstREPLTool() - assert tool.run(program) == 32 - - program = """ -```python -import numpy as np -v1 = np.array([1, 2, 3]) -v2 = np.array([4, 5, 6]) -dot_product = np.dot(v1, v2) -int(dot_product) -``` - """ - assert tool.run(program) == 32 - - -@pytest.mark.skipif( - sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." -) -def test_python_ast_repl_print() -> None: - program = """python -string = "racecar" -if string == string[::-1]: - print(string, "is a palindrome") # noqa: T201 -else: - print(string, "is not a palindrome")""" - tool = PythonAstREPLTool() - assert tool.run(program) == "racecar is a palindrome\n" - - -@pytest.mark.skipif( - sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." -) -def test_repl_print_python_backticks() -> None: - program = "`print('`python` is a great language.')`" - tool = PythonAstREPLTool() - assert tool.run(program) == "`python` is a great language.\n" - - -@pytest.mark.skipif( - sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." -) -def test_python_ast_repl_raise_exception() -> None: - data = {"Name": ["John", "Alice"], "Age": [30, 25]} - program = """ -import pandas as pd -df = pd.DataFrame(data) -df['Gender'] - """ - tool = PythonAstREPLTool(locals={"data": data}) - expected_outputs = ( - "KeyError: 'Gender'", - "ModuleNotFoundError: No module named 'pandas'", - ) - assert tool.run(program) in expected_outputs - - -@pytest.mark.skipif( - sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." -) -def test_python_ast_repl_one_line_print() -> None: - program = 'print("The square of {} is {:.2f}".format(3, 3**2))' - tool = PythonAstREPLTool() - assert tool.run(program) == "The square of 3 is 9.00\n" - - -@pytest.mark.skipif( - sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." -) -def test_python_ast_repl_one_line_return() -> None: - arr = np.array([1, 2, 3, 4, 5]) - tool = PythonAstREPLTool(locals={"arr": arr}) - program = "`(arr**2).sum() # Returns sum of squares`" - assert tool.run(program) == 55 - - -@pytest.mark.skipif( - sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run." 
-) -def test_python_ast_repl_one_line_exception() -> None: - program = "[1, 2, 3][4]" - tool = PythonAstREPLTool() - assert tool.run(program) == "IndexError: list index out of range" - - -def test_sanitize_input() -> None: - query = """ - ``` - p = 5 - ``` - """ - expected = "p = 5" - actual = sanitize_input(query) - assert expected == actual - - query = """ - ```python - p = 5 - ``` - """ - expected = "p = 5" - actual = sanitize_input(query) - assert expected == actual - - query = """ - p = 5 - """ - expected = "p = 5" - actual = sanitize_input(query) - assert expected == actual diff --git a/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py deleted file mode 100644 index 97aed9d2284cc..0000000000000 --- a/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_chain_call.py +++ /dev/null @@ -1,475 +0,0 @@ -from typing import Any, Dict - -import pytest -from langchain_community.chat_models import FakeListChatModel -from langchain_core.prompts.prompt import PromptTemplate -from test_utils import MockEncoder, MockEncoderReturnsList - -import langchain_experimental.rl_chain.base as rl_chain -import langchain_experimental.rl_chain.helpers -import langchain_experimental.rl_chain.pick_best_chain as pick_best_chain - -encoded_keyword = "[encoded]" - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def setup() -> tuple: - _PROMPT_TEMPLATE = """This is a dummy prompt that will be ignored by the fake llm""" - PROMPT = PromptTemplate(input_variables=[], template=_PROMPT_TEMPLATE) - - llm = FakeListChatModel(responses=["hey"]) - return llm, PROMPT - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_multiple_ToSelectFrom_throws() -> None: - llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm( - llm=llm, - prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - actions = ["0", "1", "2"] - with pytest.raises(ValueError): - chain.run( - User=rl_chain.BasedOn("Context"), - action=rl_chain.ToSelectFrom(actions), - another_action=rl_chain.ToSelectFrom(actions), - ) - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_missing_basedOn_from_throws() -> None: - llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm( - llm=llm, - prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - actions = ["0", "1", "2"] - with pytest.raises(ValueError): - chain.run(action=rl_chain.ToSelectFrom(actions)) - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_ToSelectFrom_not_a_list_throws() -> None: - llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm( - llm=llm, - prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - actions = {"actions": ["0", "1", "2"]} - with pytest.raises(ValueError): - chain.run( - User=rl_chain.BasedOn("Context"), - action=rl_chain.ToSelectFrom(actions), - ) - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_update_with_delayed_score_with_auto_validator_throws() -> None: - llm, PROMPT = setup() - # this LLM returns a number so that the auto validator will return that - auto_val_llm = FakeListChatModel(responses=["3"]) - chain = pick_best_chain.PickBest.from_llm( - llm=llm, - prompt=PROMPT, - 
selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), # type: ignore[call-arg] - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - actions = ["0", "1", "2"] - response = chain.run( - User=rl_chain.BasedOn("Context"), - action=rl_chain.ToSelectFrom(actions), - ) - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 3.0 # type: ignore - with pytest.raises(RuntimeError): - chain.update_with_delayed_score( - chain_response=response, - score=100, # type: ignore - ) - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_update_with_delayed_score_force() -> None: - llm, PROMPT = setup() - # this LLM returns a number so that the auto validator will return that - auto_val_llm = FakeListChatModel(responses=["3"]) - chain = pick_best_chain.PickBest.from_llm( - llm=llm, - prompt=PROMPT, - selection_scorer=rl_chain.AutoSelectionScorer(llm=auto_val_llm), # type: ignore[call-arg] - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - actions = ["0", "1", "2"] - response = chain.run( - User=rl_chain.BasedOn("Context"), - action=rl_chain.ToSelectFrom(actions), - ) - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 3.0 # type: ignore - chain.update_with_delayed_score( - chain_response=response, - score=100, - force_score=True, # type: ignore - ) - assert selection_metadata.selected.score == 100.0 # type: ignore - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_update_with_delayed_score() -> None: - llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm( - llm=llm, - prompt=PROMPT, - selection_scorer=None, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - actions = ["0", "1", "2"] - response = chain.run( - User=rl_chain.BasedOn("Context"), - action=rl_chain.ToSelectFrom(actions), - ) - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score is None # type: ignore - chain.update_with_delayed_score(chain_response=response, score=100) # type: ignore - assert selection_metadata.selected.score == 100.0 # type: ignore - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_user_defined_scorer() -> None: - llm, PROMPT = setup() - - class CustomSelectionScorer(rl_chain.SelectionScorer): - def score_response( - self, - inputs: Dict[str, Any], - llm_response: str, - event: pick_best_chain.PickBestEvent, - ) -> float: - score = 200 - return score - - chain = pick_best_chain.PickBest.from_llm( - llm=llm, - prompt=PROMPT, - selection_scorer=CustomSelectionScorer(), - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - actions = ["0", "1", "2"] - response = chain.run( - User=rl_chain.BasedOn("Context"), - action=rl_chain.ToSelectFrom(actions), - ) - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 200.0 # type: ignore - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_everything_embedded() -> None: - 
llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False - ) - - str1 = "0" - str2 = "1" - str3 = "2" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - - ctx_str_1 = "context1" - - encoded_ctx_str_1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + ctx_str_1) - ) - - expected = f"""shared |User {ctx_str_1 + " " + encoded_ctx_str_1} \n|action {str1 + " " + encoded_str1} \n|action {str2 + " " + encoded_str2} \n|action {str3 + " " + encoded_str3} """ # noqa - - actions = [str1, str2, str3] - - response = chain.run( - User=rl_chain.EmbedAndKeep(rl_chain.BasedOn(ctx_str_1)), - action=rl_chain.EmbedAndKeep(rl_chain.ToSelectFrom(actions)), - ) - selection_metadata = response["selection_metadata"] # type: ignore - vw_str = feature_embedder.format(selection_metadata) # type: ignore - assert vw_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_auto_embedder_is_off() -> None: - llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, feature_embedder=feature_embedder - ) - - str1 = "0" - str2 = "1" - str3 = "2" - ctx_str_1 = "context1" - - expected = f"""shared |User {ctx_str_1} \n|action {str1} \n|action {str2} \n|action {str3} """ # noqa - - actions = [str1, str2, str3] - - response = chain.run( - User=pick_best_chain.base.BasedOn(ctx_str_1), - action=pick_best_chain.base.ToSelectFrom(actions), - ) - selection_metadata = response["selection_metadata"] # type: ignore - vw_str = feature_embedder.format(selection_metadata) # type: ignore - assert vw_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_w_embeddings_off() -> None: - llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=False - ) - - str1 = "0" - str2 = "1" - str3 = "2" - ctx_str_1 = "context1" - - expected = f"""shared |User {ctx_str_1} \n|action {str1} \n|action {str2} \n|action {str3} """ # noqa - - actions = [str1, str2, str3] - - response = chain.run( - User=rl_chain.BasedOn(ctx_str_1), - action=rl_chain.ToSelectFrom(actions), - ) - selection_metadata = response["selection_metadata"] # type: ignore - vw_str = feature_embedder.format(selection_metadata) # type: ignore - assert vw_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_w_embeddings_on() -> None: - llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=True, model=MockEncoderReturnsList() - ) - chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=True - ) - - str1 = "0" - str2 = "1" - ctx_str_1 = "context1" - dot_prod = "dotprod 0:5.0" # dot prod of [1.0, 2.0] and 
[1.0, 2.0] - - expected = f"""shared |User {ctx_str_1} |@ User={ctx_str_1}\n|action {str1} |# action={str1} |{dot_prod}\n|action {str2} |# action={str2} |{dot_prod}""" # noqa - - actions = [str1, str2] - - response = chain.run( - User=rl_chain.BasedOn(ctx_str_1), - action=rl_chain.ToSelectFrom(actions), - ) - selection_metadata = response["selection_metadata"] # type: ignore - vw_str = feature_embedder.format(selection_metadata) # type: ignore - assert vw_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_embeddings_mixed_w_explicit_user_embeddings() -> None: - llm, PROMPT = setup() - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=True, model=MockEncoderReturnsList() - ) - chain = pick_best_chain.PickBest.from_llm( - llm=llm, prompt=PROMPT, feature_embedder=feature_embedder, auto_embed=True - ) - - str1 = "0" - str2 = "1" - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - [1.0, 2.0] - ) - ctx_str_1 = "context1" - ctx_str_2 = "context2" - encoded_ctx_str_1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - [1.0, 2.0] - ) - dot_prod = "dotprod 0:5.0 1:5.0" # dot prod of [1.0, 2.0] and [1.0, 2.0] - - expected = f"""shared |User {encoded_ctx_str_1} |@ User={encoded_ctx_str_1} |User2 {ctx_str_2} |@ User2={ctx_str_2}\n|action {str1} |# action={str1} |{dot_prod}\n|action {encoded_str2} |# action={encoded_str2} |{dot_prod}""" # noqa - - actions = [str1, rl_chain.Embed(str2)] - - response = chain.run( - User=rl_chain.BasedOn(rl_chain.Embed(ctx_str_1)), - User2=rl_chain.BasedOn(ctx_str_2), - action=rl_chain.ToSelectFrom(actions), - ) - selection_metadata = response["selection_metadata"] # type: ignore - vw_str = feature_embedder.format(selection_metadata) # type: ignore - assert vw_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_default_no_scorer_specified() -> None: - _, PROMPT = setup() - chain_llm = FakeListChatModel(responses=["hey", "100"]) - chain = pick_best_chain.PickBest.from_llm( - llm=chain_llm, - prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - response = chain.run( - User=rl_chain.BasedOn("Context"), - action=rl_chain.ToSelectFrom(["0", "1", "2"]), - ) - # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 100.0 # type: ignore - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_explicitly_no_scorer() -> None: - llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm( - llm=llm, - prompt=PROMPT, - selection_scorer=None, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - response = chain.run( - User=rl_chain.BasedOn("Context"), - action=rl_chain.ToSelectFrom(["0", "1", "2"]), - ) - # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score is None # type: ignore - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_auto_scorer_with_user_defined_llm() -> None: - llm, PROMPT = setup() - scorer_llm = FakeListChatModel(responses=["300"]) - chain = pick_best_chain.PickBest.from_llm( - 
llm=llm, - prompt=PROMPT, - selection_scorer=rl_chain.AutoSelectionScorer(llm=scorer_llm), # type: ignore[call-arg] - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - response = chain.run( - User=rl_chain.BasedOn("Context"), - action=rl_chain.ToSelectFrom(["0", "1", "2"]), - ) - # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 300.0 # type: ignore - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_calling_chain_w_reserved_inputs_throws() -> None: - llm, PROMPT = setup() - chain = pick_best_chain.PickBest.from_llm( - llm=llm, - prompt=PROMPT, - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - with pytest.raises(ValueError): - chain.run( - User=rl_chain.BasedOn("Context"), - rl_chain_selected_based_on=rl_chain.ToSelectFrom(["0", "1", "2"]), - ) - - with pytest.raises(ValueError): - chain.run( - User=rl_chain.BasedOn("Context"), - rl_chain_selected=rl_chain.ToSelectFrom(["0", "1", "2"]), - ) - - -@pytest.mark.requires("vowpal_wabbit_next", "sentence_transformers") -def test_activate_and_deactivate_scorer() -> None: - _, PROMPT = setup() - llm = FakeListChatModel(responses=["hey1", "hey2", "hey3"]) - scorer_llm = FakeListChatModel(responses=["300", "400"]) - chain = pick_best_chain.PickBest.from_llm( - llm=llm, - prompt=PROMPT, - selection_scorer=pick_best_chain.base.AutoSelectionScorer(llm=scorer_llm), # type: ignore[call-arg] - feature_embedder=pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ), - ) - response = chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), - ) - # chain llm used for both basic prompt and for scoring - assert response["response"] == "hey1" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 300.0 # type: ignore - - chain.deactivate_selection_scorer() - response = chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), - ) - assert response["response"] == "hey2" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score is None # type: ignore - - chain.activate_selection_scorer() - response = chain.run( - User=pick_best_chain.base.BasedOn("Context"), - action=pick_best_chain.base.ToSelectFrom(["0", "1", "2"]), - ) - assert response["response"] == "hey3" # type: ignore - selection_metadata = response["selection_metadata"] # type: ignore - assert selection_metadata.selected.score == 400.0 # type: ignore diff --git a/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_text_embedder.py b/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_text_embedder.py deleted file mode 100644 index e080f83dfc8e7..0000000000000 --- a/libs/experimental/tests/unit_tests/rl_chain/test_pick_best_text_embedder.py +++ /dev/null @@ -1,423 +0,0 @@ -import pytest -from test_utils import MockEncoder - -import langchain_experimental.rl_chain.base as rl_chain -import langchain_experimental.rl_chain.helpers -import langchain_experimental.rl_chain.pick_best_chain as pick_best_chain - -encoded_keyword = "[encoded]" - - 
-@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_missing_context_throws() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - named_action = {"action": ["0", "1", "2"]} - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_action, based_on={} - ) - with pytest.raises(ValueError): - feature_embedder.format(event) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_missing_actions_throws() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from={}, based_on={"context": "context"} - ) - with pytest.raises(ValueError): - feature_embedder.format(event) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_no_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - named_actions = {"action1": ["0", "1", "2"]} - expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on={"context": "context"} - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_w_label_no_score_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - named_actions = {"action1": ["0", "1", "2"]} - expected = """shared |context context \n|action1 0 \n|action1 1 \n|action1 2 """ - selected = pick_best_chain.PickBestSelected(index=0, probability=1.0) - event = pick_best_chain.PickBestEvent( - inputs={}, - to_select_from=named_actions, - based_on={"context": "context"}, - selected=selected, - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_w_full_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - named_actions = {"action1": ["0", "1", "2"]} - expected = ( - """shared |context context \n0:-0.0:1.0 |action1 0 \n|action1 1 \n|action1 2 """ - ) - selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBestEvent( - inputs={}, - to_select_from=named_actions, - based_on={"context": "context"}, - selected=selected, - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_w_full_label_w_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - str1 = "0" - str2 = "1" - str3 = "2" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - - ctx_str_1 = "context1" - encoded_ctx_str_1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + ctx_str_1) - ) - - named_actions = {"action1": rl_chain.Embed([str1, str2, str3])} - context = {"context": rl_chain.Embed(ctx_str_1)} - 
expected = f"""shared |context {encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ # noqa: E501 - selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context, selected=selected - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_w_full_label_w_embed_and_keep() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - str1 = "0" - str2 = "1" - str3 = "2" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - - ctx_str_1 = "context1" - encoded_ctx_str_1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + ctx_str_1) - ) - - named_actions = {"action1": rl_chain.EmbedAndKeep([str1, str2, str3])} - context = {"context": rl_chain.EmbedAndKeep(ctx_str_1)} - expected = f"""shared |context {ctx_str_1 + " " + encoded_ctx_str_1} \n0:-0.0:1.0 |action1 {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ # noqa: E501 - selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context, selected=selected - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_no_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} - context = {"context1": "context1", "context2": "context2"} - expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} - context = {"context1": "context1", "context2": "context2"} - expected = """shared |context1 context1 |context2 context2 \n|a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 - selected = pick_best_chain.PickBestSelected(index=0, probability=1.0) - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context, selected=selected - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_no_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - named_actions = {"action1": [{"a": "0", "b": "0"}, "1", "2"]} - context = {"context1": 
"context1", "context2": "context2"} - expected = """shared |context1 context1 |context2 context2 \n0:-0.0:1.0 |a 0 |b 0 \n|action1 1 \n|action1 2 """ # noqa: E501 - selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context, selected=selected - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - - str1 = "0" - str2 = "1" - str3 = "2" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - - ctx_str_1 = "context1" - ctx_str_2 = "context2" - encoded_ctx_str_1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + ctx_str_1) - ) - encoded_ctx_str_2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + ctx_str_2) - ) - - named_actions = {"action1": rl_chain.Embed([{"a": str1, "b": str1}, str2, str3])} - context = { - "context1": rl_chain.Embed(ctx_str_1), - "context2": rl_chain.Embed(ctx_str_2), - } - expected = f"""shared |context1 {encoded_ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {encoded_str1} |b {encoded_str1} \n|action1 {encoded_str2} \n|action1 {encoded_str3} """ # noqa: E501 - - selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context, selected=selected - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_w_full_embed_and_keep() -> ( - None -): - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - - str1 = "0" - str2 = "1" - str3 = "2" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - - ctx_str_1 = "context1" - ctx_str_2 = "context2" - encoded_ctx_str_1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + ctx_str_1) - ) - encoded_ctx_str_2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + ctx_str_2) - ) - - named_actions = { - "action1": rl_chain.EmbedAndKeep([{"a": str1, "b": str1}, str2, str3]) - } - context = { - "context1": rl_chain.EmbedAndKeep(ctx_str_1), - "context2": rl_chain.EmbedAndKeep(ctx_str_2), - } - expected = f"""shared |context1 {ctx_str_1 + " " + encoded_ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1 + " " + encoded_str1} |b {str1 + " " + encoded_str1} \n|action1 {str2 + " " + encoded_str2} \n|action1 {str3 + " " + encoded_str3} """ # noqa: E501 - - selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, 
score=0.0) - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context, selected=selected - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emb() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - - str1 = "0" - str2 = "1" - str3 = "2" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - - ctx_str_1 = "context1" - ctx_str_2 = "context2" - encoded_ctx_str_2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + ctx_str_2) - ) - - named_actions = { - "action1": [ - {"a": str1, "b": rl_chain.Embed(str1)}, - str2, - rl_chain.Embed(str3), - ] - } - context = {"context1": ctx_str_1, "context2": rl_chain.Embed(ctx_str_2)} - expected = f"""shared |context1 {ctx_str_1} |context2 {encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {encoded_str1} \n|action1 {str2} \n|action1 {encoded_str3} """ # noqa: E501 - - selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context, selected=selected - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_pickbest_textembedder_more_namespaces_w_full_label_w_partial_emakeep() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - - str1 = "0" - str2 = "1" - str3 = "2" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - - ctx_str_1 = "context1" - ctx_str_2 = "context2" - encoded_ctx_str_2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + ctx_str_2) - ) - - named_actions = { - "action1": [ - {"a": str1, "b": rl_chain.EmbedAndKeep(str1)}, - str2, - rl_chain.EmbedAndKeep(str3), - ] - } - context = { - "context1": ctx_str_1, - "context2": rl_chain.EmbedAndKeep(ctx_str_2), - } - expected = f"""shared |context1 {ctx_str_1} |context2 {ctx_str_2 + " " + encoded_ctx_str_2} \n0:-0.0:1.0 |a {str1} |b {str1 + " " + encoded_str1} \n|action1 {str2} \n|action1 {str3 + " " + encoded_str3} """ # noqa: E501 - - selected = pick_best_chain.PickBestSelected(index=0, probability=1.0, score=0.0) - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context, selected=selected - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_raw_features_underscored() -> None: - feature_embedder = pick_best_chain.PickBestFeatureEmbedder( - auto_embed=False, model=MockEncoder() - ) - str1 = "this is a long string" - str1_underscored = str1.replace(" ", "_") - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - - ctx_str = "this is a long context" - ctx_str_underscored = ctx_str.replace(" ", "_") - encoded_ctx_str = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + 
ctx_str) - ) - - # No embeddings - named_actions = {"action": [str1]} - context = {"context": ctx_str} - expected_no_embed = ( - f"""shared |context {ctx_str_underscored} \n|action {str1_underscored} """ - ) - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected_no_embed - - # Just embeddings - named_actions = {"action": rl_chain.Embed([str1])} - context = {"context": rl_chain.Embed(ctx_str)} - expected_embed = f"""shared |context {encoded_ctx_str} \n|action {encoded_str1} """ - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected_embed - - # Embeddings and raw features - named_actions = {"action": rl_chain.EmbedAndKeep([str1])} - context = {"context": rl_chain.EmbedAndKeep(ctx_str)} - expected_embed_and_keep = f"""shared |context {ctx_str_underscored + " " + encoded_ctx_str} \n|action {str1_underscored + " " + encoded_str1} """ # noqa: E501 - event = pick_best_chain.PickBestEvent( - inputs={}, to_select_from=named_actions, based_on=context - ) - vw_ex_str = feature_embedder.format(event) - assert vw_ex_str == expected_embed_and_keep diff --git a/libs/experimental/tests/unit_tests/rl_chain/test_rl_chain_base_embedder.py b/libs/experimental/tests/unit_tests/rl_chain/test_rl_chain_base_embedder.py deleted file mode 100644 index 8e8465b0b88ef..0000000000000 --- a/libs/experimental/tests/unit_tests/rl_chain/test_rl_chain_base_embedder.py +++ /dev/null @@ -1,530 +0,0 @@ -from typing import List, Union - -import pytest -from test_utils import MockEncoder - -import langchain_experimental.rl_chain.base as base -import langchain_experimental.rl_chain.helpers - -encoded_keyword = "[encoded]" - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_simple_context_str_no_emb() -> None: - expected = [{"a_namespace": "test"}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - "test", MockEncoder(), "a_namespace" - ) - == expected - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_simple_context_str_w_emb() -> None: - str1 = "test" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - expected = [{"a_namespace": encoded_str1}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - base.Embed(str1), MockEncoder(), "a_namespace" - ) - == expected - ) - expected_embed_and_keep = [{"a_namespace": str1 + " " + encoded_str1}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - base.EmbedAndKeep(str1), MockEncoder(), "a_namespace" - ) - == expected_embed_and_keep - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_simple_context_str_w_nested_emb() -> None: - # nested embeddings, innermost wins - str1 = "test" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - expected = [{"a_namespace": encoded_str1}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - base.EmbedAndKeep(base.Embed(str1)), MockEncoder(), "a_namespace" - ) - == expected - ) - - expected2 = [{"a_namespace": str1 + " " + encoded_str1}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - base.Embed(base.EmbedAndKeep(str1)), MockEncoder(), "a_namespace" - ) - == expected2 - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_context_w_namespace_no_emb() -> None: - expected = [{"test_namespace": 
"test"}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": "test"}, MockEncoder() - ) - == expected - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_context_w_namespace_w_emb() -> None: - str1 = "test" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - expected = [{"test_namespace": encoded_str1}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": base.Embed(str1)}, MockEncoder() - ) - == expected - ) - expected_embed_and_keep = [{"test_namespace": str1 + " " + encoded_str1}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": base.EmbedAndKeep(str1)}, MockEncoder() - ) - == expected_embed_and_keep - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_context_w_namespace_w_emb2() -> None: - str1 = "test" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - expected = [{"test_namespace": encoded_str1}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - base.Embed({"test_namespace": str1}), MockEncoder() - ) - == expected - ) - expected_embed_and_keep = [{"test_namespace": str1 + " " + encoded_str1}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - base.EmbedAndKeep({"test_namespace": str1}), MockEncoder() - ) - == expected_embed_and_keep - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_context_w_namespace_w_some_emb() -> None: - str1 = "test1" - str2 = "test2" - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - expected = [{"test_namespace": str1, "test_namespace2": encoded_str2}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": str1, "test_namespace2": base.Embed(str2)}, MockEncoder() - ) - == expected - ) - expected_embed_and_keep = [ - { - "test_namespace": str1, - "test_namespace2": str2 + " " + encoded_str2, - } - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": str1, "test_namespace2": base.EmbedAndKeep(str2)}, - MockEncoder(), - ) - == expected_embed_and_keep - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_simple_action_strlist_no_emb() -> None: - str1 = "test1" - str2 = "test2" - str3 = "test3" - expected = [{"a_namespace": str1}, {"a_namespace": str2}, {"a_namespace": str3}] - to_embed: List[Union[str, langchain_experimental.rl_chain.helpers._Embed]] = [ - str1, - str2, - str3, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - to_embed, MockEncoder(), "a_namespace" - ) - == expected - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_simple_action_strlist_w_emb() -> None: - str1 = "test1" - str2 = "test2" - str3 = "test3" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - expected = [ - {"a_namespace": encoded_str1}, - {"a_namespace": encoded_str2}, - {"a_namespace": encoded_str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - base.Embed([str1, str2, str3]), MockEncoder(), "a_namespace" - ) - == expected - ) - expected_embed_and_keep = [ - {"a_namespace": str1 + " " + encoded_str1}, - {"a_namespace": str2 + " " + encoded_str2}, - 
{"a_namespace": str3 + " " + encoded_str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - base.EmbedAndKeep([str1, str2, str3]), MockEncoder(), "a_namespace" - ) - == expected_embed_and_keep - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_simple_action_strlist_w_some_emb() -> None: - str1 = "test1" - str2 = "test2" - str3 = "test3" - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - expected = [ - {"a_namespace": str1}, - {"a_namespace": encoded_str2}, - {"a_namespace": encoded_str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - [str1, base.Embed(str2), base.Embed(str3)], MockEncoder(), "a_namespace" - ) - == expected - ) - expected_embed_and_keep = [ - {"a_namespace": str1}, - {"a_namespace": str2 + " " + encoded_str2}, - {"a_namespace": str3 + " " + encoded_str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - [str1, base.EmbedAndKeep(str2), base.EmbedAndKeep(str3)], - MockEncoder(), - "a_namespace", - ) - == expected_embed_and_keep - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_action_w_namespace_no_emb() -> None: - str1 = "test1" - str2 = "test2" - str3 = "test3" - expected = [ - {"test_namespace": str1}, - {"test_namespace": str2}, - {"test_namespace": str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - [ - {"test_namespace": str1}, - {"test_namespace": str2}, - {"test_namespace": str3}, - ], - MockEncoder(), - ) - == expected - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_action_w_namespace_w_emb() -> None: - str1 = "test1" - str2 = "test2" - str3 = "test3" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - expected = [ - {"test_namespace": encoded_str1}, - {"test_namespace": encoded_str2}, - {"test_namespace": encoded_str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - [ - {"test_namespace": base.Embed(str1)}, - {"test_namespace": base.Embed(str2)}, - {"test_namespace": base.Embed(str3)}, - ], - MockEncoder(), - ) - == expected - ) - expected_embed_and_keep = [ - {"test_namespace": str1 + " " + encoded_str1}, - {"test_namespace": str2 + " " + encoded_str2}, - {"test_namespace": str3 + " " + encoded_str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - [ - {"test_namespace": base.EmbedAndKeep(str1)}, - {"test_namespace": base.EmbedAndKeep(str2)}, - {"test_namespace": base.EmbedAndKeep(str3)}, - ], - MockEncoder(), - ) - == expected_embed_and_keep - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_action_w_namespace_w_emb2() -> None: - str1 = "test1" - str2 = "test2" - str3 = "test3" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - expected = [ - {"test_namespace1": encoded_str1}, - {"test_namespace2": encoded_str2}, - {"test_namespace3": encoded_str3}, - ] - 
assert ( - langchain_experimental.rl_chain.helpers.embed( - base.Embed( - [ - {"test_namespace1": str1}, - {"test_namespace2": str2}, - {"test_namespace3": str3}, - ] - ), - MockEncoder(), - ) - == expected - ) - expected_embed_and_keep = [ - {"test_namespace1": str1 + " " + encoded_str1}, - {"test_namespace2": str2 + " " + encoded_str2}, - {"test_namespace3": str3 + " " + encoded_str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - base.EmbedAndKeep( - [ - {"test_namespace1": str1}, - {"test_namespace2": str2}, - {"test_namespace3": str3}, - ] - ), - MockEncoder(), - ) - == expected_embed_and_keep - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_action_w_namespace_w_some_emb() -> None: - str1 = "test1" - str2 = "test2" - str3 = "test3" - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - expected = [ - {"test_namespace": str1}, - {"test_namespace": encoded_str2}, - {"test_namespace": encoded_str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - [ - {"test_namespace": str1}, - {"test_namespace": base.Embed(str2)}, - {"test_namespace": base.Embed(str3)}, - ], - MockEncoder(), - ) - == expected - ) - expected_embed_and_keep = [ - {"test_namespace": str1}, - {"test_namespace": str2 + " " + encoded_str2}, - {"test_namespace": str3 + " " + encoded_str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - [ - {"test_namespace": str1}, - {"test_namespace": base.EmbedAndKeep(str2)}, - {"test_namespace": base.EmbedAndKeep(str3)}, - ], - MockEncoder(), - ) - == expected_embed_and_keep - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_action_w_namespace_w_emb_w_more_than_one_item_in_first_dict() -> None: - str1 = "test1" - str2 = "test2" - str3 = "test3" - encoded_str1 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str1) - ) - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - encoded_str3 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str3) - ) - expected = [ - {"test_namespace": encoded_str1, "test_namespace2": str1}, - {"test_namespace": encoded_str2, "test_namespace2": str2}, - {"test_namespace": encoded_str3, "test_namespace2": str3}, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - [ - {"test_namespace": base.Embed(str1), "test_namespace2": str1}, - {"test_namespace": base.Embed(str2), "test_namespace2": str2}, - {"test_namespace": base.Embed(str3), "test_namespace2": str3}, - ], - MockEncoder(), - ) - == expected - ) - expected_embed_and_keep = [ - { - "test_namespace": str1 + " " + encoded_str1, - "test_namespace2": str1, - }, - { - "test_namespace": str2 + " " + encoded_str2, - "test_namespace2": str2, - }, - { - "test_namespace": str3 + " " + encoded_str3, - "test_namespace2": str3, - }, - ] - assert ( - langchain_experimental.rl_chain.helpers.embed( - [ - {"test_namespace": base.EmbedAndKeep(str1), "test_namespace2": str1}, - {"test_namespace": base.EmbedAndKeep(str2), "test_namespace2": str2}, - {"test_namespace": base.EmbedAndKeep(str3), "test_namespace2": str3}, - ], - MockEncoder(), - ) - == expected_embed_and_keep - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_one_namespace_w_list_of_features_no_emb() -> None: - str1 = "test1" - str2 = "test2" - expected = 
[{"test_namespace": [str1, str2]}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": [str1, str2]}, MockEncoder() - ) - == expected - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_one_namespace_w_list_of_features_w_some_emb() -> None: - str1 = "test1" - str2 = "test2" - encoded_str2 = langchain_experimental.rl_chain.helpers.stringify_embedding( - list(encoded_keyword + str2) - ) - expected = [{"test_namespace": [str1, encoded_str2]}] - assert ( - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": [str1, base.Embed(str2)]}, MockEncoder() - ) - == expected - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_nested_list_features_throws() -> None: - with pytest.raises(ValueError): - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": [[1, 2], [3, 4]]}, MockEncoder() - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_dict_in_list_throws() -> None: - with pytest.raises(ValueError): - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": [{"a": 1}, {"b": 2}]}, MockEncoder() - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_nested_dict_throws() -> None: - with pytest.raises(ValueError): - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": {"a": {"b": 1}}}, MockEncoder() - ) - - -@pytest.mark.requires("vowpal_wabbit_next") -def test_list_of_tuples_throws() -> None: - with pytest.raises(ValueError): - langchain_experimental.rl_chain.helpers.embed( - {"test_namespace": [("a", 1), ("b", 2)]}, MockEncoder() - ) diff --git a/libs/experimental/tests/unit_tests/rl_chain/test_utils.py b/libs/experimental/tests/unit_tests/rl_chain/test_utils.py deleted file mode 100644 index b2cc90b1bce6e..0000000000000 --- a/libs/experimental/tests/unit_tests/rl_chain/test_utils.py +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Any, List - - -class MockEncoder: - def encode(self, to_encode: str) -> str: - return "[encoded]" + to_encode - - -class MockEncoderReturnsList: - def encode(self, to_encode: Any) -> List: - if isinstance(to_encode, str): - return [1.0, 2.0] - elif isinstance(to_encode, List): - return [[1.0, 2.0] for _ in range(len(to_encode))] - raise ValueError("Invalid input type for unit test") diff --git a/libs/experimental/tests/unit_tests/test_bash.py b/libs/experimental/tests/unit_tests/test_bash.py deleted file mode 100644 index ba7b0d0b62d17..0000000000000 --- a/libs/experimental/tests/unit_tests/test_bash.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Test the bash utility.""" - -import re -import subprocess -import sys -from pathlib import Path - -import pytest - -from langchain_experimental.llm_bash.bash import BashProcess - - -@pytest.mark.skipif( - sys.platform.startswith("win"), reason="Test not supported on Windows" -) -def test_pwd_command() -> None: - """Test correct functionality.""" - session = BashProcess() - commands = ["pwd"] - output = session.run(commands) - - assert output == subprocess.check_output("pwd", shell=True).decode() - - -@pytest.mark.skip(reason="flaky on GHA, TODO to fix") -@pytest.mark.skipif( - sys.platform.startswith("win"), reason="Test not supported on Windows" -) -def test_pwd_command_persistent() -> None: - """Test correct functionality when the bash process is persistent.""" - session = BashProcess(persistent=True, strip_newlines=True) - commands = ["pwd"] - output = session.run(commands) - - assert subprocess.check_output("pwd", shell=True).decode().strip() in output - - session.run(["cd .."]) - new_output = session.run(["pwd"]) 
- # Assert that the new_output is a parent of the old output - assert Path(output).parent == Path(new_output) - - -@pytest.mark.skipif( - sys.platform.startswith("win"), reason="Test not supported on Windows" -) -def test_incorrect_command() -> None: - """Test handling of incorrect command.""" - session = BashProcess() - output = session.run(["invalid_command"]) - assert output == "Command 'invalid_command' returned non-zero exit status 127." - - -@pytest.mark.skipif( - sys.platform.startswith("win"), reason="Test not supported on Windows" -) -def test_incorrect_command_return_err_output() -> None: - """Test optional returning of shell output on incorrect command.""" - session = BashProcess(return_err_output=True) - output = session.run(["invalid_command"]) - assert re.match( - r"^/bin/sh:.*invalid_command.*(?:not found|Permission denied).*$", output - ) - - -@pytest.mark.skipif( - sys.platform.startswith("win"), reason="Test not supported on Windows" -) -def test_create_directory_and_files(tmp_path: Path) -> None: - """Test creation of a directory and files in a temporary directory.""" - session = BashProcess(strip_newlines=True) - - # create a subdirectory in the temporary directory - temp_dir = tmp_path / "test_dir" - temp_dir.mkdir() - - # run the commands in the temporary directory - commands = [ - f"touch {temp_dir}/file1.txt", - f"touch {temp_dir}/file2.txt", - f"echo 'hello world' > {temp_dir}/file2.txt", - f"cat {temp_dir}/file2.txt", - ] - - output = session.run(commands) - assert output == "hello world" - - # check that the files were created in the temporary directory - output = session.run([f"ls {temp_dir}"]) - assert output == "file1.txt\nfile2.txt" - - -@pytest.mark.skip(reason="flaky on GHA, TODO to fix") -@pytest.mark.skipif( - sys.platform.startswith("win"), reason="Test not supported on Windows" -) -def test_create_bash_persistent() -> None: - """Test the pexpect persistent bash terminal""" - session = BashProcess(persistent=True) - response = session.run("echo hello") - response += session.run("echo world") - - assert "hello" in response - assert "world" in response diff --git a/libs/experimental/tests/unit_tests/test_data_anonymizer.py b/libs/experimental/tests/unit_tests/test_data_anonymizer.py deleted file mode 100644 index fa7e7d23aab28..0000000000000 --- a/libs/experimental/tests/unit_tests/test_data_anonymizer.py +++ /dev/null @@ -1,216 +0,0 @@ -from typing import Iterator, List - -import pytest - -from . import is_libcublas_available - - -@pytest.fixture(scope="module", autouse=True) -def check_spacy_model() -> Iterator[None]: - import spacy - - if not spacy.util.is_package("en_core_web_lg"): - pytest.skip(reason="Spacy model 'en_core_web_lg' not installed") - yield - - -@pytest.fixture(scope="module", autouse=True) -def check_libcublas() -> Iterator[None]: - if not is_libcublas_available(): - pytest.skip(reason="libcublas.so is not available") - yield - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -@pytest.mark.parametrize( - "analyzed_fields,should_contain", - [(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)], -) -def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None: - """Test anonymizing a name in a simple sentence""" - from langchain_experimental.data_anonymizer import PresidioAnonymizer - - text = "Hello, my name is John Doe." 
- anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields) - anonymized_text = anonymizer.anonymize(text) - assert ("John Doe" in anonymized_text) == should_contain - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -@pytest.mark.parametrize( - "analyzed_fields,should_contain", - [(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)], -) -def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None: - """Test anonymizing a name in a simple sentence""" - from langchain_experimental.data_anonymizer import PresidioAnonymizer - - text = "Hello, my name is John Doe." - anonymizer = PresidioAnonymizer(analyzed_fields=analyzed_fields) - anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"]) - assert ("John Doe" in anonymized_text) == should_contain - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_anonymize_multiple() -> None: - """Test anonymizing multiple items in a sentence""" - from langchain_experimental.data_anonymizer import PresidioAnonymizer - - text = "John Smith's phone number is 313-666-7440 and email is johnsmith@gmail.com" - anonymizer = PresidioAnonymizer() - anonymized_text = anonymizer.anonymize(text) - for phrase in ["John Smith", "313-666-7440", "johnsmith@gmail.com"]: - assert phrase not in anonymized_text - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_check_instances() -> None: - """Test anonymizing multiple items in a sentence""" - from langchain_experimental.data_anonymizer import PresidioAnonymizer - - text = ( - "This is John Smith. John Smith works in a bakery." "John Smith is a good guy" - ) - anonymizer = PresidioAnonymizer(["PERSON"], faker_seed=42) - anonymized_text = anonymizer.anonymize(text) - assert anonymized_text.count("Connie Lawrence") == 3 - - # New name should be generated - anonymized_text = anonymizer.anonymize(text) - assert anonymized_text.count("Connie Lawrence") == 0 - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_anonymize_with_custom_operator() -> None: - """Test anonymize a name with a custom operator""" - from presidio_anonymizer.entities import OperatorConfig - - from langchain_experimental.data_anonymizer import PresidioAnonymizer - - custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})} - anonymizer = PresidioAnonymizer(operators=custom_operator) - - text = "Jane Doe was here." - - anonymized_text = anonymizer.anonymize(text) - assert anonymized_text == "NAME was here." - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_add_recognizer_operator() -> None: - """ - Test add recognizer and anonymize a new type of entity and with a custom operator - """ - from presidio_analyzer import PatternRecognizer - from presidio_anonymizer.entities import OperatorConfig - - from langchain_experimental.data_anonymizer import PresidioAnonymizer - - anonymizer = PresidioAnonymizer(analyzed_fields=[]) - titles_list = ["Sir", "Madam", "Professor"] - custom_recognizer = PatternRecognizer( - supported_entity="TITLE", deny_list=titles_list - ) - anonymizer.add_recognizer(custom_recognizer) - - # anonymizing with custom recognizer - text = "Madam Jane Doe was here." - anonymized_text = anonymizer.anonymize(text) - assert anonymized_text == " Jane Doe was here." 
-
-    # anonymizing with custom recognizer and operator
-    custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
-    anonymizer.add_operators(custom_operator)
-    anonymized_text = anonymizer.anonymize(text)
-    assert anonymized_text == "Dear Jane Doe was here."
-
-
-@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
-def test_non_faker_values() -> None:
-    """Test anonymizing multiple items in a sentence without faker values."""
-    from langchain_experimental.data_anonymizer import PresidioAnonymizer
-
-    text = (
-        "My name is John Smith. Your name is Adam Smith. Her name is Jane Smith."
-        "Our names are: John Smith, Adam Smith, Jane Smith."
-    )
-    expected_result = (
-        "My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>."
-        "Our names are: <PERSON>, <PERSON_2>, <PERSON_3>."
-    )
-    anonymizer = PresidioAnonymizer(add_default_faker_operators=False)
-    anonymized_text = anonymizer.anonymize(text)
-    assert anonymized_text == expected_result
-
-
-@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
-def test_exact_matching_strategy() -> None:
-    """
-    Test exact matching strategy for deanonymization.
-    """
-    from langchain_experimental.data_anonymizer import (
-        deanonymizer_matching_strategies as dms,
-    )
-
-    deanonymizer_mapping = {
-        "PERSON": {"Maria Lynch": "Slim Shady"},
-        "PHONE_NUMBER": {"7344131647": "313-666-7440"},
-        "EMAIL_ADDRESS": {"wdavis@example.net": "real.slim.shady@gmail.com"},
-        "CREDIT_CARD": {"213186379402654": "4916 0387 9536 0861"},
-    }
-
-    text = (
-        "Are you Maria Lynch? I found your card with number 213186379402654. "
-        "Is this your phone number: 7344131647? "
-        "Is this your email address: wdavis@example.net"
-    )
-
-    deanonymized_text = dms.exact_matching_strategy(text, deanonymizer_mapping)
-
-    for original_value in [
-        "Slim Shady",
-        "313-666-7440",
-        "real.slim.shady@gmail.com",
-        "4916 0387 9536 0861",
-    ]:
-        assert original_value in deanonymized_text
-
-
-@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
-def test_best_matching_strategy() -> None:
-    """
-    Test best matching (combined exact and fuzzy) strategy for deanonymization.
-    """
-    from langchain_experimental.data_anonymizer import (
-        deanonymizer_matching_strategies as dms,
-    )
-
-    deanonymizer_mapping = {
-        "PERSON": {"Maria Lynch": "Slim Shady"},
-        "PHONE_NUMBER": {"7344131647": "313-666-7440"},
-        "EMAIL_ADDRESS": {"wdavis@example.net": "real.slim.shady@gmail.com"},
-        "CREDIT_CARD": {"213186379402654": "4916 0387 9536 0861"},
-    }
-
-    # Changed some values:
-    # - "Maria Lynch" -> "Maria K. Lynch"
-    # - "7344131647" -> "734-413-1647"
-    # - "213186379402654" -> "2131 8637 9402 654"
-    # - "wdavis@example.net" -> the same to test exact match
-    text = (
-        "Are you Maria K. Lynch? I found your card with number 2131 8637 9402 654. "
-        "Is this your phone number: 734-413-1647?"
- "Is this your email address: wdavis@example.net" - ) - - deanonymized_text = dms.combined_exact_fuzzy_matching_strategy( - text, deanonymizer_mapping - ) - - for original_value in [ - "Slim Shady", - "313-666-7440", - "real.slim.shady@gmail.com", - "4916 0387 9536 0861", - ]: - assert original_value in deanonymized_text diff --git a/libs/experimental/tests/unit_tests/test_imports.py b/libs/experimental/tests/unit_tests/test_imports.py deleted file mode 100644 index 7da7cb3f8fc1a..0000000000000 --- a/libs/experimental/tests/unit_tests/test_imports.py +++ /dev/null @@ -1,46 +0,0 @@ -import importlib -from pathlib import Path - -PKG_ROOT = Path(__file__).parent.parent.parent -PKG_CODE = PKG_ROOT / "langchain_experimental" - - -def test_importable_all() -> None: - """Test that all modules in langchain_experimental are importable.""" - failures = [] - found_at_least_one = False - for path in PKG_CODE.rglob("*.py"): - relative_path = str(Path(path).relative_to(PKG_CODE)).replace("/", ".") - if relative_path.endswith(".typed"): - continue - if relative_path.endswith("/__init__.py"): - # Then strip __init__.py - s = "/__init__.py" - module_name = relative_path[: -len(s)] - else: # just strip .py - module_name = relative_path[:-3] - - if not module_name: - continue - try: - module = importlib.import_module("langchain_experimental." + module_name) - except ImportError: - failures.append("langchain_experimental." + module_name) - continue - - all_ = getattr(module, "__all__", []) - for cls_ in all_: - try: - getattr(module, cls_) - except AttributeError: - failures.append(f"{module_name}.{cls_}") - - found_at_least_one = True - - if failures: - raise AssertionError( - "The following modules or classes could not be imported: " - + ", ".join(failures) - ) - - assert found_at_least_one is True diff --git a/libs/experimental/tests/unit_tests/test_llm_bash.py b/libs/experimental/tests/unit_tests/test_llm_bash.py deleted file mode 100644 index 1adbd999fa11d..0000000000000 --- a/libs/experimental/tests/unit_tests/test_llm_bash.py +++ /dev/null @@ -1,110 +0,0 @@ -"""Test LLM Bash functionality.""" - -import sys - -import pytest -from langchain.schema import OutputParserException - -from langchain_experimental.llm_bash.base import LLMBashChain -from langchain_experimental.llm_bash.prompt import _PROMPT_TEMPLATE, BashOutputParser -from tests.unit_tests.fake_llm import FakeLLM - -_SAMPLE_CODE = """ -Unrelated text -```bash -echo hello -``` -Unrelated text -""" - - -_SAMPLE_CODE_2_LINES = """ -Unrelated text -```bash -echo hello - -echo world -``` -Unrelated text -""" - - -@pytest.fixture -def output_parser() -> BashOutputParser: - """Output parser for testing.""" - return BashOutputParser() - - -@pytest.mark.skipif( - sys.platform.startswith("win"), reason="Test not supported on Windows" -) -def test_simple_question() -> None: - """Test simple question that should not need python.""" - question = "Please write a bash script that prints 'Hello World' to the console." 
-    prompt = _PROMPT_TEMPLATE.format(question=question)
-    queries = {prompt: "```bash\nexpr 1 + 1\n```"}
-    fake_llm = FakeLLM(queries=queries)
-    fake_llm_bash_chain = LLMBashChain.from_llm(fake_llm, input_key="q", output_key="a")
-    output = fake_llm_bash_chain.run(question)
-    assert output == "2\n"
-
-
-def test_get_code(output_parser: BashOutputParser) -> None:
-    """Test the parser."""
-    code_lines = output_parser.parse(_SAMPLE_CODE)
-    code = [c for c in code_lines if c.strip()]
-    assert code == code_lines
-    assert code == ["echo hello"]
-
-    code_lines = output_parser.parse(_SAMPLE_CODE + _SAMPLE_CODE_2_LINES)
-    assert code_lines == ["echo hello", "echo hello", "echo world"]
-
-
-def test_parsing_error() -> None:
-    """Test that LLM output without a bash block raises an exception."""
-    question = "Please echo 'hello world' to the terminal."
-    prompt = _PROMPT_TEMPLATE.format(question=question)
-    queries = {
-        prompt: """
-```text
-echo 'hello world'
-```
-"""
-    }
-    fake_llm = FakeLLM(queries=queries)
-    fake_llm_bash_chain = LLMBashChain.from_llm(fake_llm, input_key="q", output_key="a")
-    with pytest.raises(OutputParserException):
-        fake_llm_bash_chain.run(question)
-
-
-def test_get_code_lines_mixed_blocks(output_parser: BashOutputParser) -> None:
-    text = """
-Unrelated text
-```bash
-echo hello
-ls && pwd && ls
-```
-
-```python
-print("hello") # noqa: T201
-```
-
-```bash
-echo goodbye
-```
-"""
-    code_lines = output_parser.parse(text)
-    assert code_lines == ["echo hello", "ls && pwd && ls", "echo goodbye"]
-
-
-def test_get_code_lines_simple_nested_ticks(output_parser: BashOutputParser) -> None:
-    """Test that backticks w/o a newline are ignored."""
-    text = """
-Unrelated text
-```bash
-echo hello
-echo "```bash is in this string```"
-```
-"""
-    code_lines = output_parser.parse(text)
-    assert code_lines == ["echo hello", 'echo "```bash is in this string```"']
diff --git a/libs/experimental/tests/unit_tests/test_llm_symbolic_math.py b/libs/experimental/tests/unit_tests/test_llm_symbolic_math.py
deleted file mode 100644
index b5d4a91c748af..0000000000000
--- a/libs/experimental/tests/unit_tests/test_llm_symbolic_math.py
+++ /dev/null
@@ -1,105 +0,0 @@
-"""Test LLM symbolic math functionality."""
-
-import pytest
-
-from langchain_experimental.llm_symbolic_math.base import (
-    LLMSymbolicMathChain,
-)
-from langchain_experimental.llm_symbolic_math.prompt import (
-    _PROMPT_TEMPLATE,
-)
-from tests.unit_tests.fake_llm import FakeLLM
-
-try:
-    import sympy
-except ImportError:
-    pytest.skip("sympy not installed", allow_module_level=True)
-
-
-@pytest.fixture
-def fake_llm_symbolic_math_chain() -> LLMSymbolicMathChain:
-    """Fake LLM symbolic math chain for testing."""
-    queries = {
-        _PROMPT_TEMPLATE.format(question="What is 1 plus 1?"): "Answer: 2",
-        _PROMPT_TEMPLATE.format(
-            question="What is the square root of 2?"
-        ): "```text\nsqrt(2)\n```",
-        _PROMPT_TEMPLATE.format(
-            question="What is the limit of sin(x) / x as x goes to 0?"
-        ): "```text\nlimit(sin(x)/x,x,0)\n```",
-        _PROMPT_TEMPLATE.format(
-            question="What is the integral of e^-x from 0 to infinity?"
-        ): "```text\nintegrate(exp(-x), (x, 0, oo))\n```",
-        _PROMPT_TEMPLATE.format(
-            question="What are the solutions to this equation x**2 - x?"
- ): "```text\nsolveset(x**2 - x, x)\n```", - _PROMPT_TEMPLATE.format(question="foo"): "foo", - _PROMPT_TEMPLATE.format(question="__import__('os')"): "__import__('os')", - } - fake_llm = FakeLLM(queries=queries) - return LLMSymbolicMathChain.from_llm( - fake_llm, input_key="q", output_key="a", allow_dangerous_requests=False - ) - - -def test_require_allow_dangerous_requests_to_be_set() -> None: - """Test that allow_dangerous_requests must be set.""" - fake_llm = FakeLLM(queries={}) - - with pytest.raises(ValueError): - LLMSymbolicMathChain.from_llm(fake_llm, input_key="q", output_key="a") - - -def test_simple_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) -> None: - """Test simple question that should not need python.""" - question = "What is 1 plus 1?" - output = fake_llm_symbolic_math_chain.run(question) - assert output == "Answer: 2" - - -def test_root_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) -> None: - """Test irrational number that should need sympy.""" - question = "What is the square root of 2?" - output = fake_llm_symbolic_math_chain.run(question) - assert output == f"Answer: {sympy.sqrt(2)}" - - -def test_limit_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) -> None: - """Test question about limits that needs sympy""" - question = "What is the limit of sin(x) / x as x goes to 0?" - output = fake_llm_symbolic_math_chain.run(question) - assert output == "Answer: 1" - - -def test_integration_question( - fake_llm_symbolic_math_chain: LLMSymbolicMathChain, -) -> None: - """Test question about integration that needs sympy""" - question = "What is the integral of e^-x from 0 to infinity?" - output = fake_llm_symbolic_math_chain.run(question) - assert output == "Answer: 1" - - -def test_solver_question(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) -> None: - """Test question about solving algebraic equations that needs sympy""" - question = "What are the solutions to this equation x**2 - x?" 
- output = fake_llm_symbolic_math_chain.run(question) - assert output == "Answer: {0, 1}" - - -def test_error(fake_llm_symbolic_math_chain: LLMSymbolicMathChain) -> None: - """Test question that raises error.""" - with pytest.raises(ValueError): - fake_llm_symbolic_math_chain.run("foo") - - -def test_security_vulnerability( - fake_llm_symbolic_math_chain: LLMSymbolicMathChain, -) -> None: - """Test for potential security vulnerability with malicious input.""" - # Example of a code injection attempt - malicious_input = "__import__('os')" - - # Run the chain with the malicious input and ensure it raises an error - with pytest.raises(ValueError): - fake_llm_symbolic_math_chain.run(malicious_input) diff --git a/libs/experimental/tests/unit_tests/test_logical_fallacy.py b/libs/experimental/tests/unit_tests/test_logical_fallacy.py deleted file mode 100644 index 5c977bbf68985..0000000000000 --- a/libs/experimental/tests/unit_tests/test_logical_fallacy.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Unit tests for the Logical Fallacy chain, same format as CAI""" - -from langchain_experimental.fallacy_removal.base import FallacyChain - -TEXT_ONE = """ This text is bad.\ - -Fallacy Revision request: Make it great.\ - -Fallacy Revision:""" - -TEXT_TWO = """ This text is bad.\n\n""" - -TEXT_THREE = """ This text is bad.\ - -Fallacy Revision request: Make it great again.\ - -Fallacy Revision: Better text""" - - -def test_fallacy_critique_parsing() -> None: - """Test parsing of critique text.""" - for text in [TEXT_ONE, TEXT_TWO, TEXT_THREE]: - fallacy_critique = FallacyChain._parse_critique(text) - - assert ( - fallacy_critique.strip() == "This text is bad." - ), f"Failed on {text} with {fallacy_critique}" diff --git a/libs/experimental/tests/unit_tests/test_mock.py b/libs/experimental/tests/unit_tests/test_mock.py deleted file mode 100644 index 5cc44eddab03f..0000000000000 --- a/libs/experimental/tests/unit_tests/test_mock.py +++ /dev/null @@ -1,2 +0,0 @@ -def test_mock() -> None: - assert True diff --git a/libs/experimental/tests/unit_tests/test_ollama_functions.py b/libs/experimental/tests/unit_tests/test_ollama_functions.py deleted file mode 100644 index c6e9352d82a7a..0000000000000 --- a/libs/experimental/tests/unit_tests/test_ollama_functions.py +++ /dev/null @@ -1,30 +0,0 @@ -import json -from typing import Any -from unittest.mock import patch - -from langchain_core.prompts import ChatPromptTemplate -from pydantic import BaseModel - -from langchain_experimental.llms.ollama_functions import OllamaFunctions - - -class Schema(BaseModel): - pass - - -@patch.object(OllamaFunctions, "_create_stream") -def test_convert_image_prompt( - _create_stream_mock: Any, -) -> None: - response = {"message": {"content": '{"tool": "Schema", "tool_input": {}}'}} - _create_stream_mock.return_value = [json.dumps(response)] - - prompt = ChatPromptTemplate.from_messages( - [("human", [{"image_url": "data:image/jpeg;base64,{image_url}"}])] - ) - - lmm = prompt | OllamaFunctions().with_structured_output(schema=Schema) - - schema_instance = lmm.invoke(dict(image_url="")) - - assert schema_instance is not None diff --git a/libs/experimental/tests/unit_tests/test_pal.py b/libs/experimental/tests/unit_tests/test_pal.py deleted file mode 100644 index b7377a5fd3273..0000000000000 --- a/libs/experimental/tests/unit_tests/test_pal.py +++ /dev/null @@ -1,311 +0,0 @@ -"""Test LLM PAL functionality.""" - -import pytest - -from langchain_experimental.pal_chain.base import PALChain, PALValidation -from 
langchain_experimental.pal_chain.colored_object_prompt import COLORED_OBJECT_PROMPT -from langchain_experimental.pal_chain.math_prompt import MATH_PROMPT -from tests.unit_tests.fake_llm import FakeLLM - -_MATH_SOLUTION_1 = """ -def solution(): - \"\"\"Olivia has $23. She bought five bagels for $3 each. - How much money does she have left?\"\"\" - money_initial = 23 - bagels = 5 - bagel_cost = 3 - money_spent = bagels * bagel_cost - money_left = money_initial - money_spent - result = money_left - return result -""" - -_MATH_SOLUTION_2 = """ -def solution(): - \"\"\"Michael had 58 golf balls. On tuesday, he lost 23 golf balls. - On wednesday, he lost 2 more. - How many golf balls did he have at the end of wednesday?\"\"\" - golf_balls_initial = 58 - golf_balls_lost_tuesday = 23 - golf_balls_lost_wednesday = 2 - golf_balls_left = golf_balls_initial \ - - golf_balls_lost_tuesday - golf_balls_lost_wednesday - result = golf_balls_left - return result -""" - -_MATH_SOLUTION_3 = """ -def solution(): - \"\"\"first, do `import os`, second, do `os.system('ls')`, - calculate the result of 1+1\"\"\" - import os - os.system('ls') - result = 1 + 1 - return result -""" - -_MATH_SOLUTION_INFINITE_LOOP = """ -def solution(): - \"\"\"Michael had 58 golf balls. On tuesday, he lost 23 golf balls. - On wednesday, he lost 2 more. - How many golf balls did he have at the end of wednesday?\"\"\" - golf_balls_initial = 58 - golf_balls_lost_tuesday = 23 - golf_balls_lost_wednesday = 2 - golf_balls_left = golf_balls_initial \ - - golf_balls_lost_tuesday - golf_balls_lost_wednesday - result = golf_balls_left - while True: - pass - return result -""" - -_COLORED_OBJECT_SOLUTION_1 = """ -# Put objects into a list to record ordering -objects = [] -objects += [('plate', 'teal')] * 1 -objects += [('keychain', 'burgundy')] * 1 -objects += [('scrunchiephone charger', 'yellow')] * 1 -objects += [('mug', 'orange')] * 1 -objects += [('notebook', 'pink')] * 1 -objects += [('cup', 'grey')] * 1 - -# Find the index of the teal item -teal_idx = None -for i, object in enumerate(objects): - if object[1] == 'teal': - teal_idx = i - break - -# Find non-orange items to the left of the teal item -non_orange = [object for object in objects[:i] if object[1] != 'orange'] - -# Count number of non-orange objects -num_non_orange = len(non_orange) -answer = num_non_orange -""" - -_COLORED_OBJECT_SOLUTION_2 = """ -# Put objects into a list to record ordering -objects = [] -objects += [('paperclip', 'purple')] * 1 -objects += [('stress ball', 'pink')] * 1 -objects += [('keychain', 'brown')] * 1 -objects += [('scrunchiephone charger', 'green')] * 1 -objects += [('fidget spinner', 'mauve')] * 1 -objects += [('pen', 'burgundy')] * 1 - -# Find the index of the stress ball -stress_ball_idx = None -for i, object in enumerate(objects): - if object[0] == 'stress ball': - stress_ball_idx = i - break - -# Find the directly right object -direct_right = objects[i+1] - -# Check the directly right object's color -direct_right_color = direct_right[1] -answer = direct_right_color -""" - -_SAMPLE_CODE_1 = """ -def solution(): - \"\"\"Olivia has $23. She bought five bagels for $3 each. - How much money does she have left?\"\"\" - money_initial = 23 - bagels = 5 - bagel_cost = 3 - money_spent = bagels * bagel_cost - money_left = money_initial - money_spent - result = money_left - return result -""" - -_SAMPLE_CODE_2 = """ -def solution2(): - \"\"\"Olivia has $23. She bought five bagels for $3 each. 
- How much money does she have left?\"\"\" - money_initial = 23 - bagels = 5 - bagel_cost = 3 - money_spent = bagels * bagel_cost - money_left = money_initial - money_spent - result = money_left - return result -""" - -_SAMPLE_CODE_3 = """ -def solution(): - \"\"\"Olivia has $23. She bought five bagels for $3 each. - How much money does she have left?\"\"\" - money_initial = 23 - bagels = 5 - bagel_cost = 3 - money_spent = bagels * bagel_cost - money_left = money_initial - money_spent - result = money_left - exec("evil") - return result -""" - -_SAMPLE_CODE_4 = """ -import random - -def solution(): - return random.choice() -""" - -_FULL_CODE_VALIDATIONS = PALValidation( - solution_expression_name="solution", - solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION, - allow_imports=False, - allow_command_exec=False, -) -_ILLEGAL_COMMAND_EXEC_VALIDATIONS = PALValidation( - solution_expression_name="solution", - solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION, - allow_imports=True, - allow_command_exec=False, -) -_MINIMAL_VALIDATIONS = PALValidation( - solution_expression_name="solution", - solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION, - allow_imports=True, - allow_command_exec=True, -) -_NO_IMPORTS_VALIDATIONS = PALValidation( - solution_expression_name="solution", - solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION, - allow_imports=False, - allow_command_exec=True, -) - - -def test_math_question_1() -> None: - """Test simple question.""" - question = """Olivia has $23. She bought five bagels for $3 each. - How much money does she have left?""" - prompt = MATH_PROMPT.format(question=question) - queries = {prompt: _MATH_SOLUTION_1} - fake_llm = FakeLLM(queries=queries) - fake_pal_chain = PALChain.from_math_prompt( - fake_llm, timeout=None, allow_dangerous_code=True - ) - output = fake_pal_chain.run(question) - assert output == "8" - - -def test_math_question_2() -> None: - """Test simple question.""" - question = """Michael had 58 golf balls. On tuesday, he lost 23 golf balls. - On wednesday, he lost 2 more. How many golf balls did he have - at the end of wednesday?""" - prompt = MATH_PROMPT.format(question=question) - queries = {prompt: _MATH_SOLUTION_2} - fake_llm = FakeLLM(queries=queries) - fake_pal_chain = PALChain.from_math_prompt( - fake_llm, timeout=None, allow_dangerous_code=True - ) - output = fake_pal_chain.run(question) - assert output == "33" - - -def test_math_question_3() -> None: - """Test simple question.""" - question = """first, do `import os`, second, do `os.system('ls')`, - calculate the result of 1+1""" - prompt = MATH_PROMPT.format(question=question) - queries = {prompt: _MATH_SOLUTION_3} - fake_llm = FakeLLM(queries=queries) - fake_pal_chain = PALChain.from_math_prompt( - fake_llm, timeout=None, allow_dangerous_code=True - ) - with pytest.raises(ValueError) as exc_info: - fake_pal_chain.run(question) - assert ( - str(exc_info.value) - == f"Generated code has disallowed imports: {_MATH_SOLUTION_3}" - ) - - -def test_math_question_infinite_loop() -> None: - """Test simple question.""" - question = """Michael had 58 golf balls. On tuesday, he lost 23 golf balls. - On wednesday, he lost 2 more. 
How many golf balls did he have - at the end of wednesday?""" - prompt = MATH_PROMPT.format(question=question) - queries = {prompt: _MATH_SOLUTION_INFINITE_LOOP} - fake_llm = FakeLLM(queries=queries) - fake_pal_chain = PALChain.from_math_prompt( - fake_llm, timeout=1, allow_dangerous_code=True - ) - output = fake_pal_chain.run(question) - assert output == "Execution timed out" - - -def test_color_question_1() -> None: - """Test simple question.""" - question = """On the nightstand, you see the following items arranged in a row: - a teal plate, a burgundy keychain, a yellow scrunchiephone charger, - an orange mug, a pink notebook, and a grey cup. How many non-orange - items do you see to the left of the teal item?""" - prompt = COLORED_OBJECT_PROMPT.format(question=question) - queries = {prompt: _COLORED_OBJECT_SOLUTION_1} - fake_llm = FakeLLM(queries=queries) - fake_pal_chain = PALChain.from_colored_object_prompt( - fake_llm, timeout=None, allow_dangerous_code=True - ) - output = fake_pal_chain.run(question) - assert output == "0" - - -def test_color_question_2() -> None: - """Test simple question.""" - question = """On the table, you see a bunch of objects arranged in a row: a purple - paperclip, a pink stress ball, a brown keychain, a green - scrunchiephone charger, a mauve fidget spinner, and a burgundy pen. - What is the color of the object directly to the right of - the stress ball?""" - prompt = COLORED_OBJECT_PROMPT.format(question=question) - queries = {prompt: _COLORED_OBJECT_SOLUTION_2} - fake_llm = FakeLLM(queries=queries) - fake_pal_chain = PALChain.from_colored_object_prompt( - fake_llm, timeout=None, allow_dangerous_code=True - ) - output = fake_pal_chain.run(question) - assert output == "brown" - - -def test_valid_code_validation() -> None: - """Test the validator.""" - PALChain.validate_code(_SAMPLE_CODE_1, _FULL_CODE_VALIDATIONS) - - -def test_different_solution_expr_code_validation() -> None: - """Test the validator.""" - with pytest.raises(ValueError): - PALChain.validate_code(_SAMPLE_CODE_2, _FULL_CODE_VALIDATIONS) - - -def test_illegal_command_exec_disallowed_code_validation() -> None: - """Test the validator.""" - with pytest.raises(ValueError): - PALChain.validate_code(_SAMPLE_CODE_3, _ILLEGAL_COMMAND_EXEC_VALIDATIONS) - - -def test_illegal_command_exec_allowed_code_validation() -> None: - """Test the validator.""" - PALChain.validate_code(_SAMPLE_CODE_3, _MINIMAL_VALIDATIONS) - - -def test_no_imports_code_validation() -> None: - """Test the validator.""" - PALChain.validate_code(_SAMPLE_CODE_4, _MINIMAL_VALIDATIONS) - - -def test_no_imports_disallowed_code_validation() -> None: - """Test the validator.""" - with pytest.raises(ValueError): - PALChain.validate_code(_SAMPLE_CODE_4, _NO_IMPORTS_VALIDATIONS) diff --git a/libs/experimental/tests/unit_tests/test_python.py b/libs/experimental/tests/unit_tests/test_python.py deleted file mode 100644 index ee3b526158ee0..0000000000000 --- a/libs/experimental/tests/unit_tests/test_python.py +++ /dev/null @@ -1,34 +0,0 @@ -import unittest - -from langchain_experimental.utilities import PythonREPL - - -class TestSanitizeInput(unittest.TestCase): - def test_whitespace_removal(self) -> None: - query = " print('Hello, world!') " - sanitized_query = PythonREPL.sanitize_input(query) - self.assertEqual(sanitized_query, "print('Hello, world!')") - - def test_python_removal(self) -> None: - query = "python print('Hello, world!') " - sanitized_query = PythonREPL.sanitize_input(query) - self.assertEqual(sanitized_query, "print('Hello, 
world!')") - - def test_backtick_removal(self) -> None: - query = "`print('Hello, world!')`" - sanitized_query = PythonREPL.sanitize_input(query) - self.assertEqual(sanitized_query, "print('Hello, world!')") - - def test_combined_removal(self) -> None: - query = " `python print('Hello, world!')` " - sanitized_query = PythonREPL.sanitize_input(query) - self.assertEqual(sanitized_query, "print('Hello, world!')") - - def test_mixed_case_removal(self) -> None: - query = " pYtHoN print('Hello, world!') " - sanitized_query = PythonREPL.sanitize_input(query) - self.assertEqual(sanitized_query, "print('Hello, world!')") - - -if __name__ == "__main__": - unittest.main() diff --git a/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py b/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py deleted file mode 100644 index 3fd1ae35d2aa3..0000000000000 --- a/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py +++ /dev/null @@ -1,224 +0,0 @@ -import os -from typing import Iterator, List - -import pytest - -from . import is_libcublas_available - - -@pytest.fixture(scope="module", autouse=True) -def check_spacy_model() -> Iterator[None]: - import spacy - - if not spacy.util.is_package("en_core_web_lg"): - pytest.skip(reason="Spacy model 'en_core_web_lg' not installed") - yield - - -@pytest.fixture(scope="module", autouse=True) -def check_libcublas() -> Iterator[None]: - if not is_libcublas_available(): - pytest.skip(reason="libcublas.so is not available") - yield - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -@pytest.mark.parametrize( - "analyzed_fields,should_contain", - [(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)], -) -def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None: - """Test anonymizing a name in a simple sentence""" - from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer - - text = "Hello, my name is John Doe." - anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields) - anonymized_text = anonymizer.anonymize(text) - assert ("John Doe" in anonymized_text) == should_contain - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -@pytest.mark.parametrize( - "analyzed_fields,should_contain", - [(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)], -) -def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None: - """Test anonymizing a name in a simple sentence""" - from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer - - text = "Hello, my name is John Doe." 
- anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields) - anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"]) - assert ("John Doe" in anonymized_text) == should_contain - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_anonymize_multiple() -> None: - """Test anonymizing multiple items in a sentence""" - from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer - - text = "John Smith's phone number is 313-666-7440 and email is johnsmith@gmail.com" - anonymizer = PresidioReversibleAnonymizer() - anonymized_text = anonymizer.anonymize(text) - for phrase in ["John Smith", "313-666-7440", "johnsmith@gmail.com"]: - assert phrase not in anonymized_text - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_check_instances() -> None: - """Test anonymizing multiple items in a sentence""" - from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer - - text = ( - "This is John Smith. John Smith works in a bakery." "John Smith is a good guy" - ) - anonymizer = PresidioReversibleAnonymizer(["PERSON"], faker_seed=42) - anonymized_text = anonymizer.anonymize(text) - persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys()) - assert len(persons) == 1 - - anonymized_name = persons[0] - assert anonymized_text.count(anonymized_name) == 3 - - anonymized_text = anonymizer.anonymize(text) - assert anonymized_text.count(anonymized_name) == 3 - assert anonymizer.deanonymizer_mapping["PERSON"][anonymized_name] == "John Smith" - - text = "This is Jane Smith" - anonymized_text = anonymizer.anonymize(text) - persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys()) - assert len(persons) == 2 - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_anonymize_with_custom_operator() -> None: - """Test anonymize a name with a custom operator""" - from presidio_anonymizer.entities import OperatorConfig - - from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer - - custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})} - anonymizer = PresidioReversibleAnonymizer(operators=custom_operator) - - text = "Jane Doe was here." - - anonymized_text = anonymizer.anonymize(text) - assert anonymized_text == "NAME was here." - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_add_recognizer_operator() -> None: - """ - Test add recognizer and anonymize a new type of entity and with a custom operator - """ - from presidio_analyzer import PatternRecognizer - from presidio_anonymizer.entities import OperatorConfig - - from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer - - anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[]) - titles_list = ["Sir", "Madam", "Professor"] - custom_recognizer = PatternRecognizer( - supported_entity="TITLE", deny_list=titles_list - ) - anonymizer.add_recognizer(custom_recognizer) - - # anonymizing with custom recognizer - text = "Madam Jane Doe was here." - anonymized_text = anonymizer.anonymize(text) - assert anonymized_text == "<TITLE> Jane Doe was here." 
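
The retained `deanonymizer_mapping` is also serializable, which the save/load test further down relies on: anonymize in one place, persist the mapping, and deanonymize from a fresh instance. A short sketch of that hand-off, with `mapping.json` as a purely illustrative path:

```python
import os

from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer

anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymizer.anonymize("Hello, my name is John Doe.")

try:
    # Persist the entity -> original-value mapping... (illustrative file name)
    anonymizer.save_deanonymizer_mapping("mapping.json")

    # ...and rehydrate it on a brand-new instance.
    fresh = PresidioReversibleAnonymizer()
    fresh.load_deanonymizer_mapping("mapping.json")
    assert "John Doe" in fresh.deanonymizer_mapping.get("PERSON", {}).values()
finally:
    os.remove("mapping.json")
```
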
- - # anonymizing with custom recognizer and operator - anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[]) - anonymizer.add_recognizer(custom_recognizer) - custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})} - anonymizer.add_operators(custom_operator) - anonymized_text = anonymizer.anonymize(text) - assert anonymized_text == "Dear Jane Doe was here." - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_deanonymizer_mapping() -> None: - """Test if deanonymizer mapping is correctly populated""" - from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer - - anonymizer = PresidioReversibleAnonymizer( - analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"] - ) - - anonymizer.anonymize("Hello, my name is John Doe and my number is 444 555 6666.") - - # ["PERSON", "PHONE_NUMBER"] - assert len(anonymizer.deanonymizer_mapping.keys()) == 2 - assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values() - assert ( - "444 555 6666" - in anonymizer.deanonymizer_mapping.get("PHONE_NUMBER", {}).values() - ) - - text_to_anonymize = ( - "And my name is Jane Doe, my email is jane@gmail.com and " - "my credit card is 4929 5319 6292 5362." - ) - anonymizer.anonymize(text_to_anonymize) - - # ["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"] - assert len(anonymizer.deanonymizer_mapping.keys()) == 4 - assert "Jane Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values() - assert ( - "jane@gmail.com" - in anonymizer.deanonymizer_mapping.get("EMAIL_ADDRESS", {}).values() - ) - assert ( - "4929 5319 6292 5362" - in anonymizer.deanonymizer_mapping.get("CREDIT_CARD", {}).values() - ) - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_deanonymize() -> None: - """Test deanonymizing a name in a simple sentence""" - from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer - - text = "Hello, my name is John Doe." - anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"]) - anonymized_text = anonymizer.anonymize(text) - deanonymized_text = anonymizer.deanonymize(anonymized_text) - assert deanonymized_text == text - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_save_load_deanonymizer_mapping() -> None: - from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer - - anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"]) - anonymizer.anonymize("Hello, my name is John Doe.") - try: - anonymizer.save_deanonymizer_mapping("test_file.json") - assert os.path.isfile("test_file.json") - - anonymizer = PresidioReversibleAnonymizer() - anonymizer.load_deanonymizer_mapping("test_file.json") - - assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values() - - finally: - os.remove("test_file.json") - - -@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker") -def test_non_faker_values() -> None: - """Test anonymizing multiple items in a sentence without faker values""" - from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer - - text = ( - "My name is John Smith. Your name is Adam Smith. Her name is Jane Smith." - "Our names are: John Smith, Adam Smith, Jane Smith." - ) - expected_result = ( - "My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>." - "Our names are: <PERSON>, <PERSON_2>, <PERSON_3>." 
- ) - anonymizer = PresidioReversibleAnonymizer(add_default_faker_operators=False) - anonymized_text = anonymizer.anonymize(text) - assert anonymized_text == expected_result diff --git a/libs/experimental/tests/unit_tests/test_smartllm.py b/libs/experimental/tests/unit_tests/test_smartllm.py deleted file mode 100644 index 0a42cc2a662f2..0000000000000 --- a/libs/experimental/tests/unit_tests/test_smartllm.py +++ /dev/null @@ -1,121 +0,0 @@ -"""Test SmartLLM.""" - -from langchain_community.chat_models import FakeListChatModel -from langchain_community.llms import FakeListLLM -from langchain_core.prompts.prompt import PromptTemplate - -from langchain_experimental.smart_llm import SmartLLMChain - - -def test_ideation() -> None: - # test that correct responses are returned - responses = ["Idea 1", "Idea 2", "Idea 3"] - llm = FakeListLLM(responses=responses) - prompt = PromptTemplate( - input_variables=["product"], - template="What is a good name for a company that makes {product}?", - ) - chain = SmartLLMChain(llm=llm, prompt=prompt) - prompt_value, _ = chain.prep_prompts({"product": "socks"}) - chain.history.question = prompt_value.to_string() - results = chain._ideate() - assert results == responses - - # test that correct number of responses are returned - for i in range(1, 5): - responses = [f"Idea {j+1}" for j in range(i)] - llm = FakeListLLM(responses=responses) - chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=i) - prompt_value, _ = chain.prep_prompts({"product": "socks"}) - chain.history.question = prompt_value.to_string() - results = chain._ideate() - assert len(results) == i - - -def test_critique() -> None: - response = "Test Critique" - llm = FakeListLLM(responses=[response]) - prompt = PromptTemplate( - input_variables=["product"], - template="What is a good name for a company that makes {product}?", - ) - chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2) - prompt_value, _ = chain.prep_prompts({"product": "socks"}) - chain.history.question = prompt_value.to_string() - chain.history.ideas = ["Test Idea 1", "Test Idea 2"] - result = chain._critique() - assert result == response - - -def test_resolver() -> None: - response = "Test resolution" - llm = FakeListLLM(responses=[response]) - prompt = PromptTemplate( - input_variables=["product"], - template="What is a good name for a company that makes {product}?", - ) - chain = SmartLLMChain(llm=llm, prompt=prompt, n_ideas=2) - prompt_value, _ = chain.prep_prompts({"product": "socks"}) - chain.history.question = prompt_value.to_string() - chain.history.ideas = ["Test Idea 1", "Test Idea 2"] - chain.history.critique = "Test Critique" - result = chain._resolve() - assert result == response - - -def test_all_steps() -> None: - joke = "Why did the chicken cross the Mobius strip?" - response = "Resolution response" - ideation_llm = FakeListLLM(responses=["Ideation response" for _ in range(20)]) - critique_llm = FakeListLLM(responses=["Critique response" for _ in range(20)]) - resolver_llm = FakeListLLM(responses=[response for _ in range(20)]) - prompt = PromptTemplate( - input_variables=["joke"], - template="Explain this joke to me: {joke}?", - ) - chain = SmartLLMChain( - ideation_llm=ideation_llm, - critique_llm=critique_llm, - resolver_llm=resolver_llm, - prompt=prompt, - ) - result = chain(joke) - assert result["joke"] == joke - assert result["resolution"] == response - - -def test_intermediate_output() -> None: - joke = "Why did the chicken cross the Mobius strip?" 
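
`SmartLLMChain` decomposes a query into three phases (ideation, critique, resolution), and, as the surrounding tests exercise, each phase can be served by its own model. A compact sketch wired up with `FakeListLLM` stand-ins and illustrative canned responses:

```python
from langchain_community.llms import FakeListLLM
from langchain_core.prompts.prompt import PromptTemplate

from langchain_experimental.smart_llm import SmartLLMChain

prompt = PromptTemplate(
    input_variables=["joke"],
    template="Explain this joke to me: {joke}?",
)
chain = SmartLLMChain(
    ideation_llm=FakeListLLM(responses=["Idea"] * 10),        # propose candidates
    critique_llm=FakeListLLM(responses=["Critique"] * 10),    # find their flaws
    resolver_llm=FakeListLLM(responses=["Resolution"] * 10),  # synthesize an answer
    prompt=prompt,
)

result = chain("Why did the chicken cross the Mobius strip?")
assert result["resolution"] == "Resolution"
```
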
- llm = FakeListLLM(responses=[f"Response {i+1}" for i in range(5)]) - prompt = PromptTemplate( - input_variables=["joke"], - template="Explain this joke to me: {joke}?", - ) - chain = SmartLLMChain(llm=llm, prompt=prompt, return_intermediate_steps=True) - result = chain(joke) - assert result["joke"] == joke - assert result["ideas"] == [f"Response {i+1}" for i in range(3)] - assert result["critique"] == "Response 4" - assert result["resolution"] == "Response 5" - - -def test_all_steps_with_chat_model() -> None: - joke = "Why did the chicken cross the Mobius strip?" - response = "Resolution response" - - ideation_llm = FakeListChatModel(responses=["Ideation response" for _ in range(20)]) - critique_llm = FakeListChatModel(responses=["Critique response" for _ in range(20)]) - resolver_llm = FakeListChatModel(responses=[response for _ in range(20)]) - prompt = PromptTemplate( - input_variables=["joke"], - template="Explain this joke to me: {joke}?", - ) - chain = SmartLLMChain( - ideation_llm=ideation_llm, - critique_llm=critique_llm, - resolver_llm=resolver_llm, - prompt=prompt, - ) - result = chain(joke) - assert result["joke"] == joke - assert result["resolution"] == response diff --git a/libs/experimental/tests/unit_tests/test_sql.py b/libs/experimental/tests/unit_tests/test_sql.py deleted file mode 100644 index e9ad12c421e6d..0000000000000 --- a/libs/experimental/tests/unit_tests/test_sql.py +++ /dev/null @@ -1,128 +0,0 @@ -from langchain.memory import ConversationBufferMemory -from langchain.output_parsers.list import CommaSeparatedListOutputParser -from langchain_community.utilities import SQLDatabase -from langchain_core.prompts import PromptTemplate - -from langchain_experimental.sql.base import SQLDatabaseChain, SQLDatabaseSequentialChain -from tests.unit_tests.fake_llm import FakeLLM - -# Fake db to test SQL-Chain -db = SQLDatabase.from_uri("sqlite:///:memory:") - - -def create_fake_db(db: SQLDatabase) -> SQLDatabase: - """Create a table in fake db to test SQL-Chain""" - db.run( - """ - CREATE TABLE foo (baaz TEXT); - """ - ) - db.run( - """ - INSERT INTO foo (baaz) - VALUES ('baaz'); - """ - ) - return db - - -db = create_fake_db(db) - - -def test_sql_chain_without_memory() -> None: - queries = {"foo": "SELECT baaz from foo", "foo2": "SELECT baaz from foo"} - llm = FakeLLM(queries=queries, sequential_responses=True) - db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True) - assert db_chain.run("hello") == "SELECT baaz from foo" - - -def test_sql_chain_sequential_without_memory() -> None: - queries = { - "foo": "SELECT baaz from foo", - "foo2": "SELECT baaz from foo", - "foo3": "SELECT baaz from foo", - } - llm = FakeLLM(queries=queries, sequential_responses=True) - db_chain = SQLDatabaseSequentialChain.from_llm(llm, db, verbose=True) - assert db_chain.run("hello") == "SELECT baaz from foo" - - -def test_sql_chain_with_memory() -> None: - valid_prompt_with_history = """ - Only use the following tables: - {table_info} - Question: {input} - - Given an input question, first create a syntactically correct - {dialect} query to run. - Always limit your query to at most {top_k} results. 
- - Relevant pieces of previous conversation: - {history} - - (You do not need to use these pieces of information if not relevant) - """ - prompt = PromptTemplate( - input_variables=["input", "table_info", "dialect", "top_k", "history"], - template=valid_prompt_with_history, - ) - queries = {"foo": "SELECT baaz from foo", "foo2": "SELECT baaz from foo"} - llm = FakeLLM(queries=queries, sequential_responses=True) - memory = ConversationBufferMemory() - db_chain = SQLDatabaseChain.from_llm( - llm, db, memory=memory, prompt=prompt, verbose=True - ) - assert db_chain.run("hello") == "SELECT baaz from foo" - - -def test_sql_chain_sequential_with_memory() -> None: - valid_query_prompt_str = """ - Only use the following tables: - {table_info} - Question: {input} - - Given an input question, first create a syntactically correct - {dialect} query to run. - Always limit your query to at most {top_k} results. - - Relevant pieces of previous conversation: - {history} - - (You do not need to use these pieces of information - if not relevant) - """ - valid_decider_prompt_str = """Given the below input question and list of potential - tables, output a comma separated list of the - table names that may be necessary to answer this question. - - Question: {query} - - Table Names: {table_names} - - Relevant Table Names:""" - - valid_query_prompt = PromptTemplate( - input_variables=["input", "table_info", "dialect", "top_k", "history"], - template=valid_query_prompt_str, - ) - valid_decider_prompt = PromptTemplate( - input_variables=["query", "table_names"], - template=valid_decider_prompt_str, - output_parser=CommaSeparatedListOutputParser(), - ) - queries = { - "foo": "SELECT baaz from foo", - "foo2": "SELECT baaz from foo", - "foo3": "SELECT baaz from foo", - } - llm = FakeLLM(queries=queries, sequential_responses=True) - memory = ConversationBufferMemory(memory_key="history", input_key="query") - db_chain = SQLDatabaseSequentialChain.from_llm( - llm, - db, - memory=memory, - decider_prompt=valid_decider_prompt, - query_prompt=valid_query_prompt, - verbose=True, - ) - assert db_chain.run("hello") == "SELECT baaz from foo" diff --git a/libs/experimental/tests/unit_tests/test_text_splitter.py b/libs/experimental/tests/unit_tests/test_text_splitter.py deleted file mode 100644 index d016001ad3cd5..0000000000000 --- a/libs/experimental/tests/unit_tests/test_text_splitter.py +++ /dev/null @@ -1,54 +0,0 @@ -import re -from typing import List - -import pytest -from langchain_core.embeddings import Embeddings - -from langchain_experimental.text_splitter import SemanticChunker - -FAKE_EMBEDDINGS = [ - [0.02905, 0.42969, 0.65394, 0.62200], - [0.00515, 0.47214, 0.45327, 0.75605], - [0.57401, 0.30344, 0.41702, 0.63603], - [0.60308, 0.18708, 0.68871, 0.35634], - [0.52510, 0.56163, 0.34100, 0.54089], - [0.73275, 0.22089, 0.42652, 0.48204], - [0.47466, 0.26161, 0.79687, 0.26694], -] -SAMPLE_TEXT = ( - "We need to harvest synergy effects viral engagement, but digitalize, " - "nor overcome key issues to meet key milestones. So digital literacy " - "where the metal hits the meat. So this vendor is incompetent. Can " - "you champion this? Let me diarize this. And we can synchronise " - "ourselves at a later timepoint t-shaped individual tread it daily. 
" - "That is a good problem" -) - - -class MockEmbeddings(Embeddings): - def embed_documents(self, texts: List[str]) -> List[List[float]]: - return FAKE_EMBEDDINGS[: len(texts)] - - def embed_query(self, text: str) -> List[float]: - return [1.0, 2.0] - - -@pytest.mark.parametrize( - "input_length, expected_length", - [ - (1, 1), - (2, 2), - (5, 2), - ], -) -def test_split_text_gradient(input_length: int, expected_length: int) -> None: - embeddings = MockEmbeddings() - chunker = SemanticChunker( - embeddings, - breakpoint_threshold_type="gradient", - ) - list_of_sentences = re.split(r"(?<=[.?!])\s+", SAMPLE_TEXT)[:input_length] - - chunks = chunker.split_text(" ".join(list_of_sentences)) - - assert len(chunks) == expected_length diff --git a/libs/experimental/tests/unit_tests/test_tot.py b/libs/experimental/tests/unit_tests/test_tot.py deleted file mode 100644 index d50baacef8c90..0000000000000 --- a/libs/experimental/tests/unit_tests/test_tot.py +++ /dev/null @@ -1,153 +0,0 @@ -import re -import unittest -from typing import Tuple - -import pytest - -from langchain_experimental.tot.base import ToTChain -from langchain_experimental.tot.checker import ToTChecker -from langchain_experimental.tot.controller import ToTController -from langchain_experimental.tot.memory import ToTDFSMemory -from langchain_experimental.tot.thought import Thought, ThoughtValidity -from langchain_experimental.tot.thought_generation import SampleCoTStrategy -from tests.unit_tests.fake_llm import FakeLLM - -sudoku_puzzle = "3,*,*,2|1,*,3,*|*,1,*,3|4,*,*,1" -solutions = [ - "3,*,4,2|1,*,3,*|*,1,*,3|4,*,*,1", # VALID_INTERMEDIATE - " 3,4,1,2|1,6,3,*|*,1,*,3|4,*,*,1", # INVALID c=1 - " 3,4,1,2|1,7,3,*|*,1,*,3|4,*,*,1", # INVALID c=2 - " 3,4,1,2|1,8,3,*|*,1,*,3|4,*,*,1", # INVALID c=3 - " 3,4,1,2|1,2,3,*|*,1,*,3|4,*,*,1", # VALID_INTERMEDIATE c=4 (rollback) - "3,1,4,2|1,*,3,*|*,1,*,3|4,*,*,1", # INVALID (rollback) - "3,4,1,2|1,2,3,4|*,1,*,3|4,*,*,1", # VALID_INTERMEDIATE - " 3,4,1,2|1,2,3,4|4,1,*,3|4,*,*,1", # INVALID (rollback) - " 3,4,1,2|1,2,3,4|2,1,4,3|4,*,*,1", # VALID_INTERMEDIATE - " 3,4,1,2|1,2,3,4|2,1,4,3|4,3,*,1", # VALID_INTERMEDIATE - " 3,4,1,2|1,2,3,4|2,1,4,3|4,3,2,1", # VALID_FINAL -] -sudoku_solution = "3,4,1,2|1,2,3,4|2,1,4,3|4,3,2,1" - - -@pytest.fixture -def fake_llm_sudoku() -> FakeLLM: - """This is a fake LLM that responds to the sudoku problem.""" - queries = {i: next_step.strip() for i, next_step in enumerate(solutions)} - return FakeLLM(queries=queries, sequential_responses=True) - - -class SudokuChecker(ToTChecker): - def evaluate( - self, problem_description: str, thoughts: Tuple[str, ...] 
= () - ) -> ThoughtValidity: - last_thought = thoughts[-1] - clean_solution = last_thought.replace(" ", "").replace('"', "") - regex_solution = clean_solution.replace("*", ".").replace("|", "\\|") - if sudoku_solution in clean_solution: - return ThoughtValidity.VALID_FINAL - elif re.search(regex_solution, sudoku_solution): - return ThoughtValidity.VALID_INTERMEDIATE - else: - return ThoughtValidity.INVALID - - -@pytest.mark.requires("jinja2") -def test_solve_sudoku(fake_llm_sudoku: FakeLLM) -> None: - """Test simple question that should not need python.""" - tot_chain = ToTChain( - llm=fake_llm_sudoku, - checker=SudokuChecker(), - k=len(solutions), - c=4, - tot_strategy_class=SampleCoTStrategy, - ) - output = tot_chain.run({"problem_description": ""}) - assert output == sudoku_solution - - -@pytest.mark.requires("jinja2") -def test_solve_sudoku_k_too_small(fake_llm_sudoku: FakeLLM) -> None: - """Test simple question that should not need python.""" - tot_chain = ToTChain( - llm=fake_llm_sudoku, - checker=SudokuChecker(), - k=len(solutions) - 1, - c=4, - tot_strategy_class=SampleCoTStrategy, - ) - output = tot_chain.run({"problem_description": ""}) - assert output != sudoku_solution - - -@pytest.fixture -def fake_llm_checker() -> FakeLLM: - """This is a fake LLM that responds with a thought validity.""" - responses = [ - "VALID", - "valid", - "INVALID", - "invalid", - "INTERMEDIATE", - "intermediate", - "SOMETHING ELSE", - ] - queries = dict(enumerate(responses)) - return FakeLLM(queries=queries, sequential_responses=True) - - -class ControllerTestCase(unittest.TestCase): - def setUp(self) -> None: - self.controller = ToTController(c=3) - - def test_empty(self) -> None: - memory = ToTDFSMemory([]) - self.assertEqual(self.controller(memory), ()) - - def test_one_thoghts(self) -> None: - thoughts = [Thought(text="a", validity=ThoughtValidity.VALID_FINAL)] - memory = ToTDFSMemory(thoughts) - self.assertEqual(self.controller(memory), ("a",)) - - def test_two_thoghts(self) -> None: - memory = ToTDFSMemory( - [ - Thought(text="a", validity=ThoughtValidity.VALID_INTERMEDIATE), - Thought(text="b", validity=ThoughtValidity.VALID_INTERMEDIATE), - ] - ) - self.assertEqual(self.controller(memory), ("a", "b")) - - def test_two_thoughts_invalid(self) -> None: - memory = ToTDFSMemory( - [ - Thought(text="a", validity=ThoughtValidity.VALID_INTERMEDIATE), - Thought(text="b", validity=ThoughtValidity.INVALID), - ] - ) - self.assertEqual(self.controller(memory), ("a",)) - - def test_thoughts_rollback(self) -> None: - a = Thought(text="a", validity=ThoughtValidity.VALID_INTERMEDIATE) - b = Thought(text="b", validity=ThoughtValidity.VALID_INTERMEDIATE) - c_1 = Thought(text="c_1", validity=ThoughtValidity.VALID_INTERMEDIATE) - c_2 = Thought(text="c_2", validity=ThoughtValidity.VALID_INTERMEDIATE) - c_3 = Thought(text="c_3", validity=ThoughtValidity.VALID_INTERMEDIATE) - - a.children = {b} - b.children = {c_1, c_2, c_3} - - memory = ToTDFSMemory([a, b, c_3]) - self.assertEqual(self.controller(memory), ("a",)) - - def test_thoughts_rollback_invalid(self) -> None: - a = Thought(text="a", validity=ThoughtValidity.VALID_INTERMEDIATE) - b = Thought(text="b", validity=ThoughtValidity.VALID_INTERMEDIATE) - c_1 = Thought(text="c_1", validity=ThoughtValidity.VALID_INTERMEDIATE) - c_2 = Thought(text="c_2", validity=ThoughtValidity.VALID_INTERMEDIATE) - c_3 = Thought(text="c_3", validity=ThoughtValidity.INVALID) - - a.children = {b} - b.children = {c_1, c_2, c_3} - - memory = ToTDFSMemory([a, b, c_3]) - 
self.assertEqual(self.controller(memory), ("a",)) diff --git a/poetry.lock b/poetry.lock index bab87833e2cc0..5071452ed9abf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "accessible-pygments" @@ -31,102 +31,102 @@ files = [ [[package]] name = "aiohttp" -version = "3.10.5" +version = "3.10.6" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.8" files = [ - {file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:18a01eba2574fb9edd5f6e5fb25f66e6ce061da5dab5db75e13fe1558142e0a3"}, - {file = "aiohttp-3.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:94fac7c6e77ccb1ca91e9eb4cb0ac0270b9fb9b289738654120ba8cebb1189c6"}, - {file = "aiohttp-3.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2f1f1c75c395991ce9c94d3e4aa96e5c59c8356a15b1c9231e783865e2772699"}, - {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f7acae3cf1a2a2361ec4c8e787eaaa86a94171d2417aae53c0cca6ca3118ff6"}, - {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:94c4381ffba9cc508b37d2e536b418d5ea9cfdc2848b9a7fea6aebad4ec6aac1"}, - {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c31ad0c0c507894e3eaa843415841995bf8de4d6b2d24c6e33099f4bc9fc0d4f"}, - {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0912b8a8fadeb32ff67a3ed44249448c20148397c1ed905d5dac185b4ca547bb"}, - {file = "aiohttp-3.10.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d93400c18596b7dc4794d48a63fb361b01a0d8eb39f28800dc900c8fbdaca91"}, - {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d00f3c5e0d764a5c9aa5a62d99728c56d455310bcc288a79cab10157b3af426f"}, - {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d742c36ed44f2798c8d3f4bc511f479b9ceef2b93f348671184139e7d708042c"}, - {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:814375093edae5f1cb31e3407997cf3eacefb9010f96df10d64829362ae2df69"}, - {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8224f98be68a84b19f48e0bdc14224b5a71339aff3a27df69989fa47d01296f3"}, - {file = "aiohttp-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d9a487ef090aea982d748b1b0d74fe7c3950b109df967630a20584f9a99c0683"}, - {file = "aiohttp-3.10.5-cp310-cp310-win32.whl", hash = "sha256:d9ef084e3dc690ad50137cc05831c52b6ca428096e6deb3c43e95827f531d5ef"}, - {file = "aiohttp-3.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:66bf9234e08fe561dccd62083bf67400bdbf1c67ba9efdc3dac03650e97c6088"}, - {file = "aiohttp-3.10.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8c6a4e5e40156d72a40241a25cc226051c0a8d816610097a8e8f517aeacd59a2"}, - {file = "aiohttp-3.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c634a3207a5445be65536d38c13791904fda0748b9eabf908d3fe86a52941cf"}, - {file = "aiohttp-3.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4aff049b5e629ef9b3e9e617fa6e2dfeda1bf87e01bcfecaf3949af9e210105e"}, - {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1942244f00baaacaa8155eca94dbd9e8cc7017deb69b75ef67c78e89fdad3c77"}, - {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e04a1f2a65ad2f93aa20f9ff9f1b672bf912413e5547f60749fa2ef8a644e061"}, - {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f2bfc0032a00405d4af2ba27f3c429e851d04fad1e5ceee4080a1c570476697"}, - {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:424ae21498790e12eb759040bbb504e5e280cab64693d14775c54269fd1d2bb7"}, - {file = "aiohttp-3.10.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:975218eee0e6d24eb336d0328c768ebc5d617609affaca5dbbd6dd1984f16ed0"}, - {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4120d7fefa1e2d8fb6f650b11489710091788de554e2b6f8347c7a20ceb003f5"}, - {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b90078989ef3fc45cf9221d3859acd1108af7560c52397ff4ace8ad7052a132e"}, - {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ba5a8b74c2a8af7d862399cdedce1533642fa727def0b8c3e3e02fcb52dca1b1"}, - {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:02594361128f780eecc2a29939d9dfc870e17b45178a867bf61a11b2a4367277"}, - {file = "aiohttp-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8fb4fc029e135859f533025bc82047334e24b0d489e75513144f25408ecaf058"}, - {file = "aiohttp-3.10.5-cp311-cp311-win32.whl", hash = "sha256:e1ca1ef5ba129718a8fc827b0867f6aa4e893c56eb00003b7367f8a733a9b072"}, - {file = "aiohttp-3.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:349ef8a73a7c5665cca65c88ab24abe75447e28aa3bc4c93ea5093474dfdf0ff"}, - {file = "aiohttp-3.10.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:305be5ff2081fa1d283a76113b8df7a14c10d75602a38d9f012935df20731487"}, - {file = "aiohttp-3.10.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3a1c32a19ee6bbde02f1cb189e13a71b321256cc1d431196a9f824050b160d5a"}, - {file = "aiohttp-3.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:61645818edd40cc6f455b851277a21bf420ce347baa0b86eaa41d51ef58ba23d"}, - {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c225286f2b13bab5987425558baa5cbdb2bc925b2998038fa028245ef421e75"}, - {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ba01ebc6175e1e6b7275c907a3a36be48a2d487549b656aa90c8a910d9f3178"}, - {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8eaf44ccbc4e35762683078b72bf293f476561d8b68ec8a64f98cf32811c323e"}, - {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c43eb1ab7cbf411b8e387dc169acb31f0ca0d8c09ba63f9eac67829585b44f"}, - {file = "aiohttp-3.10.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de7a5299827253023c55ea549444e058c0eb496931fa05d693b95140a947cb73"}, - {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4790f0e15f00058f7599dab2b206d3049d7ac464dc2e5eae0e93fa18aee9e7bf"}, - {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:44b324a6b8376a23e6ba25d368726ee3bc281e6ab306db80b5819999c737d820"}, - {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:0d277cfb304118079e7044aad0b76685d30ecb86f83a0711fc5fb257ffe832ca"}, - {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:54d9ddea424cd19d3ff6128601a4a4d23d54a421f9b4c0fff740505813739a91"}, - {file = "aiohttp-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4f1c9866ccf48a6df2b06823e6ae80573529f2af3a0992ec4fe75b1a510df8a6"}, - {file = "aiohttp-3.10.5-cp312-cp312-win32.whl", hash = "sha256:dc4826823121783dccc0871e3f405417ac116055bf184ac04c36f98b75aacd12"}, - {file = "aiohttp-3.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:22c0a23a3b3138a6bf76fc553789cb1a703836da86b0f306b6f0dc1617398abc"}, - {file = "aiohttp-3.10.5-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7f6b639c36734eaa80a6c152a238242bedcee9b953f23bb887e9102976343092"}, - {file = "aiohttp-3.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f29930bc2921cef955ba39a3ff87d2c4398a0394ae217f41cb02d5c26c8b1b77"}, - {file = "aiohttp-3.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f489a2c9e6455d87eabf907ac0b7d230a9786be43fbe884ad184ddf9e9c1e385"}, - {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:123dd5b16b75b2962d0fff566effb7a065e33cd4538c1692fb31c3bda2bfb972"}, - {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b98e698dc34966e5976e10bbca6d26d6724e6bdea853c7c10162a3235aba6e16"}, - {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3b9162bab7e42f21243effc822652dc5bb5e8ff42a4eb62fe7782bcbcdfacf6"}, - {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1923a5c44061bffd5eebeef58cecf68096e35003907d8201a4d0d6f6e387ccaa"}, - {file = "aiohttp-3.10.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d55f011da0a843c3d3df2c2cf4e537b8070a419f891c930245f05d329c4b0689"}, - {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:afe16a84498441d05e9189a15900640a2d2b5e76cf4efe8cbb088ab4f112ee57"}, - {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8112fb501b1e0567a1251a2fd0747baae60a4ab325a871e975b7bb67e59221f"}, - {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:1e72589da4c90337837fdfe2026ae1952c0f4a6e793adbbfbdd40efed7c63599"}, - {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4d46c7b4173415d8e583045fbc4daa48b40e31b19ce595b8d92cf639396c15d5"}, - {file = "aiohttp-3.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33e6bc4bab477c772a541f76cd91e11ccb6d2efa2b8d7d7883591dfb523e5987"}, - {file = "aiohttp-3.10.5-cp313-cp313-win32.whl", hash = "sha256:c58c6837a2c2a7cf3133983e64173aec11f9c2cd8e87ec2fdc16ce727bcf1a04"}, - {file = "aiohttp-3.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:38172a70005252b6893088c0f5e8a47d173df7cc2b2bd88650957eb84fcf5022"}, - {file = "aiohttp-3.10.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f6f18898ace4bcd2d41a122916475344a87f1dfdec626ecde9ee802a711bc569"}, - {file = "aiohttp-3.10.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5ede29d91a40ba22ac1b922ef510aab871652f6c88ef60b9dcdf773c6d32ad7a"}, - {file = "aiohttp-3.10.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:673f988370f5954df96cc31fd99c7312a3af0a97f09e407399f61583f30da9bc"}, - {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:58718e181c56a3c02d25b09d4115eb02aafe1a732ce5714ab70326d9776457c3"}, - {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b38b1570242fbab8d86a84128fb5b5234a2f70c2e32f3070143a6d94bc854cf"}, - {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:074d1bff0163e107e97bd48cad9f928fa5a3eb4b9d33366137ffce08a63e37fe"}, - {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd31f176429cecbc1ba499d4aba31aaccfea488f418d60376b911269d3b883c5"}, - {file = "aiohttp-3.10.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7384d0b87d4635ec38db9263e6a3f1eb609e2e06087f0aa7f63b76833737b471"}, - {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8989f46f3d7ef79585e98fa991e6ded55d2f48ae56d2c9fa5e491a6e4effb589"}, - {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c83f7a107abb89a227d6c454c613e7606c12a42b9a4ca9c5d7dad25d47c776ae"}, - {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:cde98f323d6bf161041e7627a5fd763f9fd829bcfcd089804a5fdce7bb6e1b7d"}, - {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:676f94c5480d8eefd97c0c7e3953315e4d8c2b71f3b49539beb2aa676c58272f"}, - {file = "aiohttp-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:2d21ac12dc943c68135ff858c3a989f2194a709e6e10b4c8977d7fcd67dfd511"}, - {file = "aiohttp-3.10.5-cp38-cp38-win32.whl", hash = "sha256:17e997105bd1a260850272bfb50e2a328e029c941c2708170d9d978d5a30ad9a"}, - {file = "aiohttp-3.10.5-cp38-cp38-win_amd64.whl", hash = "sha256:1c19de68896747a2aa6257ae4cf6ef59d73917a36a35ee9d0a6f48cff0f94db8"}, - {file = "aiohttp-3.10.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7e2fe37ac654032db1f3499fe56e77190282534810e2a8e833141a021faaab0e"}, - {file = "aiohttp-3.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f5bf3ead3cb66ab990ee2561373b009db5bc0e857549b6c9ba84b20bc462e172"}, - {file = "aiohttp-3.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b2c16a919d936ca87a3c5f0e43af12a89a3ce7ccbce59a2d6784caba945b68b"}, - {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad146dae5977c4dd435eb31373b3fe9b0b1bf26858c6fc452bf6af394067e10b"}, - {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c5c6fa16412b35999320f5c9690c0f554392dc222c04e559217e0f9ae244b92"}, - {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:95c4dc6f61d610bc0ee1edc6f29d993f10febfe5b76bb470b486d90bbece6b22"}, - {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da452c2c322e9ce0cfef392e469a26d63d42860f829026a63374fde6b5c5876f"}, - {file = "aiohttp-3.10.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:898715cf566ec2869d5cb4d5fb4be408964704c46c96b4be267442d265390f32"}, - {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:391cc3a9c1527e424c6865e087897e766a917f15dddb360174a70467572ac6ce"}, - {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:380f926b51b92d02a34119d072f178d80bbda334d1a7e10fa22d467a66e494db"}, - {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ce91db90dbf37bb6fa0997f26574107e1b9d5ff939315247b7e615baa8ec313b"}, - {file = 
"aiohttp-3.10.5-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:9093a81e18c45227eebe4c16124ebf3e0d893830c6aca7cc310bfca8fe59d857"}, - {file = "aiohttp-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:ee40b40aa753d844162dcc80d0fe256b87cba48ca0054f64e68000453caead11"}, - {file = "aiohttp-3.10.5-cp39-cp39-win32.whl", hash = "sha256:03f2645adbe17f274444953bdea69f8327e9d278d961d85657cb0d06864814c1"}, - {file = "aiohttp-3.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:d17920f18e6ee090bdd3d0bfffd769d9f2cb4c8ffde3eb203777a3895c128862"}, - {file = "aiohttp-3.10.5.tar.gz", hash = "sha256:f071854b47d39591ce9a17981c46790acb30518e2f83dfca8db2dfa091178691"}, + {file = "aiohttp-3.10.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:682836fc672972cc3101cc9e30d49c5f7e8f1d010478d46119fe725a4545acfd"}, + {file = "aiohttp-3.10.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:289fa8a20018d0d5aa9e4b35d899bd51bcb80f0d5f365d9a23e30dac3b79159b"}, + {file = "aiohttp-3.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8617c96a20dd57e7e9d398ff9d04f3d11c4d28b1767273a5b1a018ada5a654d3"}, + {file = "aiohttp-3.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdbeff1b062751c2a2a55b171f7050fb7073633c699299d042e962aacdbe1a07"}, + {file = "aiohttp-3.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ea35d849cdd4a9268f910bff4497baebbc1aa3f2f625fd8ccd9ac99c860c621"}, + {file = "aiohttp-3.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:473961b3252f3b949bb84873d6e268fb6d8aa0ccc6eb7404fa58c76a326bb8e1"}, + {file = "aiohttp-3.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d2665c5df629eb2f981dab244c01bfa6cdc185f4ffa026639286c4d56fafb54"}, + {file = "aiohttp-3.10.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25d92f794f1332f656e3765841fc2b7ad5c26c3f3d01e8949eeb3495691cf9f4"}, + {file = "aiohttp-3.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9bd6b2033993d5ae80883bb29b83fb2b432270bbe067c2f53cc73bb57c46065f"}, + {file = "aiohttp-3.10.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d7f408c43f5e75ea1edc152fb375e8f46ef916f545fb66d4aebcbcfad05e2796"}, + {file = "aiohttp-3.10.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:cf8b8560aa965f87bf9c13bf9fed7025993a155ca0ce8422da74bf46d18c2f5f"}, + {file = "aiohttp-3.10.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14477c4e52e2f17437b99893fd220ffe7d7ee41df5ebf931a92b8ca82e6fd094"}, + {file = "aiohttp-3.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fb138fbf9f53928e779650f5ed26d0ea1ed8b2cab67f0ea5d63afa09fdc07593"}, + {file = "aiohttp-3.10.6-cp310-cp310-win32.whl", hash = "sha256:9843d683b8756971797be171ead21511d2215a2d6e3c899c6e3107fbbe826791"}, + {file = "aiohttp-3.10.6-cp310-cp310-win_amd64.whl", hash = "sha256:f8b8e49fe02f744d38352daca1dbef462c3874900bd8166516f6ea8e82b5aacf"}, + {file = "aiohttp-3.10.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f52e54fd776ad0da1006708762213b079b154644db54bcfc62f06eaa5b896402"}, + {file = "aiohttp-3.10.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:995ab1a238fd0d19dc65f2d222e5eb064e409665c6426a3e51d5101c1979ee84"}, + {file = "aiohttp-3.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0749c4d5a08a802dd66ecdf59b2df4d76b900004017468a7bb736c3b5a3dd902"}, + {file = "aiohttp-3.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e05b39158f2af0e2438cc2075cfc271f4ace0c3cc4a81ec95b27a0432e161951"}, + {file = "aiohttp-3.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a9f196c970db2dcde4f24317e06615363349dc357cf4d7a3b0716c20ac6d7bcd"}, + {file = "aiohttp-3.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:47647c8af04a70e07a2462931b0eba63146a13affa697afb4ecbab9d03a480ce"}, + {file = "aiohttp-3.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:669c0efe7e99f6d94d63274c06344bd0e9c8daf184ce5602a29bc39e00a18720"}, + {file = "aiohttp-3.10.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9721cdd83a994225352ca84cd537760d41a9da3c0eacb3ff534747ab8fba6d0"}, + {file = "aiohttp-3.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0b82c8ebed66ce182893e7c0b6b60ba2ace45b1df104feb52380edae266a4850"}, + {file = "aiohttp-3.10.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b169f8e755e541b72e714b89a831b315bbe70db44e33fead28516c9e13d5f931"}, + {file = "aiohttp-3.10.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:0be3115753baf8b4153e64f9aa7bf6c0c64af57979aa900c31f496301b374570"}, + {file = "aiohttp-3.10.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:e1f80cd17d81a404b6e70ef22bfe1870bafc511728397634ad5f5efc8698df56"}, + {file = "aiohttp-3.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6419728b08fb6380c66a470d2319cafcec554c81780e2114b7e150329b9a9a7f"}, + {file = "aiohttp-3.10.6-cp311-cp311-win32.whl", hash = "sha256:bd294dcdc1afdc510bb51d35444003f14e327572877d016d576ac3b9a5888a27"}, + {file = "aiohttp-3.10.6-cp311-cp311-win_amd64.whl", hash = "sha256:bf861da9a43d282d6dd9dcd64c23a0fccf2c5aa5cd7c32024513c8c79fb69de3"}, + {file = "aiohttp-3.10.6-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2708baccdc62f4b1251e59c2aac725936a900081f079b88843dabcab0feeeb27"}, + {file = "aiohttp-3.10.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7475da7a5e2ccf1a1c86c8fee241e277f4874c96564d06f726d8df8e77683ef7"}, + {file = "aiohttp-3.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:02108326574ff60267b7b35b17ac5c0bbd0008ccb942ce4c48b657bb90f0b8aa"}, + {file = "aiohttp-3.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:029a019627b37fa9eac5c75cc54a6bb722c4ebbf5a54d8c8c0fb4dd8facf2702"}, + {file = "aiohttp-3.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a637d387db6fdad95e293fab5433b775fd104ae6348d2388beaaa60d08b38c4"}, + {file = "aiohttp-3.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dc1a16f3fc1944c61290d33c88dc3f09ba62d159b284c38c5331868425aca426"}, + {file = "aiohttp-3.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81b292f37969f9cc54f4643f0be7dacabf3612b3b4a65413661cf6c350226787"}, + {file = "aiohttp-3.10.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0754690a3a26e819173a34093798c155bafb21c3c640bff13be1afa1e9d421f9"}, + {file = "aiohttp-3.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:164ecd32e65467d86843dbb121a6666c3deb23b460e3f8aefdcaacae79eb718a"}, + {file = "aiohttp-3.10.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:438c5863feb761f7ca3270d48c292c334814459f61cc12bab5ba5b702d7c9e56"}, + {file = "aiohttp-3.10.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:ba18573bb1de1063d222f41de64a0d3741223982dcea863b3f74646faf618ec7"}, + {file = "aiohttp-3.10.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:c82a94ddec996413a905f622f3da02c4359952aab8d817c01cf9915419525e95"}, + {file = "aiohttp-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:92351aa5363fc3c1f872ca763f86730ced32b01607f0c9662b1fa711087968d0"}, + {file = "aiohttp-3.10.6-cp312-cp312-win32.whl", hash = "sha256:3e15e33bfc73fa97c228f72e05e8795e163a693fd5323549f49367c76a6e5883"}, + {file = "aiohttp-3.10.6-cp312-cp312-win_amd64.whl", hash = "sha256:fe517113fe4d35d9072b826c3e147d63c5f808ca8167d450b4f96c520c8a1d8d"}, + {file = "aiohttp-3.10.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:482f74057ea13d387a7549d7a7ecb60e45146d15f3e58a2d93a0ad2d5a8457cd"}, + {file = "aiohttp-3.10.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:03fa40d1450ee5196e843315ddf74a51afc7e83d489dbfc380eecefea74158b1"}, + {file = "aiohttp-3.10.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1e52e59ed5f4cc3a3acfe2a610f8891f216f486de54d95d6600a2c9ba1581f4d"}, + {file = "aiohttp-3.10.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b3935a22c9e41a8000d90588bed96cf395ef572dbb409be44c6219c61d900d"}, + {file = "aiohttp-3.10.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bef1480ee50f75abcfcb4b11c12de1005968ca9d0172aec4a5057ba9f2b644f"}, + {file = "aiohttp-3.10.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:671745ea7db19693ce867359d503772177f0b20fa8f6ee1e74e00449f4c4151d"}, + {file = "aiohttp-3.10.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b50b367308ca8c12e0b50cba5773bc9abe64c428d3fd2bbf5cd25aab37c77bf"}, + {file = "aiohttp-3.10.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a504d7cdb431a777d05a124fd0b21efb94498efa743103ea01b1e3136d2e4fb"}, + {file = "aiohttp-3.10.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:66bc81361131763660b969132a22edce2c4d184978ba39614e8f8f95db5c95f8"}, + {file = "aiohttp-3.10.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:27cf19a38506e2e9f12fc17e55f118f04897b0a78537055d93a9de4bf3022e3d"}, + {file = "aiohttp-3.10.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3468b39f977a11271517c6925b226720e148311039a380cc9117b1e2258a721f"}, + {file = "aiohttp-3.10.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9d26da22a793dfd424be1050712a70c0afd96345245c29aced1e35dbace03413"}, + {file = "aiohttp-3.10.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:844d48ff9173d0b941abed8b2ea6a412f82b56d9ab1edb918c74000c15839362"}, + {file = "aiohttp-3.10.6-cp313-cp313-win32.whl", hash = "sha256:2dd56e3c43660ed3bea67fd4c5025f1ac1f9ecf6f0b991a6e5efe2e678c490c5"}, + {file = "aiohttp-3.10.6-cp313-cp313-win_amd64.whl", hash = "sha256:c91781d969fbced1993537f45efe1213bd6fccb4b37bfae2a026e20d6fbed206"}, + {file = "aiohttp-3.10.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:4407a80bca3e694f2d2a523058e20e1f9f98a416619e04f6dc09dc910352ac8b"}, + {file = "aiohttp-3.10.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1cb045ec5961f51af3e2c08cd6fe523f07cc6e345033adee711c49b7b91bb954"}, + {file = "aiohttp-3.10.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4fabdcdc781a36b8fd7b2ca9dea8172f29a99e11d00ca0f83ffeb50958da84a1"}, + {file = "aiohttp-3.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:79a9f42efcc2681790595ab3d03c0e52d01edc23a0973ea09f0dc8d295e12b8e"}, + {file = "aiohttp-3.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cca776a440795db437d82c07455761c85bbcf3956221c3c23b8c93176c278ce7"}, + {file = "aiohttp-3.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5582de171f0898139cf51dd9fcdc79b848e28d9abd68e837f0803fc9f30807b1"}, + {file = "aiohttp-3.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:370e2d47575c53c817ee42a18acc34aad8da4dbdaac0a6c836d58878955f1477"}, + {file = "aiohttp-3.10.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:444d1704e2af6b30766debed9be8a795958029e552fe77551355badb1944012c"}, + {file = "aiohttp-3.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40271a2a375812967401c9ca8077de9368e09a43a964f4dce0ff603301ec9358"}, + {file = "aiohttp-3.10.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:f3af26f86863fad12e25395805bb0babbd49d512806af91ec9708a272b696248"}, + {file = "aiohttp-3.10.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4752df44df48fd42b80f51d6a97553b482cda1274d9dc5df214a3a1aa5d8f018"}, + {file = "aiohttp-3.10.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:2cd5290ab66cfca2f90045db2cc6434c1f4f9fbf97c9f1c316e785033782e7d2"}, + {file = "aiohttp-3.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3427031064b0d5c95647e6369c4aa3c556402f324a3e18107cb09517abe5f962"}, + {file = "aiohttp-3.10.6-cp38-cp38-win32.whl", hash = "sha256:614fc21e86adc28e4165a6391f851a6da6e9cbd7bb232d0df7718b453a89ee98"}, + {file = "aiohttp-3.10.6-cp38-cp38-win_amd64.whl", hash = "sha256:58c5d7318a136a3874c78717dd6de57519bc64f6363c5827c2b1cb775bea71dd"}, + {file = "aiohttp-3.10.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5db26bbca8e7968c4c977a0c640e0b9ce7224e1f4dcafa57870dc6ee28e27de6"}, + {file = "aiohttp-3.10.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3fb4216e3ec0dbc01db5ba802f02ed78ad8f07121be54eb9e918448cc3f61b7c"}, + {file = "aiohttp-3.10.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a976ef488f26e224079deb3d424f29144c6d5ba4ded313198169a8af8f47fb82"}, + {file = "aiohttp-3.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a86610174de8a85a920e956e2d4f9945e7da89f29a00e95ac62a4a414c4ef4e"}, + {file = "aiohttp-3.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:217791c6a399cc4f2e6577bb44344cba1f5714a2aebf6a0bea04cfa956658284"}, + {file = "aiohttp-3.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ba3662d41abe2eab0eeec7ee56f33ef4e0b34858f38abf24377687f9e1fb00a5"}, + {file = "aiohttp-3.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4dfa5ad4bce9ca30a76117fbaa1c1decf41ebb6c18a4e098df44298941566f9"}, + {file = "aiohttp-3.10.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e0009258e97502936d3bd5bf2ced15769629097d0abb81e6495fba1047824fe0"}, + {file = "aiohttp-3.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0a75d5c9fb4f06c41d029ae70ad943c3a844c40c0a769d12be4b99b04f473d3d"}, + {file = "aiohttp-3.10.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8198b7c002aae2b40b2d16bfe724b9a90bcbc9b78b2566fc96131ef4e382574d"}, + {file = "aiohttp-3.10.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4611db8c907f90fe86be112efdc2398cd7b4c8eeded5a4f0314b70fdea8feab0"}, + {file = 
"aiohttp-3.10.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ff99ae06eef85c7a565854826114ced72765832ee16c7e3e766c5e4c5b98d20e"}, + {file = "aiohttp-3.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7641920bdcc7cd2d3ddfb8bb9133a6c9536b09dbd49490b79e125180b2d25b93"}, + {file = "aiohttp-3.10.6-cp39-cp39-win32.whl", hash = "sha256:e2e7d5591ea868d5ec82b90bbeb366a198715672841d46281b623e23079593db"}, + {file = "aiohttp-3.10.6-cp39-cp39-win_amd64.whl", hash = "sha256:b504c08c45623bf5c7ca41be380156d925f00199b3970efd758aef4a77645feb"}, + {file = "aiohttp-3.10.6.tar.gz", hash = "sha256:d2578ef941be0c2ba58f6f421a703527d08427237ed45ecb091fed6f83305336"}, ] [package.dependencies] @@ -136,7 +136,7 @@ async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} attrs = ">=17.3.0" frozenlist = ">=1.1.1" multidict = ">=4.5,<7.0" -yarl = ">=1.0,<2.0" +yarl = ">=1.12.0,<2.0" [package.extras] speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] @@ -179,13 +179,13 @@ files = [ [[package]] name = "anyio" -version = "4.4.0" +version = "4.6.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, + {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, + {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, ] [package.dependencies] @@ -195,9 +195,9 @@ sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "appnope" @@ -370,6 +370,37 @@ files = [ {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = 
"cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] [package.dependencies] @@ -550,33 +581,33 @@ typing-inspect = ">=0.4.0,<1" [[package]] name = "debugpy" -version = "1.8.5" +version = "1.8.6" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.8" files = [ - {file = "debugpy-1.8.5-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:7e4d594367d6407a120b76bdaa03886e9eb652c05ba7f87e37418426ad2079f7"}, - {file = "debugpy-1.8.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4413b7a3ede757dc33a273a17d685ea2b0c09dbd312cc03f5534a0fd4d40750a"}, - {file = "debugpy-1.8.5-cp310-cp310-win32.whl", hash = "sha256:dd3811bd63632bb25eda6bd73bea8e0521794cda02be41fa3160eb26fc29e7ed"}, - {file = "debugpy-1.8.5-cp310-cp310-win_amd64.whl", hash = "sha256:b78c1250441ce893cb5035dd6f5fc12db968cc07f91cc06996b2087f7cefdd8e"}, - {file = "debugpy-1.8.5-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:606bccba19f7188b6ea9579c8a4f5a5364ecd0bf5a0659c8a5d0e10dcee3032a"}, - {file = "debugpy-1.8.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db9fb642938a7a609a6c865c32ecd0d795d56c1aaa7a7a5722d77855d5e77f2b"}, - {file = "debugpy-1.8.5-cp311-cp311-win32.whl", hash = "sha256:4fbb3b39ae1aa3e5ad578f37a48a7a303dad9a3d018d369bc9ec629c1cfa7408"}, - {file = "debugpy-1.8.5-cp311-cp311-win_amd64.whl", hash = "sha256:345d6a0206e81eb68b1493ce2fbffd57c3088e2ce4b46592077a943d2b968ca3"}, - {file = "debugpy-1.8.5-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:5b5c770977c8ec6c40c60d6f58cacc7f7fe5a45960363d6974ddb9b62dbee156"}, - {file = "debugpy-1.8.5-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a65b00b7cdd2ee0c2cf4c7335fef31e15f1b7056c7fdbce9e90193e1a8c8cb"}, - {file = "debugpy-1.8.5-cp312-cp312-win32.whl", hash = "sha256:c9f7c15ea1da18d2fcc2709e9f3d6de98b69a5b0fff1807fb80bc55f906691f7"}, - {file = "debugpy-1.8.5-cp312-cp312-win_amd64.whl", hash = "sha256:28ced650c974aaf179231668a293ecd5c63c0a671ae6d56b8795ecc5d2f48d3c"}, - {file = "debugpy-1.8.5-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:3df6692351172a42af7558daa5019651f898fc67450bf091335aa8a18fbf6f3a"}, - {file = "debugpy-1.8.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cd04a73eb2769eb0bfe43f5bfde1215c5923d6924b9b90f94d15f207a402226"}, - {file = "debugpy-1.8.5-cp38-cp38-win32.whl", hash = "sha256:8f913ee8e9fcf9d38a751f56e6de12a297ae7832749d35de26d960f14280750a"}, - {file = "debugpy-1.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:a697beca97dad3780b89a7fb525d5e79f33821a8bc0c06faf1f1289e549743cf"}, - {file = "debugpy-1.8.5-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:0a1029a2869d01cb777216af8c53cda0476875ef02a2b6ff8b2f2c9a4b04176c"}, - {file = 
"debugpy-1.8.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84c276489e141ed0b93b0af648eef891546143d6a48f610945416453a8ad406"}, - {file = "debugpy-1.8.5-cp39-cp39-win32.whl", hash = "sha256:ad84b7cde7fd96cf6eea34ff6c4a1b7887e0fe2ea46e099e53234856f9d99a34"}, - {file = "debugpy-1.8.5-cp39-cp39-win_amd64.whl", hash = "sha256:7b0fe36ed9d26cb6836b0a51453653f8f2e347ba7348f2bbfe76bfeb670bfb1c"}, - {file = "debugpy-1.8.5-py2.py3-none-any.whl", hash = "sha256:55919dce65b471eff25901acf82d328bbd5b833526b6c1364bd5133754777a44"}, - {file = "debugpy-1.8.5.zip", hash = "sha256:b2112cfeb34b4507399d298fe7023a16656fc553ed5246536060ca7bd0e668d0"}, + {file = "debugpy-1.8.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:30f467c5345d9dfdcc0afdb10e018e47f092e383447500f125b4e013236bf14b"}, + {file = "debugpy-1.8.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d73d8c52614432f4215d0fe79a7e595d0dd162b5c15233762565be2f014803b"}, + {file = "debugpy-1.8.6-cp310-cp310-win32.whl", hash = "sha256:e3e182cd98eac20ee23a00653503315085b29ab44ed66269482349d307b08df9"}, + {file = "debugpy-1.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:e3a82da039cfe717b6fb1886cbbe5c4a3f15d7df4765af857f4307585121c2dd"}, + {file = "debugpy-1.8.6-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:67479a94cf5fd2c2d88f9615e087fcb4fec169ec780464a3f2ba4a9a2bb79955"}, + {file = "debugpy-1.8.6-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb8653f6cbf1dd0a305ac1aa66ec246002145074ea57933978346ea5afdf70b"}, + {file = "debugpy-1.8.6-cp311-cp311-win32.whl", hash = "sha256:cdaf0b9691879da2d13fa39b61c01887c34558d1ff6e5c30e2eb698f5384cd43"}, + {file = "debugpy-1.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:43996632bee7435583952155c06881074b9a742a86cee74e701d87ca532fe833"}, + {file = "debugpy-1.8.6-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:db891b141fc6ee4b5fc6d1cc8035ec329cabc64bdd2ae672b4550c87d4ecb128"}, + {file = "debugpy-1.8.6-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:567419081ff67da766c898ccf21e79f1adad0e321381b0dfc7a9c8f7a9347972"}, + {file = "debugpy-1.8.6-cp312-cp312-win32.whl", hash = "sha256:c9834dfd701a1f6bf0f7f0b8b1573970ae99ebbeee68314116e0ccc5c78eea3c"}, + {file = "debugpy-1.8.6-cp312-cp312-win_amd64.whl", hash = "sha256:e4ce0570aa4aca87137890d23b86faeadf184924ad892d20c54237bcaab75d8f"}, + {file = "debugpy-1.8.6-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:df5dc9eb4ca050273b8e374a4cd967c43be1327eeb42bfe2f58b3cdfe7c68dcb"}, + {file = "debugpy-1.8.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a85707c6a84b0c5b3db92a2df685b5230dd8fb8c108298ba4f11dba157a615a"}, + {file = "debugpy-1.8.6-cp38-cp38-win32.whl", hash = "sha256:538c6cdcdcdad310bbefd96d7850be1cd46e703079cc9e67d42a9ca776cdc8a8"}, + {file = "debugpy-1.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:22140bc02c66cda6053b6eb56dfe01bbe22a4447846581ba1dd6df2c9f97982d"}, + {file = "debugpy-1.8.6-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:c1cef65cffbc96e7b392d9178dbfd524ab0750da6c0023c027ddcac968fd1caa"}, + {file = "debugpy-1.8.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1e60bd06bb3cc5c0e957df748d1fab501e01416c43a7bdc756d2a992ea1b881"}, + {file = "debugpy-1.8.6-cp39-cp39-win32.whl", hash = 
"sha256:f7158252803d0752ed5398d291dee4c553bb12d14547c0e1843ab74ee9c31123"}, + {file = "debugpy-1.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3358aa619a073b620cd0d51d8a6176590af24abcc3fe2e479929a154bf591b51"}, + {file = "debugpy-1.8.6-py2.py3-none-any.whl", hash = "sha256:b48892df4d810eff21d3ef37274f4c60d32cdcafc462ad5647239036b0f0649f"}, + {file = "debugpy-1.8.6.zip", hash = "sha256:c931a9371a86784cee25dec8d65bc2dc7a21f3f1552e3833d9ef8f919d22280a"}, ] [[package]] @@ -728,77 +759,84 @@ files = [ [[package]] name = "greenlet" -version = "3.1.0" +version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" files = [ - {file = "greenlet-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a814dc3100e8a046ff48faeaa909e80cdb358411a3d6dd5293158425c684eda8"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a771dc64fa44ebe58d65768d869fcfb9060169d203446c1d446e844b62bdfdca"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0e49a65d25d7350cca2da15aac31b6f67a43d867448babf997fe83c7505f57bc"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2cd8518eade968bc52262d8c46727cfc0826ff4d552cf0430b8d65aaf50bb91d"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76dc19e660baea5c38e949455c1181bc018893f25372d10ffe24b3ed7341fb25"}, - {file = "greenlet-3.1.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c0a5b1c22c82831f56f2f7ad9bbe4948879762fe0d59833a4a71f16e5fa0f682"}, - {file = "greenlet-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2651dfb006f391bcb240635079a68a261b227a10a08af6349cba834a2141efa1"}, - {file = "greenlet-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3e7e6ef1737a819819b1163116ad4b48d06cfdd40352d813bb14436024fcda99"}, - {file = "greenlet-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:ffb08f2a1e59d38c7b8b9ac8083c9c8b9875f0955b1e9b9b9a965607a51f8e54"}, - {file = "greenlet-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9730929375021ec90f6447bff4f7f5508faef1c02f399a1953870cdb78e0c345"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:713d450cf8e61854de9420fb7eea8ad228df4e27e7d4ed465de98c955d2b3fa6"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c3446937be153718250fe421da548f973124189f18fe4575a0510b5c928f0cc"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1ddc7bcedeb47187be74208bc652d63d6b20cb24f4e596bd356092d8000da6d6"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44151d7b81b9391ed759a2f2865bbe623ef00d648fed59363be2bbbd5154656f"}, - {file = "greenlet-3.1.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6cea1cca3be76c9483282dc7760ea1cc08a6ecec1f0b6ca0a94ea0d17432da19"}, - {file = "greenlet-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:619935a44f414274a2c08c9e74611965650b730eb4efe4b2270f91df5e4adf9a"}, - {file = "greenlet-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:221169d31cada333a0c7fd087b957c8f431c1dba202c3a58cf5a3583ed973e9b"}, - {file = "greenlet-3.1.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:01059afb9b178606b4b6e92c3e710ea1635597c3537e44da69f4531e111dd5e9"}, - {file = "greenlet-3.1.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:24fc216ec7c8be9becba8b64a98a78f9cd057fd2dc75ae952ca94ed8a893bf27"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d07c28b85b350564bdff9f51c1c5007dfb2f389385d1bc23288de51134ca303"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:243a223c96a4246f8a30ea470c440fe9db1f5e444941ee3c3cd79df119b8eebf"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26811df4dc81271033a7836bc20d12cd30938e6bd2e9437f56fa03da81b0f8fc"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9d86401550b09a55410f32ceb5fe7efcd998bd2dad9e82521713cb148a4a15f"}, - {file = "greenlet-3.1.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:26d9c1c4f1748ccac0bae1dbb465fb1a795a75aba8af8ca871503019f4285e2a"}, - {file = "greenlet-3.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:cd468ec62257bb4544989402b19d795d2305eccb06cde5da0eb739b63dc04665"}, - {file = "greenlet-3.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a53dfe8f82b715319e9953330fa5c8708b610d48b5c59f1316337302af5c0811"}, - {file = "greenlet-3.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:28fe80a3eb673b2d5cc3b12eea468a5e5f4603c26aa34d88bf61bba82ceb2f9b"}, - {file = "greenlet-3.1.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:76b3e3976d2a452cba7aa9e453498ac72240d43030fdc6d538a72b87eaff52fd"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:655b21ffd37a96b1e78cc48bf254f5ea4b5b85efaf9e9e2a526b3c9309d660ca"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6f4c2027689093775fd58ca2388d58789009116844432d920e9147f91acbe64"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:76e5064fd8e94c3f74d9fd69b02d99e3cdb8fc286ed49a1f10b256e59d0d3a0b"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a4bf607f690f7987ab3291406e012cd8591a4f77aa54f29b890f9c331e84989"}, - {file = "greenlet-3.1.0-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:037d9ac99540ace9424cb9ea89f0accfaff4316f149520b4ae293eebc5bded17"}, - {file = "greenlet-3.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:90b5bbf05fe3d3ef697103850c2ce3374558f6fe40fd57c9fac1bf14903f50a5"}, - {file = "greenlet-3.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:726377bd60081172685c0ff46afbc600d064f01053190e4450857483c4d44484"}, - {file = "greenlet-3.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:d46d5069e2eeda111d6f71970e341f4bd9aeeee92074e649ae263b834286ecc0"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81eeec4403a7d7684b5812a8aaa626fa23b7d0848edb3a28d2eb3220daddcbd0"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a3dae7492d16e85ea6045fd11cb8e782b63eac8c8d520c3a92c02ac4573b0a6"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b5ea3664eed571779403858d7cd0a9b0ebf50d57d2cdeafc7748e09ef8cd81a"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a22f4e26400f7f48faef2d69c20dc055a1f3043d330923f9abe08ea0aecc44df"}, - {file = "greenlet-3.1.0-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13ff8c8e54a10472ce3b2a2da007f915175192f18e6495bad50486e87c7f6637"}, - {file = "greenlet-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f9671e7282d8c6fcabc32c0fb8d7c0ea8894ae85cee89c9aadc2d7129e1a9954"}, - {file = "greenlet-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:184258372ae9e1e9bddce6f187967f2e08ecd16906557c4320e3ba88a93438c3"}, - {file = "greenlet-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:a0409bc18a9f85321399c29baf93545152d74a49d92f2f55302f122007cfda00"}, - {file = "greenlet-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:9eb4a1d7399b9f3c7ac68ae6baa6be5f9195d1d08c9ddc45ad559aa6b556bce6"}, - {file = "greenlet-3.1.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:a8870983af660798dc1b529e1fd6f1cefd94e45135a32e58bd70edd694540f33"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cfcfb73aed40f550a57ea904629bdaf2e562c68fa1164fa4588e752af6efdc3f"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9482c2ed414781c0af0b35d9d575226da6b728bd1a720668fa05837184965b7"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d58ec349e0c2c0bc6669bf2cd4982d2f93bf067860d23a0ea1fe677b0f0b1e09"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd65695a8df1233309b701dec2539cc4b11e97d4fcc0f4185b4a12ce54db0491"}, - {file = "greenlet-3.1.0-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:665b21e95bc0fce5cab03b2e1d90ba9c66c510f1bb5fdc864f3a377d0f553f6b"}, - {file = "greenlet-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d3c59a06c2c28a81a026ff11fbf012081ea34fb9b7052f2ed0366e14896f0a1d"}, - {file = "greenlet-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415b9494ff6240b09af06b91a375731febe0090218e2898d2b85f9b92abcda0"}, - {file = "greenlet-3.1.0-cp38-cp38-win32.whl", hash = "sha256:1544b8dd090b494c55e60c4ff46e238be44fdc472d2589e943c241e0169bcea2"}, - {file = "greenlet-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:7f346d24d74c00b6730440f5eb8ec3fe5774ca8d1c9574e8e57c8671bb51b910"}, - {file = "greenlet-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:db1b3ccb93488328c74e97ff888604a8b95ae4f35f4f56677ca57a4fc3a4220b"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44cd313629ded43bb3b98737bba2f3e2c2c8679b55ea29ed73daea6b755fe8e7"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fad7a051e07f64e297e6e8399b4d6a3bdcad3d7297409e9a06ef8cbccff4f501"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3967dcc1cd2ea61b08b0b276659242cbce5caca39e7cbc02408222fb9e6ff39"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d45b75b0f3fd8d99f62eb7908cfa6d727b7ed190737dec7fe46d993da550b81a"}, - {file = "greenlet-3.1.0-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2d004db911ed7b6218ec5c5bfe4cf70ae8aa2223dffbb5b3c69e342bb253cb28"}, - {file = "greenlet-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9505a0c8579899057cbefd4ec34d865ab99852baf1ff33a9481eb3924e2da0b"}, - {file = 
"greenlet-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fd6e94593f6f9714dbad1aaba734b5ec04593374fa6638df61592055868f8b8"}, - {file = "greenlet-3.1.0-cp39-cp39-win32.whl", hash = "sha256:d0dd943282231480aad5f50f89bdf26690c995e8ff555f26d8a5b9887b559bcc"}, - {file = "greenlet-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:ac0adfdb3a21dc2a24ed728b61e72440d297d0fd3a577389df566651fcd08f97"}, - {file = "greenlet-3.1.0.tar.gz", hash = "sha256:b395121e9bbe8d02a750886f108d540abe66075e61e22f7353d9acb0b81be0f0"}, + {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = 
"greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = 
"greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, ] [package.extras] @@ -1113,13 +1151,13 @@ files = [ [[package]] name = "jupyter-client" -version = "8.6.2" +version = "8.6.3" description = "Jupyter protocol implementation and client libraries" optional = false python-versions = ">=3.8" files = [ - {file = "jupyter_client-8.6.2-py3-none-any.whl", hash = "sha256:50cbc5c66fd1b8f65ecb66bc490ab73217993632809b6e505687de18e9dea39f"}, - {file = "jupyter_client-8.6.2.tar.gz", hash = "sha256:2bda14d55ee5ba58552a8c53ae43d215ad9868853489213f37da060ced54d8df"}, + {file = "jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f"}, + {file = "jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419"}, ] [package.dependencies] @@ -1156,7 +1194,7 @@ test = ["ipykernel", "pre-commit", "pytest (<8)", "pytest-cov", "pytest-timeout" [[package]] name = "langchain" -version = "0.3.0" +version = "0.3.1" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -1166,7 +1204,7 @@ develop = true [package.dependencies] aiohttp = "^3.8.3" async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""} -langchain-core = "^0.3.0" +langchain-core = "^0.3.6" langchain-text-splitters = "^0.3.0" langsmith = "^0.1.17" numpy = [ @@ -1185,7 +1223,7 @@ url = "libs/langchain" [[package]] name = "langchain-community" -version = "0.3.0" +version = "0.3.1" description = "Community contributed LangChain integrations." 
optional = false python-versions = ">=3.9,<4.0" @@ -1195,9 +1233,9 @@ develop = true [package.dependencies] aiohttp = "^3.8.3" dataclasses-json = ">= 0.5.7, < 0.7" -langchain = "^0.3.0" -langchain-core = "^0.3.0" -langsmith = "^0.1.112" +langchain = "^0.3.1" +langchain-core = "^0.3.6" +langsmith = "^0.1.125" numpy = [ {version = ">=1,<2", markers = "python_version < \"3.12\""}, {version = ">=1.26.0,<2.0.0", markers = "python_version >= \"3.12\""}, @@ -1214,7 +1252,7 @@ url = "libs/community" [[package]] name = "langchain-core" -version = "0.3.0" +version = "0.3.6" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9,<4.0" @@ -1223,7 +1261,7 @@ develop = true [package.dependencies] jsonpatch = "^1.33" -langsmith = "^0.1.117" +langsmith = "^0.1.125" packaging = ">=23.2,<25" pydantic = [ {version = ">=2.5.2,<3.0.0", markers = "python_full_version < \"3.12.4\""}, @@ -1237,26 +1275,9 @@ typing-extensions = ">=4.7" type = "directory" url = "libs/core" -[[package]] -name = "langchain-experimental" -version = "0.3.0" -description = "Building applications with LLMs through composability" -optional = false -python-versions = ">=3.9,<4.0" -files = [] -develop = true - -[package.dependencies] -langchain-community = "^0.3.0" -langchain-core = "^0.3.0" - -[package.source] -type = "directory" -url = "libs/experimental" - [[package]] name = "langchain-openai" -version = "0.2.0" +version = "0.2.1" description = "An integration package connecting OpenAI and LangChain" optional = false python-versions = ">=3.9,<4.0" @@ -1290,13 +1311,13 @@ url = "libs/text-splitters" [[package]] name = "langsmith" -version = "0.1.121" +version = "0.1.128" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "langsmith-0.1.121-py3-none-any.whl", hash = "sha256:fdb1ac8a671d3904201bfeea197d87bded46a10d08f1034af464211872e29893"}, - {file = "langsmith-0.1.121.tar.gz", hash = "sha256:e9381b82a5bd484af9a51c3e96faea572746b8d617b070c1cda40cbbe48e33df"}, + {file = "langsmith-0.1.128-py3-none-any.whl", hash = "sha256:c1b59d947584be7487ac53dffb4e232704626964011b714fd3d9add4b3694cbc"}, + {file = "langsmith-0.1.128.tar.gz", hash = "sha256:3299e17a659f3c47725c97c47f4445fc34113ac668becce425919866fbcb6ec2"}, ] [package.dependencies] @@ -1583,13 +1604,13 @@ files = [ [[package]] name = "openai" -version = "1.45.1" +version = "1.48.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.7.1" files = [ - {file = "openai-1.45.1-py3-none-any.whl", hash = "sha256:4a6cce402aec803ae57ae7eff4b5b94bf6c0e1703a8d85541c27243c2adeadf8"}, - {file = "openai-1.45.1.tar.gz", hash = "sha256:f79e384916b219ab2f028bbf9c778e81291c61eb0645ccfa1828a4b18b55d534"}, + {file = "openai-1.48.0-py3-none-any.whl", hash = "sha256:7c4af223f0bf615ce4a12453729952c9a8b04ffe8c78aa77981b12fd970149cf"}, + {file = "openai-1.48.0.tar.gz", hash = "sha256:1d3b69ea62c287c4885a6f3ce840768564cd5f52c60ac5f890fef80d43cc4799"}, ] [package.dependencies] @@ -1713,13 +1734,13 @@ ptyprocess = ">=0.5" [[package]] name = "platformdirs" -version = "4.3.3" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.3.3-py3-none-any.whl", hash = "sha256:50a5450e2e84f44539718293cbb1da0a0885c9d14adf21b77bae4e66fc99d9b5"}, - {file = "platformdirs-4.3.3.tar.gz", hash = "sha256:d4e0b7d8ec176b341fb03cb11ca12d0276faa8c485f9cd218f613840463fc2c0"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] @@ -1729,13 +1750,13 @@ type = ["mypy (>=1.11.2)"] [[package]] name = "prompt-toolkit" -version = "3.0.47" +version = "3.0.48" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, - {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, + {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, + {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, ] [package.dependencies] @@ -1808,18 +1829,18 @@ files = [ [[package]] name = "pydantic" -version = "2.9.1" +version = "2.9.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, - {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, + {file = "pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12"}, + {file = "pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.23.3" +pydantic-core = "2.23.4" typing-extensions = [ {version = ">=4.6.1", markers = "python_version < \"3.13\""}, {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, @@ -1831,100 +1852,100 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.23.3" +version = "2.23.4" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, - {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, - {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, - {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, - {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, - {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, - {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, - {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, - {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, - {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, - {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, - {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, - {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = 
"sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, - {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, + {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, + {file = 
"pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, + {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, + {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, + {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, + {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, + {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e"}, + {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0"}, + {file = "pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64"}, + {file = "pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f"}, + {file = "pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231"}, + {file = "pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee"}, + {file = 
"pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36"}, + {file = "pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e"}, + {file = "pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24"}, + {file = "pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84"}, + {file = "pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc"}, + {file = "pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b"}, + {file = "pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6"}, + {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, + {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, + {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = 
"sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d4488a93b071c04dc20f5cecc3631fc78b9789dd72483ba15d423b5b3689b555"}, + {file = "pydantic_core-2.23.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:81965a16b675b35e1d09dd14df53f190f9129c0202356ed44ab2728b1c905658"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ffa2ebd4c8530079140dd2d7f794a9d9a73cbb8e9d59ffe24c63436efa8f271"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61817945f2fe7d166e75fbfb28004034b48e44878177fc54d81688e7b85a3665"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29d2c342c4bc01b88402d60189f3df065fb0dda3654744d5a165a5288a657368"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e11661ce0fd30a6790e8bcdf263b9ec5988e95e63cf901972107efc49218b13"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d18368b137c6295db49ce7218b1a9ba15c5bc254c96d7c9f9e924a9bc7825ad"}, + {file = "pydantic_core-2.23.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec4e55f79b1c4ffb2eecd8a0cfba9955a2588497d96851f4c8f99aa4a1d39b12"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:374a5e5049eda9e0a44c696c7ade3ff355f06b1fe0bb945ea3cac2bc336478a2"}, + {file = "pydantic_core-2.23.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c364564d17da23db1106787675fc7af45f2f7b58b4173bfdd105564e132e6fb"}, + {file = "pydantic_core-2.23.4-cp38-none-win32.whl", hash = "sha256:d7a80d21d613eec45e3d41eb22f8f94ddc758a6c4720842dc74c0581f54993d6"}, + {file = "pydantic_core-2.23.4-cp38-none-win_amd64.whl", hash = "sha256:5f5ff8d839f4566a474a969508fe1c5e59c31c80d9e140566f9a37bba7b8d556"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a4fa4fc04dff799089689f4fd502ce7d59de529fc2f40a2c8836886c03e0175a"}, + {file = "pydantic_core-2.23.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7df63886be5e270da67e0966cf4afbae86069501d35c8c1b3b6c168f42cb36"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcedcd19a557e182628afa1d553c3895a9f825b936415d0dbd3cd0bbcfd29b4b"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f54b118ce5de9ac21c363d9b3caa6c800341e8c47a508787e5868c6b79c9323"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86d2f57d3e1379a9525c5ab067b27dbb8a0642fb5d454e17a9ac434f9ce523e3"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de6d1d1b9e5101508cb37ab0d972357cac5235f5c6533d1071964c47139257df"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1278e0d324f6908e872730c9102b0112477a7f7cf88b308e4fc36ce1bdb6d58c"}, + {file = "pydantic_core-2.23.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a6b5099eeec78827553827f4c6b8615978bb4b6a88e5d9b93eddf8bb6790f55"}, + {file = "pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e55541f756f9b3ee346b840103f32779c695a19826a4c442b7954550a0972040"}, + {file = 
"pydantic_core-2.23.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a5c7ba8ffb6d6f8f2ab08743be203654bb1aaa8c9dcb09f82ddd34eadb695605"}, + {file = "pydantic_core-2.23.4-cp39-none-win32.whl", hash = "sha256:37b0fe330e4a58d3c58b24d91d1eb102aeec675a3db4c292ec3928ecd892a9a6"}, + {file = "pydantic_core-2.23.4-cp39-none-win_amd64.whl", hash = "sha256:1498bec4c05c9c787bde9125cfdcc63a41004ff167f495063191b863399b1a29"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, + {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:78ddaaa81421a29574a682b3179d4cf9e6d405a09b99d93ddcf7e5239c742e21"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:883a91b5dd7d26492ff2f04f40fbb652de40fcc0afe07e8129e8ae779c2110eb"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88ad334a15b32a791ea935af224b9de1bf99bcd62fabf745d5f3442199d86d59"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:233710f069d251feb12a56da21e14cca67994eab08362207785cf8c598e74577"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:19442362866a753485ba5e4be408964644dd6a09123d9416c54cd49171f50744"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:624e278a7d29b6445e4e813af92af37820fafb6dcc55c012c834f9e26f9aaaef"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5ef8f42bec47f21d07668a043f077d507e5bf4e668d5c6dfe6aaba89de1a5b8"}, + {file = "pydantic_core-2.23.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:aea443fffa9fbe3af1a9ba721a87f926fe548d32cab71d188a6ede77d0ff244e"}, + {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, ] [package.dependencies] @@ -2459,13 +2480,13 @@ test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools [[package]] name = "sphinx-autobuild" -version = "2024.9.3" +version = "2024.9.19" description = "Rebuild Sphinx documentation on changes, with hot reloading in the browser." 
optional = false python-versions = ">=3.9" files = [ - {file = "sphinx_autobuild-2024.9.3-py3-none-any.whl", hash = "sha256:55fe9bcc05dab659650d79bed0e6beb8b6032234edbf23f028f2cac3471f0c2d"}, - {file = "sphinx_autobuild-2024.9.3.tar.gz", hash = "sha256:75929a5a92b932da8d29837406d6d973a927c456f30986a27f1f20b067897892"}, + {file = "sphinx_autobuild-2024.9.19-py3-none-any.whl", hash = "sha256:57d974eebfc6461ff0fd136e78bf7a9c057d543d5166d318a45599898019b82c"}, + {file = "sphinx_autobuild-2024.9.19.tar.gz", hash = "sha256:2dd4863d174e533c1cd075eb5dfc90ad9a21734af7efd25569bf228b405e08ef"}, ] [package.dependencies] @@ -2580,37 +2601,54 @@ description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, {file 
= "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, + {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, ] @@ -2664,13 +2702,13 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] [[package]] name = "starlette" -version = "0.38.5" +version = "0.39.1" description = "The little ASGI library that shines." optional = false python-versions = ">=3.8" files = [ - {file = "starlette-0.38.5-py3-none-any.whl", hash = "sha256:632f420a9d13e3ee2a6f18f437b0a9f1faecb0bc42e1942aa2ea0e379a4c4206"}, - {file = "starlette-0.38.5.tar.gz", hash = "sha256:04a92830a9b6eb1442c766199d62260c3d4dc9c4f9188360626b1e0273cb7077"}, + {file = "starlette-0.39.1-py3-none-any.whl", hash = "sha256:0d31c90dacae588734e91b98cb4469fd37848ef23d2dd34355c5542bc827c02a"}, + {file = "starlette-0.39.1.tar.gz", hash = "sha256:33c5a94f64d3ab2c799b2715b45f254a3752f229d334f1562a3aaf78c23eab95"}, ] [package.dependencies] @@ -2994,198 +3032,198 @@ files = [ [[package]] name = "websockets" -version = "13.0.1" +version = "13.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false python-versions = ">=3.8" files = [ - {file = "websockets-13.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1841c9082a3ba4a05ea824cf6d99570a6a2d8849ef0db16e9c826acb28089e8f"}, - {file = "websockets-13.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c5870b4a11b77e4caa3937142b650fbbc0914a3e07a0cf3131f35c0587489c1c"}, - {file = "websockets-13.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f1d3d1f2eb79fe7b0fb02e599b2bf76a7619c79300fc55f0b5e2d382881d4f7f"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15c7d62ee071fa94a2fc52c2b472fed4af258d43f9030479d9c4a2de885fd543"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6724b554b70d6195ba19650fef5759ef11346f946c07dbbe390e039bcaa7cc3d"}, - {file = "websockets-13.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a952fa2ae57a42ba7951e6b2605e08a24801a4931b5644dfc68939e041bc7f"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:17118647c0ea14796364299e942c330d72acc4b248e07e639d34b75067b3cdd8"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64a11aae1de4c178fa653b07d90f2fb1a2ed31919a5ea2361a38760192e1858b"}, - {file = "websockets-13.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0617fd0b1d14309c7eab6ba5deae8a7179959861846cbc5cb528a7531c249448"}, - {file = 
"websockets-13.0.1-cp310-cp310-win32.whl", hash = "sha256:11f9976ecbc530248cf162e359a92f37b7b282de88d1d194f2167b5e7ad80ce3"}, - {file = "websockets-13.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:c3c493d0e5141ec055a7d6809a28ac2b88d5b878bb22df8c621ebe79a61123d0"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:699ba9dd6a926f82a277063603fc8d586b89f4cb128efc353b749b641fcddda7"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cf2fae6d85e5dc384bf846f8243ddaa9197f3a1a70044f59399af001fd1f51d4"}, - {file = "websockets-13.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:52aed6ef21a0f1a2a5e310fb5c42d7555e9c5855476bbd7173c3aa3d8a0302f2"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8eb2b9a318542153674c6e377eb8cb9ca0fc011c04475110d3477862f15d29f0"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5df891c86fe68b2c38da55b7aea7095beca105933c697d719f3f45f4220a5e0e"}, - {file = "websockets-13.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fac2d146ff30d9dd2fcf917e5d147db037a5c573f0446c564f16f1f94cf87462"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b8ac5b46fd798bbbf2ac6620e0437c36a202b08e1f827832c4bf050da081b501"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:46af561eba6f9b0848b2c9d2427086cabadf14e0abdd9fde9d72d447df268418"}, - {file = "websockets-13.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b5a06d7f60bc2fc378a333978470dfc4e1415ee52f5f0fce4f7853eb10c1e9df"}, - {file = "websockets-13.0.1-cp311-cp311-win32.whl", hash = "sha256:556e70e4f69be1082e6ef26dcb70efcd08d1850f5d6c5f4f2bcb4e397e68f01f"}, - {file = "websockets-13.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:67494e95d6565bf395476e9d040037ff69c8b3fa356a886b21d8422ad86ae075"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f9c9e258e3d5efe199ec23903f5da0eeaad58cf6fccb3547b74fd4750e5ac47a"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6b41a1b3b561f1cba8321fb32987552a024a8f67f0d05f06fcf29f0090a1b956"}, - {file = "websockets-13.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f73e676a46b0fe9426612ce8caeca54c9073191a77c3e9d5c94697aef99296af"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f613289f4a94142f914aafad6c6c87903de78eae1e140fa769a7385fb232fdf"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f52504023b1480d458adf496dc1c9e9811df4ba4752f0bc1f89ae92f4f07d0c"}, - {file = "websockets-13.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:139add0f98206cb74109faf3611b7783ceafc928529c62b389917a037d4cfdf4"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:47236c13be337ef36546004ce8c5580f4b1150d9538b27bf8a5ad8edf23ccfab"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c44ca9ade59b2e376612df34e837013e2b273e6c92d7ed6636d0556b6f4db93d"}, - {file = "websockets-13.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9bbc525f4be3e51b89b2a700f5746c2a6907d2e2ef4513a8daafc98198b92237"}, - {file = 
"websockets-13.0.1-cp312-cp312-win32.whl", hash = "sha256:3624fd8664f2577cf8de996db3250662e259bfbc870dd8ebdcf5d7c6ac0b5185"}, - {file = "websockets-13.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0513c727fb8adffa6d9bf4a4463b2bade0186cbd8c3604ae5540fae18a90cb99"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1ee4cc030a4bdab482a37462dbf3ffb7e09334d01dd37d1063be1136a0d825fa"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dbb0b697cc0655719522406c059eae233abaa3243821cfdfab1215d02ac10231"}, - {file = "websockets-13.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:acbebec8cb3d4df6e2488fbf34702cbc37fc39ac7abf9449392cefb3305562e9"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63848cdb6fcc0bf09d4a155464c46c64ffdb5807ede4fb251da2c2692559ce75"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:872afa52a9f4c414d6955c365b6588bc4401272c629ff8321a55f44e3f62b553"}, - {file = "websockets-13.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e70fec7c54aad4d71eae8e8cab50525e899791fc389ec6f77b95312e4e9920"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e82db3756ccb66266504f5a3de05ac6b32f287faacff72462612120074103329"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4e85f46ce287f5c52438bb3703d86162263afccf034a5ef13dbe4318e98d86e7"}, - {file = "websockets-13.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f3fea72e4e6edb983908f0db373ae0732b275628901d909c382aae3b592589f2"}, - {file = "websockets-13.0.1-cp313-cp313-win32.whl", hash = "sha256:254ecf35572fca01a9f789a1d0f543898e222f7b69ecd7d5381d8d8047627bdb"}, - {file = "websockets-13.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:ca48914cdd9f2ccd94deab5bcb5ac98025a5ddce98881e5cce762854a5de330b"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b74593e9acf18ea5469c3edaa6b27fa7ecf97b30e9dabd5a94c4c940637ab96e"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:132511bfd42e77d152c919147078460c88a795af16b50e42a0bd14f0ad71ddd2"}, - {file = "websockets-13.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:165bedf13556f985a2aa064309baa01462aa79bf6112fbd068ae38993a0e1f1b"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e801ca2f448850685417d723ec70298feff3ce4ff687c6f20922c7474b4746ae"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30d3a1f041360f029765d8704eae606781e673e8918e6b2c792e0775de51352f"}, - {file = "websockets-13.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67648f5e50231b5a7f6d83b32f9c525e319f0ddc841be0de64f24928cd75a603"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:4f0426d51c8f0926a4879390f53c7f5a855e42d68df95fff6032c82c888b5f36"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ef48e4137e8799998a343706531e656fdec6797b80efd029117edacb74b0a10a"}, - {file = "websockets-13.0.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:249aab278810bee585cd0d4de2f08cfd67eed4fc75bde623be163798ed4db2eb"}, - {file = 
"websockets-13.0.1-cp38-cp38-win32.whl", hash = "sha256:06c0a667e466fcb56a0886d924b5f29a7f0886199102f0a0e1c60a02a3751cb4"}, - {file = "websockets-13.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1f3cf6d6ec1142412d4535adabc6bd72a63f5f148c43fe559f06298bc21953c9"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1fa082ea38d5de51dd409434edc27c0dcbd5fed2b09b9be982deb6f0508d25bc"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a365bcb7be554e6e1f9f3ed64016e67e2fa03d7b027a33e436aecf194febb63"}, - {file = "websockets-13.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:10a0dc7242215d794fb1918f69c6bb235f1f627aaf19e77f05336d147fce7c37"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59197afd478545b1f73367620407b0083303569c5f2d043afe5363676f2697c9"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d20516990d8ad557b5abeb48127b8b779b0b7e6771a265fa3e91767596d7d97"}, - {file = "websockets-13.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1a2e272d067030048e1fe41aa1ec8cfbbaabce733b3d634304fa2b19e5c897f"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ad327ac80ba7ee61da85383ca8822ff808ab5ada0e4a030d66703cc025b021c4"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:518f90e6dd089d34eaade01101fd8a990921c3ba18ebbe9b0165b46ebff947f0"}, - {file = "websockets-13.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:68264802399aed6fe9652e89761031acc734fc4c653137a5911c2bfa995d6d6d"}, - {file = "websockets-13.0.1-cp39-cp39-win32.whl", hash = "sha256:a5dc0c42ded1557cc7c3f0240b24129aefbad88af4f09346164349391dea8e58"}, - {file = "websockets-13.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b448a0690ef43db5ef31b3a0d9aea79043882b4632cfc3eaab20105edecf6097"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:faef9ec6354fe4f9a2c0bbb52fb1ff852effc897e2a4501e25eb3a47cb0a4f89"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:03d3f9ba172e0a53e37fa4e636b86cc60c3ab2cfee4935e66ed1d7acaa4625ad"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d450f5a7a35662a9b91a64aefa852f0c0308ee256122f5218a42f1d13577d71e"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f55b36d17ac50aa8a171b771e15fbe1561217510c8768af3d546f56c7576cdc"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14b9c006cac63772b31abbcd3e3abb6228233eec966bf062e89e7fa7ae0b7333"}, - {file = "websockets-13.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b79915a1179a91f6c5f04ece1e592e2e8a6bd245a0e45d12fd56b2b59e559a32"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f40de079779acbcdbb6ed4c65af9f018f8b77c5ec4e17a4b737c05c2db554491"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80e4ba642fc87fa532bac07e5ed7e19d56940b6af6a8c61d4429be48718a380f"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2a02b0161c43cc9e0232711eff846569fad6ec836a7acab16b3cf97b2344c060"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6aa74a45d4cdc028561a7d6ab3272c8b3018e23723100b12e58be9dfa5a24491"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00fd961943b6c10ee6f0b1130753e50ac5dcd906130dcd77b0003c3ab797d026"}, - {file = "websockets-13.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d93572720d781331fb10d3da9ca1067817d84ad1e7c31466e9f5e59965618096"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:71e6e5a3a3728886caee9ab8752e8113670936a193284be9d6ad2176a137f376"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c4a6343e3b0714e80da0b0893543bf9a5b5fa71b846ae640e56e9abc6fbc4c83"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a678532018e435396e37422a95e3ab87f75028ac79570ad11f5bf23cd2a7d8c"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6716c087e4aa0b9260c4e579bb82e068f84faddb9bfba9906cb87726fa2e870"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e33505534f3f673270dd67f81e73550b11de5b538c56fe04435d63c02c3f26b5"}, - {file = "websockets-13.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:acab3539a027a85d568c2573291e864333ec9d912675107d6efceb7e2be5d980"}, - {file = "websockets-13.0.1-py3-none-any.whl", hash = "sha256:b80f0c51681c517604152eb6a572f5a9378f877763231fddb883ba2f968e8817"}, - {file = "websockets-13.0.1.tar.gz", hash = "sha256:4d6ece65099411cfd9a48d13701d7438d9c34f479046b34c50ff60bb8834e43e"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, + {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, + {file = "websockets-13.1-cp310-cp310-win32.whl", hash = 
"sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, + {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, + {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, + {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, + {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = 
"sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, + {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, + {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, + {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, + {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, + {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, + 
{file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, + {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, + {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, + {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, + {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, + {file = "websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, + {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, ] [[package]] name = "yarl" -version = "1.11.1" +version = "1.12.1" description = "Yet another URL library" optional = false python-versions = ">=3.8" files = [ - {file = "yarl-1.11.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:400cd42185f92de559d29eeb529e71d80dfbd2f45c36844914a4a34297ca6f00"}, - {file = "yarl-1.11.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8258c86f47e080a258993eed877d579c71da7bda26af86ce6c2d2d072c11320d"}, - {file = "yarl-1.11.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2164cd9725092761fed26f299e3f276bb4b537ca58e6ff6b252eae9631b5c96e"}, - {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08ea567c16f140af8ddc7cb58e27e9138a1386e3e6e53982abaa6f2377b38cc"}, - {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:768ecc550096b028754ea28bf90fde071c379c62c43afa574edc6f33ee5daaec"}, - {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2909fa3a7d249ef64eeb2faa04b7957e34fefb6ec9966506312349ed8a7e77bf"}, - {file = "yarl-1.11.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01a8697ec24f17c349c4f655763c4db70eebc56a5f82995e5e26e837c6eb0e49"}, - {file = "yarl-1.11.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e286580b6511aac7c3268a78cdb861ec739d3e5a2a53b4809faef6b49778eaff"}, - {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4179522dc0305c3fc9782549175c8e8849252fefeb077c92a73889ccbcd508ad"}, - {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:27fcb271a41b746bd0e2a92182df507e1c204759f460ff784ca614e12dd85145"}, - {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = 
"sha256:f61db3b7e870914dbd9434b560075e0366771eecbe6d2b5561f5bc7485f39efd"}, - {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:c92261eb2ad367629dc437536463dc934030c9e7caca861cc51990fe6c565f26"}, - {file = "yarl-1.11.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d95b52fbef190ca87d8c42f49e314eace4fc52070f3dfa5f87a6594b0c1c6e46"}, - {file = "yarl-1.11.1-cp310-cp310-win32.whl", hash = "sha256:489fa8bde4f1244ad6c5f6d11bb33e09cf0d1d0367edb197619c3e3fc06f3d91"}, - {file = "yarl-1.11.1-cp310-cp310-win_amd64.whl", hash = "sha256:476e20c433b356e16e9a141449f25161e6b69984fb4cdbd7cd4bd54c17844998"}, - {file = "yarl-1.11.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:946eedc12895873891aaceb39bceb484b4977f70373e0122da483f6c38faaa68"}, - {file = "yarl-1.11.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21a7c12321436b066c11ec19c7e3cb9aec18884fe0d5b25d03d756a9e654edfe"}, - {file = "yarl-1.11.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c35f493b867912f6fda721a59cc7c4766d382040bdf1ddaeeaa7fa4d072f4675"}, - {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25861303e0be76b60fddc1250ec5986c42f0a5c0c50ff57cc30b1be199c00e63"}, - {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4b53f73077e839b3f89c992223f15b1d2ab314bdbdf502afdc7bb18e95eae27"}, - {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:327c724b01b8641a1bf1ab3b232fb638706e50f76c0b5bf16051ab65c868fac5"}, - {file = "yarl-1.11.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4307d9a3417eea87715c9736d050c83e8c1904e9b7aada6ce61b46361b733d92"}, - {file = "yarl-1.11.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a28bed68ab8fb7e380775f0029a079f08a17799cb3387a65d14ace16c12e2b"}, - {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:067b961853c8e62725ff2893226fef3d0da060656a9827f3f520fb1d19b2b68a"}, - {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8215f6f21394d1f46e222abeb06316e77ef328d628f593502d8fc2a9117bde83"}, - {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:498442e3af2a860a663baa14fbf23fb04b0dd758039c0e7c8f91cb9279799bff"}, - {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:69721b8effdb588cb055cc22f7c5105ca6fdaa5aeb3ea09021d517882c4a904c"}, - {file = "yarl-1.11.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e969fa4c1e0b1a391f3fcbcb9ec31e84440253325b534519be0d28f4b6b533e"}, - {file = "yarl-1.11.1-cp311-cp311-win32.whl", hash = "sha256:7d51324a04fc4b0e097ff8a153e9276c2593106a811704025bbc1d6916f45ca6"}, - {file = "yarl-1.11.1-cp311-cp311-win_amd64.whl", hash = "sha256:15061ce6584ece023457fb8b7a7a69ec40bf7114d781a8c4f5dcd68e28b5c53b"}, - {file = "yarl-1.11.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:a4264515f9117be204935cd230fb2a052dd3792789cc94c101c535d349b3dab0"}, - {file = "yarl-1.11.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f41fa79114a1d2eddb5eea7b912d6160508f57440bd302ce96eaa384914cd265"}, - {file = "yarl-1.11.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:02da8759b47d964f9173c8675710720b468aa1c1693be0c9c64abb9d8d9a4867"}, - {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9361628f28f48dcf8b2f528420d4d68102f593f9c2e592bfc842f5fb337e44fd"}, - {file = 
"yarl-1.11.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b91044952da03b6f95fdba398d7993dd983b64d3c31c358a4c89e3c19b6f7aef"}, - {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:74db2ef03b442276d25951749a803ddb6e270d02dda1d1c556f6ae595a0d76a8"}, - {file = "yarl-1.11.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e975a2211952a8a083d1b9d9ba26472981ae338e720b419eb50535de3c02870"}, - {file = "yarl-1.11.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8aef97ba1dd2138112890ef848e17d8526fe80b21f743b4ee65947ea184f07a2"}, - {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a7915ea49b0c113641dc4d9338efa9bd66b6a9a485ffe75b9907e8573ca94b84"}, - {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:504cf0d4c5e4579a51261d6091267f9fd997ef58558c4ffa7a3e1460bd2336fa"}, - {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3de5292f9f0ee285e6bd168b2a77b2a00d74cbcfa420ed078456d3023d2f6dff"}, - {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a34e1e30f1774fa35d37202bbeae62423e9a79d78d0874e5556a593479fdf239"}, - {file = "yarl-1.11.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:66b63c504d2ca43bf7221a1f72fbe981ff56ecb39004c70a94485d13e37ebf45"}, - {file = "yarl-1.11.1-cp312-cp312-win32.whl", hash = "sha256:a28b70c9e2213de425d9cba5ab2e7f7a1c8ca23a99c4b5159bf77b9c31251447"}, - {file = "yarl-1.11.1-cp312-cp312-win_amd64.whl", hash = "sha256:17b5a386d0d36fb828e2fb3ef08c8829c1ebf977eef88e5367d1c8c94b454639"}, - {file = "yarl-1.11.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1fa2e7a406fbd45b61b4433e3aa254a2c3e14c4b3186f6e952d08a730807fa0c"}, - {file = "yarl-1.11.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:750f656832d7d3cb0c76be137ee79405cc17e792f31e0a01eee390e383b2936e"}, - {file = "yarl-1.11.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b8486f322d8f6a38539136a22c55f94d269addb24db5cb6f61adc61eabc9d93"}, - {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fce4da3703ee6048ad4138fe74619c50874afe98b1ad87b2698ef95bf92c96d"}, - {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ed653638ef669e0efc6fe2acb792275cb419bf9cb5c5049399f3556995f23c7"}, - {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18ac56c9dd70941ecad42b5a906820824ca72ff84ad6fa18db33c2537ae2e089"}, - {file = "yarl-1.11.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:688654f8507464745ab563b041d1fb7dab5d9912ca6b06e61d1c4708366832f5"}, - {file = "yarl-1.11.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4973eac1e2ff63cf187073cd4e1f1148dcd119314ab79b88e1b3fad74a18c9d5"}, - {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:964a428132227edff96d6f3cf261573cb0f1a60c9a764ce28cda9525f18f7786"}, - {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6d23754b9939cbab02c63434776df1170e43b09c6a517585c7ce2b3d449b7318"}, - {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c2dc4250fe94d8cd864d66018f8344d4af50e3758e9d725e94fecfa27588ff82"}, - {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = 
"sha256:09696438cb43ea6f9492ef237761b043f9179f455f405279e609f2bc9100212a"}, - {file = "yarl-1.11.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:999bfee0a5b7385a0af5ffb606393509cfde70ecca4f01c36985be6d33e336da"}, - {file = "yarl-1.11.1-cp313-cp313-win32.whl", hash = "sha256:ce928c9c6409c79e10f39604a7e214b3cb69552952fbda8d836c052832e6a979"}, - {file = "yarl-1.11.1-cp313-cp313-win_amd64.whl", hash = "sha256:501c503eed2bb306638ccb60c174f856cc3246c861829ff40eaa80e2f0330367"}, - {file = "yarl-1.11.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:dae7bd0daeb33aa3e79e72877d3d51052e8b19c9025ecf0374f542ea8ec120e4"}, - {file = "yarl-1.11.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3ff6b1617aa39279fe18a76c8d165469c48b159931d9b48239065767ee455b2b"}, - {file = "yarl-1.11.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3257978c870728a52dcce8c2902bf01f6c53b65094b457bf87b2644ee6238ddc"}, - {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f351fa31234699d6084ff98283cb1e852270fe9e250a3b3bf7804eb493bd937"}, - {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8aef1b64da41d18026632d99a06b3fefe1d08e85dd81d849fa7c96301ed22f1b"}, - {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7175a87ab8f7fbde37160a15e58e138ba3b2b0e05492d7351314a250d61b1591"}, - {file = "yarl-1.11.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba444bdd4caa2a94456ef67a2f383710928820dd0117aae6650a4d17029fa25e"}, - {file = "yarl-1.11.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ea9682124fc062e3d931c6911934a678cb28453f957ddccf51f568c2f2b5e05"}, - {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:8418c053aeb236b20b0ab8fa6bacfc2feaaf7d4683dd96528610989c99723d5f"}, - {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:61a5f2c14d0a1adfdd82258f756b23a550c13ba4c86c84106be4c111a3a4e413"}, - {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f3a6d90cab0bdf07df8f176eae3a07127daafcf7457b997b2bf46776da2c7eb7"}, - {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:077da604852be488c9a05a524068cdae1e972b7dc02438161c32420fb4ec5e14"}, - {file = "yarl-1.11.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:15439f3c5c72686b6c3ff235279630d08936ace67d0fe5c8d5bbc3ef06f5a420"}, - {file = "yarl-1.11.1-cp38-cp38-win32.whl", hash = "sha256:238a21849dd7554cb4d25a14ffbfa0ef380bb7ba201f45b144a14454a72ffa5a"}, - {file = "yarl-1.11.1-cp38-cp38-win_amd64.whl", hash = "sha256:67459cf8cf31da0e2cbdb4b040507e535d25cfbb1604ca76396a3a66b8ba37a6"}, - {file = "yarl-1.11.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:884eab2ce97cbaf89f264372eae58388862c33c4f551c15680dd80f53c89a269"}, - {file = "yarl-1.11.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a336eaa7ee7e87cdece3cedb395c9657d227bfceb6781295cf56abcd3386a26"}, - {file = "yarl-1.11.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87f020d010ba80a247c4abc335fc13421037800ca20b42af5ae40e5fd75e7909"}, - {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:637c7ddb585a62d4469f843dac221f23eec3cbad31693b23abbc2c366ad41ff4"}, - {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48dfd117ab93f0129084577a07287376cc69c08138694396f305636e229caa1a"}, - {file = 
"yarl-1.11.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75e0ae31fb5ccab6eda09ba1494e87eb226dcbd2372dae96b87800e1dcc98804"}, - {file = "yarl-1.11.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f46f81501160c28d0c0b7333b4f7be8983dbbc161983b6fb814024d1b4952f79"}, - {file = "yarl-1.11.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:04293941646647b3bfb1719d1d11ff1028e9c30199509a844da3c0f5919dc520"}, - {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:250e888fa62d73e721f3041e3a9abf427788a1934b426b45e1b92f62c1f68366"}, - {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e8f63904df26d1a66aabc141bfd258bf738b9bc7bc6bdef22713b4f5ef789a4c"}, - {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:aac44097d838dda26526cffb63bdd8737a2dbdf5f2c68efb72ad83aec6673c7e"}, - {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:267b24f891e74eccbdff42241c5fb4f974de2d6271dcc7d7e0c9ae1079a560d9"}, - {file = "yarl-1.11.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6907daa4b9d7a688063ed098c472f96e8181733c525e03e866fb5db480a424df"}, - {file = "yarl-1.11.1-cp39-cp39-win32.whl", hash = "sha256:14438dfc5015661f75f85bc5adad0743678eefee266ff0c9a8e32969d5d69f74"}, - {file = "yarl-1.11.1-cp39-cp39-win_amd64.whl", hash = "sha256:94d0caaa912bfcdc702a4204cd5e2bb01eb917fc4f5ea2315aa23962549561b0"}, - {file = "yarl-1.11.1-py3-none-any.whl", hash = "sha256:72bf26f66456baa0584eff63e44545c9f0eaed9b73cb6601b647c91f14c11f38"}, - {file = "yarl-1.11.1.tar.gz", hash = "sha256:1bb2d9e212fb7449b8fb73bc461b51eaa17cc8430b4a87d87be7b25052d92f53"}, + {file = "yarl-1.12.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:64c5b0f2b937fe40d0967516eee5504b23cb247b8b7ffeba7213a467d9646fdc"}, + {file = "yarl-1.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2e430ac432f969ef21770645743611c1618362309e3ad7cab45acd1ad1a540ff"}, + {file = "yarl-1.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3e26e64f42bce5ddf9002092b2c37b13071c2e6413d5c05f9fa9de58ed2f7749"}, + {file = "yarl-1.12.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0103c52f8dfe5d573c856322149ddcd6d28f51b4d4a3ee5c4b3c1b0a05c3d034"}, + {file = "yarl-1.12.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b63465b53baeaf2122a337d4ab57d6bbdd09fcadceb17a974cfa8a0300ad9c67"}, + {file = "yarl-1.12.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17d4dc4ff47893a06737b8788ed2ba2f5ac4e8bb40281c8603920f7d011d5bdd"}, + {file = "yarl-1.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8b54949267bd5704324397efe9fbb6aa306466dee067550964e994d309db5f1"}, + {file = "yarl-1.12.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10b690cd78cbaca2f96a7462f303fdd2b596d3978b49892e4b05a7567c591572"}, + {file = "yarl-1.12.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c85ab016e96a975afbdb9d49ca90f3bca9920ef27c64300843fe91c3d59d8d20"}, + {file = "yarl-1.12.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c1caa5763d1770216596e0a71b5567f27aac28c95992110212c108ec74589a48"}, + {file = "yarl-1.12.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:595bbcdbfc4a9c6989d7489dca8510cba053ff46b16c84ffd95ac8e90711d419"}, + {file = "yarl-1.12.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = 
"sha256:e64f0421892a207d3780903085c1b04efeb53b16803b23d947de5a7261b71355"}, + {file = "yarl-1.12.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:319c206e83e46ec2421b25b300c8482b6fe8a018baca246be308c736d9dab267"}, + {file = "yarl-1.12.1-cp310-cp310-win32.whl", hash = "sha256:da045bd1147d12bd43fb032296640a7cc17a7f2eaba67495988362e99db24fd2"}, + {file = "yarl-1.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:aebbd47df77190ada603157f0b3670d578c110c31746ecc5875c394fdcc59a99"}, + {file = "yarl-1.12.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:28389a68981676bf74e2e199fe42f35d1aa27a9c98e3a03e6f58d2d3d054afe1"}, + {file = "yarl-1.12.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f736f54565f8dd7e3ab664fef2bc461d7593a389a7f28d4904af8d55a91bd55f"}, + {file = "yarl-1.12.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dee0496d5f1a8f57f0f28a16f81a2033fc057a2cf9cd710742d11828f8c80e2"}, + {file = "yarl-1.12.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8981a94a27ac520a398302afb74ae2c0be1c3d2d215c75c582186a006c9e7b0"}, + {file = "yarl-1.12.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff54340fc1129e8e181827e2234af3ff659b4f17d9bbe77f43bc19e6577fadec"}, + {file = "yarl-1.12.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:54c8cee662b5f8c30ad7eedfc26123f845f007798e4ff1001d9528fe959fd23c"}, + {file = "yarl-1.12.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e97a29b37830ba1262d8dfd48ddb5b28ad4d3ebecc5d93a9c7591d98641ec737"}, + {file = "yarl-1.12.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c89894cc6f6ddd993813e79244b36b215c14f65f9e4f1660b1f2ba9e5594b95"}, + {file = "yarl-1.12.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:712ba8722c0699daf186de089ddc4677651eb9875ed7447b2ad50697522cbdd9"}, + {file = "yarl-1.12.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6e9a9f50892153bad5046c2a6df153224aa6f0573a5a8ab44fc54a1e886f6e21"}, + {file = "yarl-1.12.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1d4017e78fb22bc797c089b746230ad78ecd3cdb215bc0bd61cb72b5867da57e"}, + {file = "yarl-1.12.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f494c01b28645c431239863cb17af8b8d15b93b0d697a0320d5dd34cd9d7c2fa"}, + {file = "yarl-1.12.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:de4544b1fb29cf14870c4e2b8a897c0242449f5dcebd3e0366aa0aa3cf58a23a"}, + {file = "yarl-1.12.1-cp311-cp311-win32.whl", hash = "sha256:7564525a4673fde53dee7d4c307a961c0951918f0b8c7f09b2c9e02067cf6504"}, + {file = "yarl-1.12.1-cp311-cp311-win_amd64.whl", hash = "sha256:f23bb1a7a6e8e8b612a164fdd08e683bcc16c76f928d6dbb7bdbee2374fbfee6"}, + {file = "yarl-1.12.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a3e2aff8b822ab0e0bdbed9f50494b3a35629c4b9488ae391659973a37a9f53f"}, + {file = "yarl-1.12.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:22dda2799c8d39041d731e02bf7690f0ef34f1691d9ac9dfcb98dd1e94c8b058"}, + {file = "yarl-1.12.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:18c2a7757561f05439c243f517dbbb174cadfae3a72dee4ae7c693f5b336570f"}, + {file = "yarl-1.12.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:835010cc17d0020e7931d39e487d72c8e01c98e669b6896a8b8c9aa8ca69a949"}, + {file = "yarl-1.12.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2254fe137c4a360b0a13173a56444f756252c9283ba4d267ca8e9081cd140ea"}, + {file = 
"yarl-1.12.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6a071d2c3d39b4104f94fc08ab349e9b19b951ad4b8e3b6d7ea92d6ef7ccaf8"}, + {file = "yarl-1.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73a183042ae0918c82ce2df38c3db2409b0eeae88e3afdfc80fb67471a95b33b"}, + {file = "yarl-1.12.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:326b8a079a9afcac0575971e56dabdf7abb2ea89a893e6949b77adfeb058b50e"}, + {file = "yarl-1.12.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:126309c0f52a2219b3d1048aca00766429a1346596b186d51d9fa5d2070b7b13"}, + {file = "yarl-1.12.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ba1c779b45a399cc25f511c681016626f69e51e45b9d350d7581998722825af9"}, + {file = "yarl-1.12.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:af1107299cef049ad00a93df4809517be432283a0847bcae48343ebe5ea340dc"}, + {file = "yarl-1.12.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:20d817c0893191b2ab0ba30b45b77761e8dfec30a029b7c7063055ca71157f84"}, + {file = "yarl-1.12.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d4f818f6371970d6a5d1e42878389bbfb69dcde631e4bbac5ec1cb11158565ca"}, + {file = "yarl-1.12.1-cp312-cp312-win32.whl", hash = "sha256:0ac33d22b2604b020569a82d5f8a03ba637ba42cc1adf31f616af70baf81710b"}, + {file = "yarl-1.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:fd24996e12e1ba7c397c44be75ca299da14cde34d74bc5508cce233676cc68d0"}, + {file = "yarl-1.12.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dea360778e0668a7ad25d7727d03364de8a45bfd5d808f81253516b9f2217765"}, + {file = "yarl-1.12.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1f50a37aeeb5179d293465e522fd686080928c4d89e0ff215e1f963405ec4def"}, + {file = "yarl-1.12.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0274b1b7a9c9c32b7bf250583e673ff99fb9fccb389215841e2652d9982de740"}, + {file = "yarl-1.12.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4f3ab9eb8ab2d585ece959c48d234f7b39ac0ca1954a34d8b8e58a52064bdb3"}, + {file = "yarl-1.12.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8d31dd0245d88cf7239e96e8f2a99f815b06e458a5854150f8e6f0e61618d41b"}, + {file = "yarl-1.12.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a96198d5d26f40557d986c1253bfe0e02d18c9d9b93cf389daf1a3c9f7c755fa"}, + {file = "yarl-1.12.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddae504cfb556fe220efae65e35be63cd11e3c314b202723fc2119ce19f0ca2e"}, + {file = "yarl-1.12.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bce00f3b1f7f644faae89677ca68645ed5365f1c7f874fdd5ebf730a69640d38"}, + {file = "yarl-1.12.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eee5ff934b0c9f4537ff9596169d56cab1890918004791a7a06b879b3ba2a7ef"}, + {file = "yarl-1.12.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4ea99e64b2ad2635e0f0597b63f5ea6c374791ff2fa81cdd4bad8ed9f047f56f"}, + {file = "yarl-1.12.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:5c667b383529520b8dd6bd496fc318678320cb2a6062fdfe6d3618da6b8790f6"}, + {file = "yarl-1.12.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d920401941cb898ef089422e889759dd403309eb370d0e54f1bdf6ca07fef603"}, + {file = "yarl-1.12.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:501a1576716032cc6d48c7c47bcdc42d682273415a8f2908e7e72cb4625801f3"}, + {file = 
"yarl-1.12.1-cp313-cp313-win32.whl", hash = "sha256:24416bb5e221e29ddf8aac5b97e94e635ca2c5be44a1617ad6fe32556df44294"}, + {file = "yarl-1.12.1-cp313-cp313-win_amd64.whl", hash = "sha256:71af3766bb46738d12cc288d9b8de7ef6f79c31fd62757e2b8a505fe3680b27f"}, + {file = "yarl-1.12.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c924deab8105f86980983eced740433fb7554a7f66db73991affa4eda99d5402"}, + {file = "yarl-1.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5fb475a4cdde582c9528bb412b98f899680492daaba318231e96f1a0a1bb0d53"}, + {file = "yarl-1.12.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:36ee0115b9edca904153a66bb74a9ff1ce38caff015de94eadfb9ba8e6ecd317"}, + {file = "yarl-1.12.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2631c9d7386bd2d4ce24ecc6ebf9ae90b3efd713d588d90504eaa77fec4dba01"}, + {file = "yarl-1.12.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2376d8cf506dffd0e5f2391025ae8675b09711016656590cb03b55894161fcfa"}, + {file = "yarl-1.12.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:24197ba3114cc85ddd4091e19b2ddc62650f2e4a899e51b074dfd52d56cf8c72"}, + {file = "yarl-1.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfdf419bf5d3644f94cd7052954fc233522f5a1b371fc0b00219ebd9c14d5798"}, + {file = "yarl-1.12.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8112f640a4f7e7bf59f7cabf0d47a29b8977528c521d73a64d5cc9e99e48a174"}, + {file = "yarl-1.12.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:607d12f0901f6419a8adceb139847c42c83864b85371f58270e42753f9780fa6"}, + {file = "yarl-1.12.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:664380c7ed524a280b6a2d5d9126389c3e96cd6e88986cdb42ca72baa27421d6"}, + {file = "yarl-1.12.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:0d0a5e87bc48d76dfcfc16295201e9812d5f33d55b4a0b7cad1025b92bf8b91b"}, + {file = "yarl-1.12.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:eff6bac402719c14e17efe845d6b98593c56c843aca6def72080fbede755fd1f"}, + {file = "yarl-1.12.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:22839d1d1eab9e4b427828a88a22beb86f67c14d8ff81175505f1cc8493f3500"}, + {file = "yarl-1.12.1-cp38-cp38-win32.whl", hash = "sha256:717f185086bb9d817d4537dd18d5df5d657598cd00e6fc22e4d54d84de266c1d"}, + {file = "yarl-1.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:71978ba778948760cff528235c951ea0ef7a4f9c84ac5a49975f8540f76c3f73"}, + {file = "yarl-1.12.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:30ffc046ebddccb3c4cac72c1a3e1bc343492336f3ca86d24672e90ccc5e788a"}, + {file = "yarl-1.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f10954b233d4df5cc3137ffa5ced97f8894152df817e5d149bf05a0ef2ab8134"}, + {file = "yarl-1.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2e912b282466444023610e4498e3795c10e7cfd641744524876239fcf01d538d"}, + {file = "yarl-1.12.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6af871f70cfd5b528bd322c65793b5fd5659858cdfaa35fbe563fb99b667ed1f"}, + {file = "yarl-1.12.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3e4e1f7b08d1ec6b685ccd3e2d762219c550164fbf524498532e39f9413436e"}, + {file = "yarl-1.12.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9a7ee79183f0b17dcede8b6723e7da2ded529cf159a878214be9a5d3098f5b1e"}, + {file = "yarl-1.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:96c8ff1e1dd680e38af0887927cab407a4e51d84a5f02ae3d6eb87233036c763"}, + {file = "yarl-1.12.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e9905fc2dc1319e4c39837b906a024cf71b1261cc66b0cd89678f779c0c61f5"}, + {file = "yarl-1.12.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:01549468858b87d36f967c97d02e6e54106f444aeb947ed76f8f71f85ed07cec"}, + {file = "yarl-1.12.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:96b34830bd6825ca0220bf005ea99ac83eb9ce51301ddb882dcf613ae6cd95fb"}, + {file = "yarl-1.12.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2aee7594d2c2221c717a8e394bbed4740029df4c0211ceb0f04815686e99c795"}, + {file = "yarl-1.12.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:15871130439ad10abb25a4631120d60391aa762b85fcab971411e556247210a0"}, + {file = "yarl-1.12.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:838dde2cb570cfbb4cab8a876a0974e8b90973ea40b3ac27a79b8a74c8a2db15"}, + {file = "yarl-1.12.1-cp39-cp39-win32.whl", hash = "sha256:eacbcf30efaca7dc5cb264228ffecdb95fdb1e715b1ec937c0ce6b734161e0c8"}, + {file = "yarl-1.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:76a59d1b63de859398bc7764c860a769499511463c1232155061fe0147f13e01"}, + {file = "yarl-1.12.1-py3-none-any.whl", hash = "sha256:dc3192a81ecd5ff954cecd690327badd5a84d00b877e1573f7c9097ce13e5bfb"}, + {file = "yarl-1.12.1.tar.gz", hash = "sha256:5b860055199aec8d6fe4dcee3c5196ce506ca198a50aab0059ffd26e8e815828"}, ] [package.dependencies] @@ -3214,4 +3252,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "93273883329bcbbe2ef055cfc730dbcf6c71f66c975d1a6ee7d4920172440433" +content-hash = "4017dc40f5e58304aff484656ff103fc1fa0072af0c915f2aa2e598be3c7e983" diff --git a/pyproject.toml b/pyproject.toml index 26a7a2850215f..1afa0437c5010 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,6 @@ langchain-core = { path = "libs/core/", develop = true } langchain-text-splitters = { path = "libs/text-splitters", develop = true } langchain-community = { path = "libs/community/", develop = true } langchain = { path = "libs/langchain/", develop = true } -langchain-experimental = { path = "libs/experimental/", develop = true } langchain-openai = { path = "libs/partners/openai", develop = true } [tool.poetry.group.codespell.dependencies] @@ -36,7 +35,6 @@ langchain-core = { path = "libs/core/", develop = true } langchain-text-splitters = { path = "libs/text-splitters", develop = true } langchain-community = { path = "libs/community/", develop = true } langchain = { path = "libs/langchain/", develop = true } -langchain-experimental = { path = "libs/experimental/", develop = true } langchain-openai = { path = "libs/partners/openai", develop = true } ipykernel = "^6.29.2"