diff --git a/CHANGELOG.md b/CHANGELOG.md index 53bb5f470b276..b96976e57c819 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,44 @@ # ChangeLog +## [2024-12-18] + +### `llama-index-core` [0.12.7] + +- fix: add a timeout to langchain callback handler (#17296) +- fix: make Document serialization event more backward compatible (#17312) + +### `llama-index-embeddings-voyageai` [0.3.4] + +- Exposing additional keyword arguments for VoyageAI's embedding model (#17315) + +### `llama-index-llms-keywordsai` [0.1.0] + +- Added KeywordsAI LLM (#16860) + +### `llama-index-llms-oci-genai` [0.4.0] + +- Add OCI Generative AI tool calling support (#16888) + +### `llama-index-llms-openai` [0.3.11] + +- support new o1 models (#17307) + +### `llama-index-postprocessor-voyageai-rerank` [0.3.1] + +- VoyageAI Reranker optional API Key (#17310) + +### `llama-index-vector-stores-azureaisearch` [0.3.1] + +- improve async search client handling (#17319) + +### `llama-index-vector-stores-azurecosmosmongo` [0.4.0] + +- CosmosDB insertion timestamp bugfix (#17290) + +### `llama-index-vector-stores-azurecosmosnosql` [1.3.0] + +- CosmosDB insertion timestamp bugfix (#17290) + ## [2024-12-17] ### `llama-index-core` [0.12.6] diff --git a/docs/docs/CHANGELOG.md b/docs/docs/CHANGELOG.md index 792f06c8a6367..b96976e57c819 100644 --- a/docs/docs/CHANGELOG.md +++ b/docs/docs/CHANGELOG.md @@ -1,10 +1,49 @@ # ChangeLog +## [2024-12-18] + +### `llama-index-core` [0.12.7] + +- fix: add a timeout to langchain callback handler (#17296) +- fix: make Document serialization event more backward compatible (#17312) + +### `llama-index-embeddings-voyageai` [0.3.4] + +- Exposing additional keyword arguments for VoyageAI's embedding model (#17315) + +### `llama-index-llms-keywordsai` [0.1.0] + +- Added KeywordsAI LLM (#16860) + +### `llama-index-llms-oci-genai` [0.4.0] + +- Add OCI Generative AI tool calling support (#16888) + +### `llama-index-llms-openai` [0.3.11] + +- support new o1 models (#17307) + +### `llama-index-postprocessor-voyageai-rerank` [0.3.1] + +- VoyageAI Reranker optional API Key (#17310) + +### `llama-index-vector-stores-azureaisearch` [0.3.1] + +- improve async search client handling (#17319) + +### `llama-index-vector-stores-azurecosmosmongo` [0.4.0] + +- CosmosDB insertion timestamp bugfix (#17290) + +### `llama-index-vector-stores-azurecosmosnosql` [1.3.0] + +- CosmosDB insertion timestamp bugfix (#17290) + ## [2024-12-17] ### `llama-index-core` [0.12.6] -- [bug fix] Ensure that StopEvent gets cleared from Context._in_progress["_done"] after a Workflow run (#17300) +- [bug fix] Ensure that StopEvent gets cleared from Context.\_in_progress["_done"] after a Workflow run (#17300) - fix: add a timeout to langchain callback handler (#17296) - tweak User vs tool in react prompts (#17273) - refact: Refactor Document to be natively multimodal (#17204) diff --git a/docs/docs/examples/llm/oci_genai.ipynb b/docs/docs/examples/llm/oci_genai.ipynb index 3caeb6ca6d1d0..52129cb5f8b73 100644 --- a/docs/docs/examples/llm/oci_genai.ipynb +++ b/docs/docs/examples/llm/oci_genai.ipynb @@ -1,14 +1,5 @@ { "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "id": "6d1ca9ac", - "metadata": {}, - "source": [ - "\"Open" - ] - }, { "cell_type": "markdown", "id": "9e3a8796-edc8-43f2-94ad-fe4fb20d70ed", @@ -360,6 +351,56 @@ "resp = llm.chat(messages)\n", "print(resp)" ] + }, + { + "cell_type": "markdown", + "id": "acd73b3d", + "metadata": {}, + "source": [ + "## Basic tool calling in llamaindex \n", + "\n", + "Only Cohere 
supports tool calling for now."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "5546c661",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.llms.oci_genai import OCIGenAI\n",
+    "from llama_index.core.tools import FunctionTool\n",
+    "\n",
+    "llm = OCIGenAI(\n",
+    "    model=\"MY_MODEL\",\n",
+    "    service_endpoint=\"https://inference.generativeai.us-chicago-1.oci.oraclecloud.com\",\n",
+    "    compartment_id=\"MY_OCID\",\n",
+    ")\n",
+    "\n",
+    "\n",
+    "def multiply(a: int, b: int) -> int:\n",
+    "    \"\"\"Multiply two integers and return the resulting integer.\"\"\"\n",
+    "    return a * b\n",
+    "\n",
+    "\n",
+    "def add(a: int, b: int) -> int:\n",
+    "    \"\"\"Addition function on two integers.\"\"\"\n",
+    "    return a + b\n",
+    "\n",
+    "\n",
+    "add_tool = FunctionTool.from_defaults(fn=add)\n",
+    "multiply_tool = FunctionTool.from_defaults(fn=multiply)\n",
+    "\n",
+    "response = llm.chat_with_tools(\n",
+    "    tools=[add_tool, multiply_tool],\n",
+    "    user_msg=\"What is 3 * 12? Also, what is 11 + 49?\",\n",
+    ")\n",
+    "\n",
+    "print(response)\n",
+    "tool_calls = response.message.additional_kwargs.get(\"tool_calls\", [])\n",
+    "print(tool_calls)"
+   ]
+  }
 ],
 "metadata": {
diff --git a/docs/docs/examples/multi_modal/multimodal_rag_guardrail_gemini_llmguard_llmguard.ipynb b/docs/docs/examples/multi_modal/multimodal_rag_guardrail_gemini_llmguard_llmguard.ipynb
new file mode 100644
index 0000000000000..0e94b01844873
--- /dev/null
+++ b/docs/docs/examples/multi_modal/multimodal_rag_guardrail_gemini_llmguard_llmguard.ipynb
@@ -0,0 +1,1202 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "93ae9bad-b8cc-43de-ba7d-387e0155674c",
+   "metadata": {},
+   "source": [
+    "## Multimodal RAG Pipeline with Guardrails Provided by LLM-GUARD\n",
+    "\n",
+    "This guide introduces a robust **Multimodal Retrieval-Augmented Generation (RAG)** pipeline enhanced with integrated **guardrails** from **LLM Guard** for secure, reliable, and contextually accurate responses. The pipeline processes multimodal inputs such as text, tables, images, and diagrams while employing guardrails to monitor and validate both input and output.\n",
+    "For more detail, see the README.md at: https://github.com/vntuananhbui/MultimodalRAG-LlamaIndex-Guardrail/blob/main/README.md\n",
+    "\n",
+    "### Note:\n",
+    "This pipeline leverages the **Gemini 1.5 Flash** model through a free API for inference, making it accessible and cost-effective for development and experimentation.\n",
+    "\n",
+    "### Extension:\n",
+    "You can also use other guardrail frameworks, such as **Guardrails AI**.\n",
+    "\n",
+    "---\n",
+    "\n",
+    "## Overview of the Pipeline\n",
+    "\n",
+    "The Multimodal RAG pipeline is designed to overcome the limitations of traditional text-based RAG systems by natively handling diverse document layouts and modalities. It leverages both text and image embeddings to retrieve and synthesize context-aware answers.\n",
+    "\n",
+    "### Key Features:\n",
+    "1. **Multimodal Input Processing**:\n",
+    "   - Handles text, images, and complex layouts directly.\n",
+    "   - Converts document content into robust embeddings for retrieval.\n",
+    "\n",
+    "2. **Guardrails Integration**:\n",
+    "   - Adds input/output scanners to enforce safety and quality.\n",
+    "   - Dynamically validates queries and responses for risks such as toxicity or token overflow.\n",
+    "\n",
+    "3. 
**Custom Query Engine**:\n", + " - Designed to incorporate guardrails into query handling.\n", + " - Dynamically blocks, sanitizes, or validates inputs/outputs based on scanner results.\n", + "\n", + "4. **Cost-Effective Implementation**:\n", + " - Uses **Gemini 1.5 Flash** via a free API, minimizing costs while maintaining high performance.\n", + "\n", + "---\n", + "\n", + "## Why Add Guardrails to Multimodal RAG?\n", + "\n", + "While Multimodal RAG pipelines are powerful, they are prone to risks such as inappropriate inputs, hallucinated outputs, or exceeding token limits. Guardrails act as safeguards, ensuring:\n", + "- **Safety**: Prevents harmful or offensive queries and outputs.\n", + "- **Reliability**: Validates the integrity of responses.\n", + "- **Scalability**: Enables the pipeline to handle complex scenarios dynamically.\n", + "\n", + "---\n", + "\n", + "## Architecture Overview\n", + "\n", + "### 1. Input Scanners\n", + "Input scanners validate incoming queries before they are processed. For example:\n", + "- **Toxicity Scanner**: Detects and blocks harmful language.\n", + "- **Token Limit Scanner**: Ensures queries do not exceed processing limits.\n", + "\n", + "### 2. Custom Query Engine\n", + "The query engine integrates retrieval and synthesis while applying guardrails at multiple stages:\n", + "- **Pre-processing**: Validates input queries using scanners.\n", + "- **Processing**: Retrieves relevant nodes using multimodal embeddings.\n", + "- **Post-processing**: Sanitizes and validates outputs.\n", + "\n", + "### 3. Multimodal LLM\n", + "The pipeline uses a **multimodal LLM (e.g., Gemini 1.5 Flash)** capable of understanding and generating context-aware text and image-based outputs. Its free API access makes it suitable for development without incurring significant costs.\n", + "\n", + "---\n", + "\n", + "## Guardrails Workflow\n", + "\n", + "### Input Validation\n", + "1. Scans incoming queries using pre-defined scanners.\n", + "2. Blocks or sanitizes queries based on scanner results.\n", + "\n", + "### Retrieval\n", + "1. Fetches relevant text and image nodes.\n", + "2. Converts content into embeddings for synthesis.\n", + "\n", + "### Output Validation\n", + "1. Analyzes generated responses with output scanners.\n", + "2. Blocks or sanitizes outputs based on thresholds (e.g., toxicity).\n", + "\n", + "---\n", + "\n", + "## Benefits of the Multimodal RAG with Guardrails\n", + "1. **Improved Safety**: Queries and responses are validated to reduce risk.\n", + "2. **Enhanced Robustness**: Multimodal inputs are processed without loss of context.\n", + "3. **Dynamic Control**: Guardrails provide flexibility to handle diverse inputs and outputs.\n", + "4. 
**Cost-Efficiency**: Selective application of input/output validation optimizes resources, while the free **Gemini 1.5 Flash** API reduces operational expenses.\n", + "\n", + "---\n", + "\n", + "This pipeline demonstrates how a **natively multimodal RAG system** can be augmented with **guardrails** to deliver secure, reliable, and high-quality results in complex document environments while remaining cost-effective through the use of free APIs.\n" + ] + }, + { + "cell_type": "markdown", + "id": "54e8d9a7-5036-4d32-818f-00b2e888521f", + "metadata": {}, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "70ccdd53-e68a-4199-aacb-cfe71ad1ff0b", + "metadata": {}, + "outputs": [], + "source": [ + "import nest_asyncio\n", + "\n", + "nest_asyncio.apply()" + ] + }, + { + "cell_type": "markdown", + "id": "225c5556-a789-4386-a1ee-cce01dbeb6cf", + "metadata": {}, + "source": [ + "### Setup Observability\n" + ] + }, + { + "cell_type": "markdown", + "id": "fbb362db-b1b1-4eea-be1a-b1f78b0779d7", + "metadata": {}, + "source": [ + "### Load Data\n", + "\n", + "Here we load the [Conoco Phillips 2023 investor meeting slide deck](https://static.conocophillips.com/files/2023-conocophillips-aim-presentation.pdf)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8bce3407-a7d2-47e8-9eaf-ab297a94750c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "mkdir: data: File exists\n", + "mkdir: data_images: File exists\n", + "zsh:1: command not found: wget\n" + ] + } + ], + "source": [ + "!mkdir data\n", + "!mkdir data_images\n", + "!wget \"https://static.conocophillips.com/files/2023-conocophillips-aim-presentation.pdf\" -O data/conocophillips.pdf" + ] + }, + { + "cell_type": "markdown", + "id": "626d7063", + "metadata": {}, + "source": [ + "### Install Dependency\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e6b7b54a", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install llama-index\n", + "!pip install llama-parse\n", + "!pip install llama-index-llms-langchain\n", + "!pip install llama-index-embeddings-huggingface\n", + "!pip install llama-index-llms-gemini\n", + "!pip install llama-index-multi-modal-llms-gemini\n", + "!pip install litellm\n", + "!pip install llm-guard" + ] + }, + { + "cell_type": "markdown", + "id": "246ba6b0-51af-42f9-b1b2-8d3e721ef782", + "metadata": {}, + "source": [ + "### Model Setup\n", + "\n", + "Setup models that will be used for downstream orchestration." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16e2071d-bbc2-4707-8ae7-cb4e1fecafd3", + "metadata": {}, + "outputs": [], + "source": [ + "from llama_index.core import Settings\n", + "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n", + "from llama_index.llms.gemini import Gemini\n", + "from llama_index.multi_modal_llms.gemini import GeminiMultiModal\n", + "import os\n", + "\n", + "LlamaCloud_API_KEY = \"\"\n", + "MultiGeminiKey = \"\"\n", + "GOOGLE_API_KEY = \"\"\n", + "os.environ[\"GOOGLE_API_KEY\"] = GOOGLE_API_KEY\n", + "os.environ[\"GEMINI_API_KEY\"] = GOOGLE_API_KEY\n", + "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n", + "gemini_multimodal = GeminiMultiModal(\n", + " model_name=\"models/gemini-1.5-flash\", api_key=MultiGeminiKey\n", + ")\n", + "api_key = GOOGLE_API_KEY\n", + "llamaAPI_KEY = LlamaCloud_API_KEY\n", + "llm = Gemini(model=\"models/gemini-1.5-flash\", api_key=api_key)\n", + "Settings.llm = llm" + ] + }, + { + "cell_type": "markdown", + "id": "e3f6416f-f580-4722-aaa9-7f3500408547", + "metadata": {}, + "source": [ + "## Use LlamaParse to Parse Text and Images\n", + "\n", + "In this example, use LlamaParse to parse both the text and images from the document.\n", + "\n", + "We parse out the text in two ways: \n", + "- in regular `text` mode using our default text layout algorithm\n", + "- in `markdown` mode using GPT-4o (`gpt4o_mode=True`). This also allows us to capture page screenshots" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "570089e5-238a-4dcc-af65-96e7393c2b4d", + "metadata": {}, + "outputs": [], + "source": [ + "from llama_parse import LlamaParse\n", + "\n", + "\n", + "parser_text = LlamaParse(result_type=\"text\", api_key=llamaAPI_KEY)\n", + "parser_gpt4o = LlamaParse(\n", + " result_type=\"markdown\", gpt4o_mode=True, api_key=llamaAPI_KEY\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ef82a985-4088-4bb7-9a21-0318e1b9207d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Parsing text...\n", + "Started parsing the file under job_id e79a470b-e8d3-4f55-a048-d1b3d81b6d1e\n", + "Parsing PDF file...\n", + "Started parsing the file under job_id 84943607-b630-45bd-bf89-8470840e73b5\n" + ] + } + ], + "source": [ + "print(f\"Parsing text...\")\n", + "docs_text = parser_text.load_data(\"data/conocophillips.pdf\")\n", + "print(f\"Parsing PDF file...\")\n", + "md_json_objs = parser_gpt4o.get_json_result(\"data/conocophillips.pdf\")\n", + "md_json_list = md_json_objs[0][\"pages\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5318fb7b-fe6a-4a8a-b82e-4ed7b4512c37", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Commitment to Disciplined Reinvestment Rate\n", + "\n", + "| Period | Description | Reinvestment Rate | WTI Average |\n", + "|--------------|------------------------------------|-------------------|-------------|\n", + "| 2012-2016 | Industry Growth Focus | >100% | ~$75/BBL |\n", + "| 2017-2022 | ConocoPhillips Strategy Reset | <60% | ~$63/BBL |\n", + "| 2023E | | | at $80/BBL |\n", + "| 2024-2028 | Disciplined Reinvestment Rate | ~50% | at $60/BBL |\n", + "| 2029-2032 | | ~6% CFO CAGR | at $60/BBL |\n", + "\n", + "- **Historic Reinvestment Rate**: Shown in gray.\n", + "- **Reinvestment Rate at $60/BBL WTI**: Shown in blue.\n", + "- **Reinvestment Rate at $80/BBL WTI**: Shown with dashed lines.\n", + "\n", + 
"**Note**: Reinvestment rate and cash from operations (CFO) are non-GAAP measures. Definitions and reconciliations are included in the Appendix.\n" + ] + } + ], + "source": [ + "print(md_json_list[10][\"md\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eeadb16c-97eb-4622-9551-b34d7f90d72f", + "metadata": {}, + "outputs": [], + "source": [ + "image_dicts = parser_gpt4o.get_images(\n", + " md_json_objs, download_path=\"data_images\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "fd3e098b-0606-4429-b48d-d4fe0140fc0e", + "metadata": {}, + "source": [ + "## Build Multimodal Index\n", + "\n", + "In this section we build the multimodal index over the parsed deck. \n", + "\n", + "We do this by creating **text** nodes from the document that contain metadata referencing the original image path.\n", + "\n", + "In this example we're indexing the text node for retrieval. The text node has a reference to both the parsed text as well as the image screenshot." + ] + }, + { + "cell_type": "markdown", + "id": "3aae2dee-9d85-4604-8a51-705d4db527f7", + "metadata": {}, + "source": [ + "#### Get Text Nodes" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18c24174-05ce-417f-8dd2-79c3f375db03", + "metadata": {}, + "outputs": [], + "source": [ + "from llama_index.core.schema import TextNode\n", + "from typing import Optional" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8e331dfe-a627-4e23-8c57-70ab1d9342e4", + "metadata": {}, + "outputs": [], + "source": [ + "# get pages loaded through llamaparse\n", + "import re\n", + "\n", + "\n", + "def get_page_number(file_name):\n", + " match = re.search(r\"-page-(\\d+)\\.jpg$\", str(file_name))\n", + " if match:\n", + " return int(match.group(1))\n", + " return 0\n", + "\n", + "\n", + "def _get_sorted_image_files(image_dir):\n", + " \"\"\"Get image files sorted by page.\"\"\"\n", + " raw_files = [f for f in list(Path(image_dir).iterdir()) if f.is_file()]\n", + " sorted_files = sorted(raw_files, key=get_page_number)\n", + " return sorted_files" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "346fe5ef-171e-4a54-9084-7a7805103a13", + "metadata": {}, + "outputs": [], + "source": [ + "# Assuming TextNode class is defined somewhere else in your code\n", + "# Attach image metadata to the text nodes\n", + "def get_text_nodes(docs, image_dir=None, json_dicts=None):\n", + " \"\"\"Split docs into nodes, by separator.\"\"\"\n", + " nodes = []\n", + "\n", + " # Get image files (if provided)\n", + " image_files = (\n", + " _get_sorted_image_files(image_dir) if image_dir is not None else None\n", + " )\n", + "\n", + " # Get markdown texts (if provided)\n", + " md_texts = (\n", + " [d[\"md\"] for d in json_dicts] if json_dicts is not None else None\n", + " )\n", + "\n", + " # Split docs into chunks by separator\n", + " doc_chunks = [c for d in docs for c in d.text.split(\"---\")]\n", + "\n", + " # Handle both single-page and multi-page cases\n", + " for idx, doc_chunk in enumerate(doc_chunks):\n", + " chunk_metadata = {\"page_num\": idx + 1}\n", + "\n", + " # Check if there are image files and handle the single-page case\n", + " if image_files is not None:\n", + " # Use the first image file if there's only one\n", + " image_file = (\n", + " image_files[idx] if idx < len(image_files) else image_files[0]\n", + " )\n", + " chunk_metadata[\"image_path\"] = str(image_file)\n", + "\n", + " # Check if there are markdown texts and handle the single-page case\n", + " if md_texts is 
not None:\n",
+    "            # Use the first markdown text if there's only one\n",
+    "            parsed_text_md = (\n",
+    "                md_texts[idx] if idx < len(md_texts) else md_texts[0]\n",
+    "            )\n",
+    "            chunk_metadata[\"parsed_text_markdown\"] = parsed_text_md\n",
+    "\n",
+    "        # Add the chunk text as metadata\n",
+    "        chunk_metadata[\"parsed_text\"] = doc_chunk\n",
+    "\n",
+    "        # Create the TextNode with the parsed text and metadata\n",
+    "        node = TextNode(\n",
+    "            text=\"\",\n",
+    "            metadata=chunk_metadata,\n",
+    "        )\n",
+    "        nodes.append(node)\n",
+    "\n",
+    "    return nodes"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f591669c-5a8e-491d-9cef-0b754abbf26f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from pathlib import Path\n",
+    "\n",
+    "# this will split into pages\n",
+    "text_nodes = get_text_nodes(\n",
+    "    docs_text,\n",
+    "    image_dir=\"/Users/macintosh/TA-DOCUMENT/StudyZone/ComputerScience/Artificial Intelligence/Llama_index/llama_index/docs/docs/examples/rag_guardrail/data_images\",\n",
+    "    json_dicts=md_json_list,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "32c13950-c1db-435f-b5b4-89d62b8b7744",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "page_num: 1\n",
+      "image_path: /Users/macintosh/TA-DOCUMENT/StudyZone/ComputerScience/Artificial Intelligence/Llama_index/llama_index/docs/docs/examples/rag_guardrail/data_images/84943607-b630-45bd-bf89-8470840e73b5-page_51.jpg\n",
+      "parsed_text_markdown: NO_CONTENT_HERE\n",
+      "parsed_text: ConocoPhillips\n",
+      " 2023 Analyst & Investor Meeting\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(text_nodes[0].get_content(metadata_mode=\"all\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4f404f56-db1e-4ed7-9ba1-ead763546348",
+   "metadata": {},
+   "source": [
+    "#### Build Index\n",
+    "\n",
+    "Once the text nodes are ready, we feed them into our vector store index abstraction, which will index these nodes into a simple in-memory vector store (of course, you should definitely check out our 40+ vector store integrations!)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6ea53c31-0e38-421c-8d9b-0e3adaa1677e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "from llama_index.core import (\n",
+    "    StorageContext,\n",
+    "    VectorStoreIndex,\n",
+    "    load_index_from_storage,\n",
+    ")\n",
+    "\n",
+    "if not os.path.exists(\"storage_nodes\"):\n",
+    "    index = VectorStoreIndex(text_nodes, embed_model=embed_model)\n",
+    "    # save index to disk\n",
+    "    index.set_index_id(\"vector_index\")\n",
+    "    index.storage_context.persist(\"./storage_nodes\")\n",
+    "else:\n",
+    "    # rebuild storage context\n",
+    "    storage_context = StorageContext.from_defaults(persist_dir=\"storage_nodes\")\n",
+    "    # load index\n",
+    "    index = load_index_from_storage(storage_context, index_id=\"vector_index\")\n",
+    "\n",
+    "retriever = index.as_retriever()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "576a536f",
+   "metadata": {},
+   "source": [
+    "## Build Guardrail\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "d2a05efd",
+   "metadata": {},
+   "source": [
+    "Define the global rail output format for the guardrail checks"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "89dc0ffc",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def result_response(\n",
+    "    guardrail_type,\n",
+    "    activated,\n",
+    "    guard_output,\n",
+    "    is_valid,\n",
+    "    risk_score,\n",
+    "    threshold,\n",
+    "    response_text,\n",
+    "):\n",
+    "    \"\"\"\n",
+    "    Standardizes the result format for all guardrail checks.\n",
+    "    \"\"\"\n",
+    "    return {\n",
+    "        \"guardrail_type\": guardrail_type,\n",
+    "        \"activated\": activated,\n",
+    "        \"guardrail_detail\": {\n",
+    "            \"guard_output\": guard_output,\n",
+    "            \"is_valid\": is_valid,\n",
+    "            \"risk_score/threshold\": f\"{risk_score}/{threshold}\",\n",
+    "            \"response_text\": response_text,\n",
+    "        },\n",
+    "    }"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0a7ba30e",
+   "metadata": {},
+   "source": [
+    "## Adding Scanners for the Guardrail\n",
+    "Here you can visit \"https://llm-guard.com\" to discover the necessary scanners. Below are two example scanners; your own scanners should follow the same function format.\n",
+    "Here we use Gemini 1.5 Flash to generate the scanner's response text, but you can choose your own LLM, such as ChatGPT."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6ff28f44",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
+      "To disable this warning, you can either:\n",
+      "\t- Avoid using `tokenizers` before the fork if possible\n",
+      "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
+     ]
+    }
+   ],
+   "source": [
+    "from llm_guard.input_scanners import Toxicity\n",
+    "from llm_guard.input_scanners.toxicity import MatchType\n",
+    "from litellm import completion\n",
+    "\n",
+    "\n",
+    "def guardrail_toxicLanguage(prompt):\n",
+    "    # Interact with the LLM to generate a response\n",
+    "    print(f\"Prompt: {prompt}\")\n",
+    "\n",
+    "    # Generate the response using the LLM (Gemini-1.5-flash)\n",
+    "    response = completion(\n",
+    "        model=\"gemini/gemini-1.5-flash\",\n",
+    "        messages=[\n",
+    "            {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
+    "            {\"role\": \"user\", \"content\": prompt},\n",
+    "        ],\n",
+    "    )\n",
+    "    response_text = response.choices[0].message.content\n",
+    "\n",
+    "    # Define the threshold and scan for toxicity\n",
+    "    threshold = 0.5\n",
+    "    toxic_scanner = Toxicity(threshold=threshold, match_type=MatchType.FULL)\n",
+    "    sanitized_output, is_valid, risk_score = toxic_scanner.scan(prompt)\n",
+    "\n",
+    "    return result_response(\n",
+    "        guardrail_type=\"Toxicity\",\n",
+    "        activated=not is_valid,\n",
+    "        guard_output=sanitized_output,\n",
+    "        is_valid=is_valid,\n",
+    "        risk_score=risk_score,\n",
+    "        threshold=threshold,\n",
+    "        response_text=response_text,\n",
+    "    )"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "cdcea84e",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llm_guard.input_scanners import TokenLimit\n",
+    "from llm_guard import scan_output\n",
+    "from litellm import completion\n",
+    "\n",
+    "\n",
+    "def guardrail_tokenlimit(prompt):\n",
+    "    threshold = 400\n",
+    "    response = completion(\n",
+    "        model=\"gemini/gemini-1.5-flash\",\n",
+    "        messages=[\n",
+    "            {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
+    "            {\"role\": \"user\", \"content\": prompt},\n",
+    "        ],\n",
+    "    )\n",
+    "    response_text = response.choices[0].message.content\n",
+    "\n",
+    "    scanner = TokenLimit(limit=threshold, encoding_name=\"cl100k_base\")\n",
+    "    sanitized_output, is_valid, risk_score = scanner.scan(prompt)\n",
+    "\n",
+    "    # Use the global rail to format the result\n",
+    "    result = result_response(\n",
+    "        guardrail_type=\"Token limit\",\n",
+    "        activated=not is_valid,\n",
+    "        
guard_output=sanitized_output,\n", + " is_valid=is_valid,\n", + " risk_score=risk_score,\n", + " threshold=threshold,\n", + " response_text=response_text,\n", + " )\n", + "\n", + " return result" + ] + }, + { + "cell_type": "markdown", + "id": "ef7e9430", + "metadata": {}, + "source": [ + "### `InputScanner` - `OutputScanner` Function\n", + "\n", + "The `InputScanner` function runs a series of scanners on a given input query and evaluates whether any of them detect a threat. It returns a boolean value indicating whether a threat was detected and a list of results from scanners that returned a positive detection.\n", + "\n", + "#### Parameters:\n", + "- `query` (*str*): The input to be scanned for potential threats.\n", + "- `listOfScanners` (*list*): A list of scanner functions. Each scanner function should accept the query as input and return a dictionary with a key `\"activated\"` (boolean) to indicate whether a threat was detected.\n", + "\n", + "#### Returns:\n", + "- `detected` (*bool*): `True` if any scanner detects a threat, otherwise `False`.\n", + "- `triggered_scanners` (*list*): A list of dictionaries returned by the scanners that detected a threat.\n", + "\n", + "#### Key Steps:\n", + "1. Initialize `detected` to `False` to track if any scanner finds a threat.\n", + "2. Create an empty list `triggered_scanners` to store results from scanners that detect a threat.\n", + "3. Iterate over each scanner in `listOfScanners`:\n", + " - Run the scanner on the `query`.\n", + " - Check if the scanner's result includes `\"activated\": True`.\n", + " - If a threat is detected:\n", + " - Set `detected` to `True`.\n", + " - Append the scanner's result to `triggered_scanners`.\n", + "4. Return the `detected` status and the list of `triggered_scanners`.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "890da9f9", + "metadata": {}, + "outputs": [], + "source": [ + "def InputScanner(query, listOfScanners):\n", + " \"\"\"\n", + " Runs all scanners on the query and returns:\n", + " - True if any scanner detects a threat.\n", + " - A list of results from scanners that returned True.\n", + " \"\"\"\n", + " detected = False # Track if any scanner detects a threat\n", + " triggered_scanners = [] # Store results from triggered scanners\n", + "\n", + " # Run each scanner on the query\n", + " for scanner in listOfScanners:\n", + " result = scanner(query)\n", + "\n", + " if result[\n", + " \"activated\"\n", + " ]: # Check if the scanner found a threat (activated=True)\n", + " detected = True # Set detected to True if any scanner triggers\n", + " triggered_scanners.append(result) # Track which scanner triggered\n", + "\n", + " return detected, triggered_scanners" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dfde0c48", + "metadata": {}, + "outputs": [], + "source": [ + "def OutputScanner(response, query, context, listOfScanners):\n", + " \"\"\"\n", + " Runs all scanners on the response and returns:\n", + " - True if any scanner detects a threat.\n", + " - A list of results from scanners that returned True.\n", + " \"\"\"\n", + " detected = False # Track if any scanner detects a threat\n", + " triggered_scanners = [] # Store results from triggered scanners\n", + "\n", + " # Run each scanner on the response\n", + " for scanner in listOfScanners:\n", + " # Check if scanner is `evaluate_rag_response` (which needs query & context)\n", + " if scanner.__name__ == \"evaluate_rag_response\":\n", + " result = scanner(\n", + " response, query, 
context\n", + " ) # Execute with query & context\n", + " else:\n", + " result = scanner(response) # Default scanner execution\n", + "\n", + " # print(f\"Debug Output Scanner Result: {result}\")\n", + "\n", + " if result[\"activated\"]: # Check if the scanner was triggered\n", + " detected = True\n", + " triggered_scanners.append(result) # Track which scanner triggered\n", + "\n", + " return detected, triggered_scanners\n", + "\n", + "\n", + "# Example usage with a query engine response\n", + "# scanners = [detect_and_anonymize_pii]\n", + "# response = query_engine.query(\"Give me account name of Peter Kelly and Role and Credit Card Number\")\n", + "# detected, triggered_scanners = OutputScanner(str(response), scanners)\n", + "# print(triggered_scanners)" + ] + }, + { + "cell_type": "markdown", + "id": "5f0e33a4-9422-498d-87ee-d917bdf74d80", + "metadata": {}, + "source": [ + "## Custom Multimodal Query Engine\n", + "\n", + "This custom query engine extends standard retrieval-based architectures to handle both text and image data, enabling more comprehensive and context-aware responses. It integrates multimodal reasoning and incorporates advanced input and output validation mechanisms for robust query handling.\n", + "\n", + "### Key Features:\n", + "\n", + "1. **Multimodal Support**:\n", + " - Combines text and image data to generate more informed and accurate responses.\n", + "\n", + "2. **Input and Output Validation**:\n", + " - Scans input queries for sensitive or invalid content and blocks them if necessary.\n", + " - Validates and sanitizes generated responses to ensure compliance with predefined rules.\n", + "\n", + "3. **Context-Aware Prompting**:\n", + " - Retrieves relevant data and constructs a context string for the query.\n", + " - Uses this context to guide the response synthesis process.\n", + "\n", + "4. **Metadata and Logging**:\n", + " - Tracks the query process, including any validations or adjustments made, for transparency and debugging.\n", + "\n", + "### How It Works:\n", + "1. Scans the input query to check for violations.\n", + "2. Retrieves relevant text and image data for the query.\n", + "3. Synthesizes a response using both textual and visual context.\n", + "4. Validates the response for appropriateness before returning it.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35a94be2-e289-41a6-92e4-d3cb428fb0c8", + "metadata": {}, + "outputs": [], + "source": [ + "from llama_index.core.query_engine import (\n", + " CustomQueryEngine,\n", + " SimpleMultiModalQueryEngine,\n", + ")\n", + "from llama_index.core.retrievers import BaseRetriever\n", + "from llama_index.multi_modal_llms.openai import OpenAIMultiModal\n", + "from llama_index.core.schema import ImageNode, NodeWithScore, MetadataMode\n", + "from llama_index.core.prompts import PromptTemplate\n", + "from llama_index.core.base.response.schema import Response\n", + "\n", + "from typing import List, Callable, Optional\n", + "from pydantic import Field\n", + "\n", + "\n", + "QA_PROMPT_TMPL = \"\"\"\\\n", + "---------------------\n", + "{context_str}\n", + "---------------------\n", + "Given the context information and not prior knowledge, answer the query if it is related to the context. 
\n", + "If the query is not related to the context, respond with:\n", + "\"I'm sorry, but I can't help with that.\"\n", + "\n", + "Query: {query_str}\n", + "Answer: \"\"\"\n", + "\n", + "QA_PROMPT = PromptTemplate(QA_PROMPT_TMPL)\n", + "\n", + "\n", + "class MultimodalQueryEngine(CustomQueryEngine):\n", + " \"\"\"Custom multimodal Query Engine.\n", + "\n", + " Takes in a retriever to retrieve a set of document nodes.\n", + " Also takes in a prompt template and multimodal model.\n", + "\n", + " \"\"\"\n", + "\n", + " qa_prompt: PromptTemplate\n", + " retriever: BaseRetriever\n", + " multi_modal_llm: GeminiMultiModal\n", + " input_scanners: List[Callable[[str], dict]] = Field(default_factory=list)\n", + " output_scanners: List[Callable[[str], dict]] = Field(default_factory=list)\n", + "\n", + " def __init__(\n", + " self, qa_prompt: Optional[PromptTemplate] = None, **kwargs\n", + " ) -> None:\n", + " \"\"\"Initialize.\"\"\"\n", + " super().__init__(qa_prompt=qa_prompt or QA_PROMPT, **kwargs)\n", + "\n", + " def custom_query(self, query_str: str):\n", + " query_metadata = {\n", + " \"input_scanners\": [],\n", + " \"output_scanners\": [],\n", + " \"retrieved_nodes\": [],\n", + " \"response_status\": \"success\",\n", + " }\n", + "\n", + " input_detected, input_triggered = InputScanner(\n", + " query_str, self.input_scanners\n", + " )\n", + " if input_triggered:\n", + " # print(\"Triggered Input Scanners:\", input_triggered)\n", + " # Log triggered input scanners in metadata\n", + " query_metadata[\"input_scanners\"] = input_triggered\n", + " # If input contains sensitive information, block the query\n", + " if input_detected:\n", + " return Response(\n", + " response=\"I'm sorry, but I can't help with that.\",\n", + " source_nodes=[],\n", + " metadata={\n", + " \"guardrail\": \"Input Scanner\",\n", + " \"triggered_scanners\": input_triggered,\n", + " \"response_status\": \"blocked\",\n", + " },\n", + " )\n", + "\n", + " # retrieve text nodes\n", + " nodes = self.retriever.retrieve(query_str)\n", + " # create ImageNode items from text nodes\n", + " image_nodes = [\n", + " NodeWithScore(node=ImageNode(image_path=n.metadata[\"image_path\"]))\n", + " for n in nodes\n", + " ]\n", + "\n", + " # create context string from text nodes, dump into the prompt\n", + " context_str = \"\\n\\n\".join(\n", + " [r.get_content(metadata_mode=MetadataMode.LLM) for r in nodes]\n", + " )\n", + " fmt_prompt = self.qa_prompt.format(\n", + " context_str=context_str, query_str=query_str\n", + " )\n", + "\n", + " # synthesize an answer from formatted text and images\n", + " llm_response = self.multi_modal_llm.complete(\n", + " prompt=fmt_prompt,\n", + " image_documents=[image_node.node for image_node in image_nodes],\n", + " )\n", + "\n", + " # Step 5: Run Output Scanners\n", + " output_detected, output_triggered = OutputScanner(\n", + " str(llm_response),\n", + " str(query_str),\n", + " str(context_str),\n", + " self.output_scanners,\n", + " )\n", + " if output_triggered:\n", + " # print(\"Triggered Output Scanners:\", output_triggered)\n", + " query_metadata[\n", + " \"output_scanners\"\n", + " ] = output_triggered # Store output scanner info\n", + "\n", + " final_response = str(llm_response)\n", + " if output_detected:\n", + " final_response = \"I'm sorry, but I can't help with that.\"\n", + " query_metadata[\"response_status\"] = \"sanitized\"\n", + " # Return the response with detailed metadata\n", + " return Response(\n", + " response=final_response,\n", + " source_nodes=nodes,\n", + " metadata=query_metadata,\n", + 
" )" + ] + }, + { + "cell_type": "markdown", + "id": "2ccd3974", + "metadata": {}, + "source": [ + "### Input and Output Scanners Configuration\n", + "\n", + "You can put the scanner which you need to guard your RAG\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a44b28eb", + "metadata": {}, + "outputs": [], + "source": [ + "input_scanners = [guardrail_toxicLanguage, guardrail_tokenlimit]\n", + "output_scanners = [guardrail_toxicLanguage]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0890be59-fb12-4bb5-959b-b2d9600f7774", + "metadata": {}, + "outputs": [], + "source": [ + "query_engine = MultimodalQueryEngine(\n", + " retriever=index.as_retriever(similarity_top_k=9),\n", + " multi_modal_llm=gemini_multimodal,\n", + " input_scanners=input_scanners,\n", + " output_scanners=output_scanners,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "2336f98b-c0a1-413a-849d-8a89bacb90b5", + "metadata": {}, + "source": [ + "## Try out Queries\n", + "\n", + "Let's try out queries." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d78e53cf-35cb-4ef8-b03e-1b47ba15ae64", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prompt: Tell me about the diverse geographies where Conoco Phillips has a production base\n", + "\u001b[2m2024-12-03 17:43:08\u001b[0m [\u001b[32m\u001b[1mdebug \u001b[0m] \u001b[1mInitialized classification model\u001b[0m \u001b[36mdevice\u001b[0m=\u001b[35mdevice(type='mps')\u001b[0m \u001b[36mmodel\u001b[0m=\u001b[35mModel(path='unitary/unbiased-toxic-roberta', subfolder='', revision='36295dd80b422dc49f40052021430dae76241adc', onnx_path='ProtectAI/unbiased-toxic-roberta-onnx', onnx_revision='34480fa958f6657ad835c345808475755b6974a7', onnx_subfolder='', onnx_filename='model.onnx', kwargs={}, pipeline_kwargs={'batch_size': 1, 'device': device(type='mps'), 'padding': 'max_length', 'top_k': None, 'function_to_apply': 'sigmoid', 'return_token_type_ids': False, 'max_length': 512, 'truncation': True}, tokenizer_kwargs={})\u001b[0m\n", + "\u001b[2m2024-12-03 17:43:09\u001b[0m [\u001b[32m\u001b[1mdebug \u001b[0m] \u001b[1mNot toxicity found in the text\u001b[0m \u001b[36mresults\u001b[0m=\u001b[35m[[{'label': 'toxicity', 'score': 0.00041448281263001263}, {'label': 'male', 'score': 0.00018738119979389012}, {'label': 'insult', 'score': 0.00011956175148952752}, {'label': 'female', 'score': 0.00011725842341547832}, {'label': 'psychiatric_or_mental_illness', 'score': 8.512590284226462e-05}, {'label': 'white', 'score': 7.451862620655447e-05}, {'label': 'christian', 'score': 5.6545581173850223e-05}, {'label': 'muslim', 'score': 5.644273551297374e-05}, {'label': 'black', 'score': 3.8606172893196344e-05}, {'label': 'obscene', 'score': 3.222753730369732e-05}, {'label': 'identity_attack', 'score': 3.1757666874909773e-05}, {'label': 'threat', 'score': 2.8462023692554794e-05}, {'label': 'jewish', 'score': 2.7872381906490773e-05}, {'label': 'homosexual_gay_or_lesbian', 'score': 2.5694836949696764e-05}, {'label': 'sexual_explicit', 'score': 1.859129588410724e-05}, {'label': 'severe_toxicity', 'score': 1.0931341876130318e-06}]]\u001b[0m\n", + "\u001b[2m2024-12-03 17:43:15\u001b[0m [\u001b[32m\u001b[1mdebug \u001b[0m] \u001b[1mPrompt fits the maximum tokens\u001b[0m \u001b[36mnum_tokens\u001b[0m=\u001b[35m15\u001b[0m \u001b[36mthreshold\u001b[0m=\u001b[35m400\u001b[0m\n", + "Prompt: ConocoPhillips has a diverse production base across several geographic locations. 
These include:\n", + "\n", + "* **Alaska:** The company has a significant presence in Alaska's conventional basins, including the Prudhoe Bay area, with a long history of production and existing infrastructure. The Willow project is also located in Alaska.\n", + "* **Lower 48 (United States):** ConocoPhillips operates extensively in the Lower 48 states, focusing on unconventional plays in the Permian Basin (Delaware and Midland Basins), Eagle Ford, and Bakken.\n", + "* **International:** The company has operations in other international locations, including Qatar (LNG), and previously had operations in the UK, Australia, Indonesia, and Canada (though some of these have been divested). They also have a global marketing presence with offices in London, Singapore, Houston, Calgary, Beijing, and Tokyo.\n", + "\u001b[2m2024-12-03 17:43:36\u001b[0m [\u001b[32m\u001b[1mdebug \u001b[0m] \u001b[1mInitialized classification model\u001b[0m \u001b[36mdevice\u001b[0m=\u001b[35mdevice(type='mps')\u001b[0m \u001b[36mmodel\u001b[0m=\u001b[35mModel(path='unitary/unbiased-toxic-roberta', subfolder='', revision='36295dd80b422dc49f40052021430dae76241adc', onnx_path='ProtectAI/unbiased-toxic-roberta-onnx', onnx_revision='34480fa958f6657ad835c345808475755b6974a7', onnx_subfolder='', onnx_filename='model.onnx', kwargs={}, pipeline_kwargs={'batch_size': 1, 'device': device(type='mps'), 'padding': 'max_length', 'top_k': None, 'function_to_apply': 'sigmoid', 'return_token_type_ids': False, 'max_length': 512, 'truncation': True}, tokenizer_kwargs={})\u001b[0m\n", + "\u001b[2m2024-12-03 17:43:37\u001b[0m [\u001b[32m\u001b[1mdebug \u001b[0m] \u001b[1mNot toxicity found in the text\u001b[0m \u001b[36mresults\u001b[0m=\u001b[35m[[{'label': 'toxicity', 'score': 0.0003606641257647425}, {'label': 'male', 'score': 0.000291528704110533}, {'label': 'insult', 'score': 0.00011418585199862719}, {'label': 'psychiatric_or_mental_illness', 'score': 0.00011314846051391214}, {'label': 'female', 'score': 0.00010114537144545466}, {'label': 'white', 'score': 9.688278078101575e-05}, {'label': 'muslim', 'score': 6.954199488973245e-05}, {'label': 'christian', 'score': 5.551999493036419e-05}, {'label': 'black', 'score': 4.1746119677554816e-05}, {'label': 'identity_attack', 'score': 3.3705578971421346e-05}, {'label': 'homosexual_gay_or_lesbian', 'score': 3.157216633553617e-05}, {'label': 'obscene', 'score': 2.798157584038563e-05}, {'label': 'jewish', 'score': 2.618367398099508e-05}, {'label': 'threat', 'score': 2.1199964976403862e-05}, {'label': 'sexual_explicit', 'score': 1.9145050828228705e-05}, {'label': 'severe_toxicity', 'score': 1.1292050885458593e-06}]]\u001b[0m\n" + ] + } + ], + "source": [ + "query = \"Tell me about the diverse geographies where Conoco Phillips has a production base\"\n", + "response = query_engine.query(query)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8b090dec", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ConocoPhillips has a diverse production base across several geographic locations. These include:\n", + "\n", + "* **Alaska:** The company has a significant presence in Alaska's conventional basins, including the Prudhoe Bay area, with a long history of production and existing infrastructure. 
The Willow project is also located in Alaska.\n", + "* **Lower 48 (United States):** ConocoPhillips operates extensively in the Lower 48 states, focusing on unconventional plays in the Permian Basin (Delaware and Midland Basins), Eagle Ford, and Bakken.\n", + "* **International:** The company has operations in other international locations, including Qatar (LNG), and previously had operations in the UK, Australia, Indonesia, and Canada (though some of these have been divested). They also have a global marketing presence with offices in London, Singapore, Houston, Calgary, Beijing, and Tokyo.\n" + ] + } + ], + "source": [ + "print(str(response))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "528752bd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'input_scanners': [], 'output_scanners': [], 'retrieved_nodes': [], 'response_status': 'success'}\n" + ] + } + ], + "source": [ + "print(str(response.metadata))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d0b2d82d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prompt: \n", + " If you're looking for random paragraphs, you've come to the right place. When a random word or a random sentence isn't quite enough, the next logical step is to find a random paragraph. We created the Random Paragraph Generator with you in mind. The process is quite simple. Choose the number of random paragraphs you'd like to see and click the button. Your chosen number of paragraphs will instantly appear.\n", + "\n", + "While it may not be obvious to everyone, there are a number of reasons creating random paragraphs can be useful. A few examples of how some people use this generator are listed in the following paragraphs.\n", + "\n", + "Creative Writing\n", + "Generating random paragraphs can be an excellent way for writers to get their creative flow going at the beginning of the day. The writer has no idea what topic the random paragraph will be about when it appears. This forces the writer to use creativity to complete one of three common writing challenges. The writer can use the paragraph as the first one of a short story and build upon it. A second option is to use the random paragraph somewhere in a short story they create. The third option is to have the random paragraph be the ending paragraph in a short story. No matter which of these challenges is undertaken, the writer is forced to use creativity to incorporate the paragraph into their writing.\n", + "\n", + "Tackle Writers' Block\n", + "A random paragraph can also be an excellent way for a writer to tackle writers' block. Writing block can often happen due to being stuck with a current project that the writer is trying to complete. By inserting a completely random paragraph from which to begin, it can take down some of the issues that may have been causing the writers' block in the first place.\n", + "\n", + "Beginning Writing Routine\n", + "Another productive way to use this tool to begin a daily writing routine. One way is to generate a random paragraph with the intention to try to rewrite it while still keeping the original meaning. 
The purpose here is to just get the writing started so that when the writer goes onto their day's writing projects, words are already flowing from their fingers.\n", + "\n", + "Writing Challenge\n", + "Another writing challenge can be to take the individual sentences in the random paragraph and incorporate a single sentence from that into a new paragraph to create a short story. Unlike the random sentence generator, the sentences from the random paragraph will have some connection to one another so it will be a bit different. You also won't know exactly how many sentences will appear in the random paragraph.\n", + "\n", + "Programmers\n", + "It's not only writers who can benefit from this free online tool. If you're a programmer who's working on a project where blocks of text are needed, this tool can be a great way to get that. It's a good way to test your programming and that the tool being created is working well.\n", + "\n", + "Above are a few examples of how the random paragraph generator can be beneficial. The best way to see if this random paragraph picker will be useful for your intended purposes is to give it a try. Generate a number of paragraphs to see if they are beneficial to your current project.\n", + "\n", + "If you do find this paragraph tool useful, please do us a favor and let us know how you're using it. It's greatly beneficial for us to know the different ways this tool is being used so we can improve it with updates. This is especially true since there are times when the generators we create get used in completely unanticipated ways from when we initially created them. If you have the time, please send us a quick note on what you'd like to see changed or added to make it better in the future.\n", + "\n", + "Frequently Asked Questions\n", + "\n", + "Can I use these random paragraphs for my project?\n", + "\n", + "Yes! All of the random paragraphs in our generator are free to use for your projects.\n", + "\n", + "Does a computer generate these paragraphs?\n", + "\n", + "No! All of the paragraphs in the generator are written by humans, not computers. When first building this generator we thought about using computers to generate the paragraphs, but they weren't very good and many times didn't make any sense at all. We therefore took the time to create paragraphs specifically for this generator to make it the best that we could.\n", + "\n", + "Can I contribute random paragraphs?\n", + "\n", + "Yes. We're always interested in improving this generator and one of the best ways to do that is to add new and interesting paragraphs to the generator. If you'd like to contribute some random paragraphs, please contact us.\n", + "\n", + "How many words are there in a paragraph?\n", + "\n", + "There are usually about 200 words in a paragraph, but this can vary widely. Most paragraphs focus on a single idea that's expressed with an introductory sentence, then followed by two or more supporting sentences about the idea. 
A short paragraph may not reach even 50 words while long paragraphs can be over 400 words long, but generally speaking they tend to be approximately 200 words in length.\n", + " \n", + "\u001b[2m2024-12-03 17:43:42\u001b[0m [\u001b[32m\u001b[1mdebug \u001b[0m] \u001b[1mInitialized classification model\u001b[0m \u001b[36mdevice\u001b[0m=\u001b[35mdevice(type='mps')\u001b[0m \u001b[36mmodel\u001b[0m=\u001b[35mModel(path='unitary/unbiased-toxic-roberta', subfolder='', revision='36295dd80b422dc49f40052021430dae76241adc', onnx_path='ProtectAI/unbiased-toxic-roberta-onnx', onnx_revision='34480fa958f6657ad835c345808475755b6974a7', onnx_subfolder='', onnx_filename='model.onnx', kwargs={}, pipeline_kwargs={'batch_size': 1, 'device': device(type='mps'), 'padding': 'max_length', 'top_k': None, 'function_to_apply': 'sigmoid', 'return_token_type_ids': False, 'max_length': 512, 'truncation': True}, tokenizer_kwargs={})\u001b[0m\n", + "\u001b[2m2024-12-03 17:43:42\u001b[0m [\u001b[32m\u001b[1mdebug \u001b[0m] \u001b[1mNot toxicity found in the text\u001b[0m \u001b[36mresults\u001b[0m=\u001b[35m[[{'label': 'toxicity', 'score': 0.0011976422974839807}, {'label': 'insult', 'score': 0.00045695927110500634}, {'label': 'male', 'score': 0.00018701529188547283}, {'label': 'psychiatric_or_mental_illness', 'score': 0.00014795312017668039}, {'label': 'white', 'score': 9.39662495511584e-05}, {'label': 'female', 'score': 7.459904009010643e-05}, {'label': 'obscene', 'score': 6.114380084909499e-05}, {'label': 'threat', 'score': 5.259696990833618e-05}, {'label': 'muslim', 'score': 4.745226033264771e-05}, {'label': 'identity_attack', 'score': 3.541662226780318e-05}, {'label': 'black', 'score': 3.5083121474599466e-05}, {'label': 'christian', 'score': 3.272023604949936e-05}, {'label': 'sexual_explicit', 'score': 3.164245936204679e-05}, {'label': 'jewish', 'score': 1.5377421732409857e-05}, {'label': 'homosexual_gay_or_lesbian', 'score': 1.5361225450760685e-05}, {'label': 'severe_toxicity', 'score': 1.3027844261159771e-06}]]\u001b[0m\n", + "\u001b[2m2024-12-03 17:43:46\u001b[0m [\u001b[33m\u001b[1mwarning \u001b[0m] \u001b[1mPrompt is too big. Splitting into chunks\u001b[0m \u001b[36mchunks\u001b[0m=\u001b[35m[\"\\n If you're looking for random paragraphs, you've come to the right place. When a random word or a random sentence isn't quite enough, the next logical step is to find a random paragraph. We created the Random Paragraph Generator with you in mind. The process is quite simple. Choose the number of random paragraphs you'd like to see and click the button. Your chosen number of paragraphs will instantly appear.\\n\\nWhile it may not be obvious to everyone, there are a number of reasons creating random paragraphs can be useful. A few examples of how some people use this generator are listed in the following paragraphs.\\n\\nCreative Writing\\nGenerating random paragraphs can be an excellent way for writers to get their creative flow going at the beginning of the day. The writer has no idea what topic the random paragraph will be about when it appears. This forces the writer to use creativity to complete one of three common writing challenges. The writer can use the paragraph as the first one of a short story and build upon it. A second option is to use the random paragraph somewhere in a short story they create. The third option is to have the random paragraph be the ending paragraph in a short story. 
No matter which of these challenges is undertaken, the writer is forced to use creativity to incorporate the paragraph into their writing.\\n\\nTackle Writers' Block\\nA random paragraph can also be an excellent way for a writer to tackle writers' block. Writing block can often happen due to being stuck with a current project that the writer is trying to complete. By inserting a completely random paragraph from which to begin, it can take down some of the issues that may have been causing the writers' block in the first place.\\n\\nBeginning Writing Routine\\nAnother productive way to use this tool to begin a daily writing routine. One way is to generate a random paragraph with the intention to try to rewrite it while still keeping the original meaning. The purpose here is to just get the writing started so that when the writer goes onto their day's writing\", \" projects, words are already flowing from their fingers.\\n\\nWriting Challenge\\nAnother writing challenge can be to take the individual sentences in the random paragraph and incorporate a single sentence from that into a new paragraph to create a short story. Unlike the random sentence generator, the sentences from the random paragraph will have some connection to one another so it will be a bit different. You also won't know exactly how many sentences will appear in the random paragraph.\\n\\nProgrammers\\nIt's not only writers who can benefit from this free online tool. If you're a programmer who's working on a project where blocks of text are needed, this tool can be a great way to get that. It's a good way to test your programming and that the tool being created is working well.\\n\\nAbove are a few examples of how the random paragraph generator can be beneficial. The best way to see if this random paragraph picker will be useful for your intended purposes is to give it a try. Generate a number of paragraphs to see if they are beneficial to your current project.\\n\\nIf you do find this paragraph tool useful, please do us a favor and let us know how you're using it. It's greatly beneficial for us to know the different ways this tool is being used so we can improve it with updates. This is especially true since there are times when the generators we create get used in completely unanticipated ways from when we initially created them. If you have the time, please send us a quick note on what you'd like to see changed or added to make it better in the future.\\n\\nFrequently Asked Questions\\n\\nCan I use these random paragraphs for my project?\\n\\nYes! All of the random paragraphs in our generator are free to use for your projects.\\n\\nDoes a computer generate these paragraphs?\\n\\nNo! All of the paragraphs in the generator are written by humans, not computers. When first building this generator we thought about using computers to generate the paragraphs, but they weren't very good and many times didn't make any sense at all\", \". We therefore took the time to create paragraphs specifically for this generator to make it the best that we could.\\n\\nCan I contribute random paragraphs?\\n\\nYes. We're always interested in improving this generator and one of the best ways to do that is to add new and interesting paragraphs to the generator. If you'd like to contribute some random paragraphs, please contact us.\\n\\nHow many words are there in a paragraph?\\n\\nThere are usually about 200 words in a paragraph, but this can vary widely. 
Most paragraphs focus on a single idea that's expressed with an introductory sentence, then followed by two or more supporting sentences about the idea. A short paragraph may not reach even 50 words while long paragraphs can be over 400 words long, but generally speaking they tend to be approximately 200 words in length.\\n \"]\u001b[0m \u001b[36mnum_tokens\u001b[0m=\u001b[35m961\u001b[0m\n" + ] + } + ], + "source": [ + "query = \"\"\"\n", + " If you're looking for random paragraphs, you've come to the right place. When a random word or a random sentence isn't quite enough, the next logical step is to find a random paragraph. We created the Random Paragraph Generator with you in mind. The process is quite simple. Choose the number of random paragraphs you'd like to see and click the button. Your chosen number of paragraphs will instantly appear.\n", + "\n", + "While it may not be obvious to everyone, there are a number of reasons creating random paragraphs can be useful. A few examples of how some people use this generator are listed in the following paragraphs.\n", + "\n", + "Creative Writing\n", + "Generating random paragraphs can be an excellent way for writers to get their creative flow going at the beginning of the day. The writer has no idea what topic the random paragraph will be about when it appears. This forces the writer to use creativity to complete one of three common writing challenges. The writer can use the paragraph as the first one of a short story and build upon it. A second option is to use the random paragraph somewhere in a short story they create. The third option is to have the random paragraph be the ending paragraph in a short story. No matter which of these challenges is undertaken, the writer is forced to use creativity to incorporate the paragraph into their writing.\n", + "\n", + "Tackle Writers' Block\n", + "A random paragraph can also be an excellent way for a writer to tackle writers' block. Writing block can often happen due to being stuck with a current project that the writer is trying to complete. By inserting a completely random paragraph from which to begin, it can take down some of the issues that may have been causing the writers' block in the first place.\n", + "\n", + "Beginning Writing Routine\n", + "Another productive way to use this tool to begin a daily writing routine. One way is to generate a random paragraph with the intention to try to rewrite it while still keeping the original meaning. The purpose here is to just get the writing started so that when the writer goes onto their day's writing projects, words are already flowing from their fingers.\n", + "\n", + "Writing Challenge\n", + "Another writing challenge can be to take the individual sentences in the random paragraph and incorporate a single sentence from that into a new paragraph to create a short story. Unlike the random sentence generator, the sentences from the random paragraph will have some connection to one another so it will be a bit different. You also won't know exactly how many sentences will appear in the random paragraph.\n", + "\n", + "Programmers\n", + "It's not only writers who can benefit from this free online tool. If you're a programmer who's working on a project where blocks of text are needed, this tool can be a great way to get that. It's a good way to test your programming and that the tool being created is working well.\n", + "\n", + "Above are a few examples of how the random paragraph generator can be beneficial. 
The best way to see if this random paragraph picker will be useful for your intended purposes is to give it a try. Generate a number of paragraphs to see if they are beneficial to your current project.\n", + "\n", + "If you do find this paragraph tool useful, please do us a favor and let us know how you're using it. It's greatly beneficial for us to know the different ways this tool is being used so we can improve it with updates. This is especially true since there are times when the generators we create get used in completely unanticipated ways from when we initially created them. If you have the time, please send us a quick note on what you'd like to see changed or added to make it better in the future.\n", + "\n", + "Frequently Asked Questions\n", + "\n", + "Can I use these random paragraphs for my project?\n", + "\n", + "Yes! All of the random paragraphs in our generator are free to use for your projects.\n", + "\n", + "Does a computer generate these paragraphs?\n", + "\n", + "No! All of the paragraphs in the generator are written by humans, not computers. When first building this generator we thought about using computers to generate the paragraphs, but they weren't very good and many times didn't make any sense at all. We therefore took the time to create paragraphs specifically for this generator to make it the best that we could.\n", + "\n", + "Can I contribute random paragraphs?\n", + "\n", + "Yes. We're always interested in improving this generator and one of the best ways to do that is to add new and interesting paragraphs to the generator. If you'd like to contribute some random paragraphs, please contact us.\n", + "\n", + "How many words are there in a paragraph?\n", + "\n", + "There are usually about 200 words in a paragraph, but this can vary widely. Most paragraphs focus on a single idea that's expressed with an introductory sentence, then followed by two or more supporting sentences about the idea. A short paragraph may not reach even 50 words while long paragraphs can be over 400 words long, but generally speaking they tend to be approximately 200 words in length.\n", + " \"\"\"\n", + "response = query_engine.query(query)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f526e1ce", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "I'm sorry, but I can't help with that.\n" + ] + } + ], + "source": [ + "print(str(response))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "355d2aa4-c26f-480e-b512-4446acbd9227", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'guardrail': 'Input Scanner', 'triggered_scanners': [{'guardrail_type': 'Token limit', 'activated': True, 'guardrail_detail': {'guard_output': \"\\n If you're looking for random paragraphs, you've come to the right place. When a random word or a random sentence isn't quite enough, the next logical step is to find a random paragraph. We created the Random Paragraph Generator with you in mind. The process is quite simple. Choose the number of random paragraphs you'd like to see and click the button. Your chosen number of paragraphs will instantly appear.\\n\\nWhile it may not be obvious to everyone, there are a number of reasons creating random paragraphs can be useful. 
A few examples of how some people use this generator are listed in the following paragraphs.\\n\\nCreative Writing\\nGenerating random paragraphs can be an excellent way for writers to get their creative flow going at the beginning of the day. The writer has no idea what topic the random paragraph will be about when it appears. This forces the writer to use creativity to complete one of three common writing challenges. The writer can use the paragraph as the first one of a short story and build upon it. A second option is to use the random paragraph somewhere in a short story they create. The third option is to have the random paragraph be the ending paragraph in a short story. No matter which of these challenges is undertaken, the writer is forced to use creativity to incorporate the paragraph into their writing.\\n\\nTackle Writers' Block\\nA random paragraph can also be an excellent way for a writer to tackle writers' block. Writing block can often happen due to being stuck with a current project that the writer is trying to complete. By inserting a completely random paragraph from which to begin, it can take down some of the issues that may have been causing the writers' block in the first place.\\n\\nBeginning Writing Routine\\nAnother productive way to use this tool to begin a daily writing routine. One way is to generate a random paragraph with the intention to try to rewrite it while still keeping the original meaning. The purpose here is to just get the writing started so that when the writer goes onto their day's writing\", 'is_valid': False, 'risk_score/threshold': '1.0/400', 'response_text': \"This text describes a random paragraph generator and its various uses. Here's a summary broken down by section:\\n\\n**Introduction:** The text introduces a random paragraph generator, highlighting its simplicity and ease of use.\\n\\n**Uses of the Generator:** The core of the text details how the generator can be beneficial in several contexts:\\n\\n* **Creative Writing:** It aids writers in overcoming writer's block, sparking creativity, and providing starting points or endings for short stories. 
Three specific challenges are suggested: using the paragraph as the beginning, middle, or end of a story.\\n\\n* **Tackling Writer's Block:** The random paragraph acts as a disruption to overcome creative stagnation.\\n\\n* **Beginning a Writing Routine:** It helps initiate the writing process by providing a text to rewrite or use as inspiration.\\n\\n* **Writing Challenges:** The paragraph's sentences can be individually incorporated into new writing projects.\\n\\n* **Programmers:** The generator provides useful blocks of text for testing purposes in software development.\\n\\n\\n**Call to Action & Feedback:** The authors encourage users to try the generator, provide feedback, and contribute to its improvement by suggesting additions or changes.\\n\\n**Frequently Asked Questions (FAQ):** The FAQ section addresses common questions about the generator, clarifying:\\n\\n* **Usage rights:** The paragraphs are free to use.\\n* **Paragraph generation:** Human-written paragraphs are used, not computer-generated ones.\\n* **Contribution:** Users can contribute their own paragraphs.\\n* **Paragraph length:** Paragraphs are roughly 200 words but can vary significantly.\\n\\n\\nIn essence, the text is a well-structured promotional piece for a random paragraph generator, emphasizing its versatility and usefulness for both writers and programmers, while encouraging user engagement and participation.\\n\"}}], 'response_status': 'blocked'}\n" + ] + } + ], + "source": [ + "print(str(response.metadata))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/docs/examples/prompts/advanced_prompts.ipynb b/docs/docs/examples/prompts/advanced_prompts.ipynb index 6ff8644cca373..2834d50a8e04a 100644 --- a/docs/docs/examples/prompts/advanced_prompts.ipynb +++ b/docs/docs/examples/prompts/advanced_prompts.ipynb @@ -29,7 +29,100 @@ "execution_count": null, "id": "0a50028b", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Looking in indexes: https://mirrors.ustc.edu.cn/pypi/simple\n", + "Collecting llama-index-llms-openai\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/e8/45/e2849d077ad5510b56dd072c9cbfb73daa47debcac5a9582dd118f6659c5/llama_index_llms_openai-0.3.10-py3-none-any.whl (14 kB)\n", + "Collecting llama-index-core<0.13.0,>=0.12.4 (from llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/57/78/71984bb2cc8fe5c70d7986ee5034f5b10e07ecf751ada58ca1212ecc7bf7/llama_index_core-0.12.5-py3-none-any.whl (1.6 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hCollecting openai<2.0.0,>=1.57.1 (from llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/37/e7/95437fb676381e927d4cb3f9f8dd90ed24cfd264f572db4d395037428594/openai-1.57.2-py3-none-any.whl (389 kB)\n", + "Requirement already satisfied: PyYAML>=6.0.1 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) 
(6.0.2)\n", + "Collecting SQLAlchemy>=1.4.49 (from SQLAlchemy[asyncio]>=1.4.49->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/07/15/68ef91de5b8b7f80fb2d2b3b31ed42180c6227fe0a701aed9d01d34f98ec/SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.1 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.1/3.1 MB\u001b[0m \u001b[31m7.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hRequirement already satisfied: aiohttp<4.0.0,>=3.8.6 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (3.10.5)\n", + "Collecting dataclasses-json (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/c3/be/d0d44e092656fe7a06b55e6103cbce807cdbdee17884a5367c68c9860853/dataclasses_json-0.6.7-py3-none-any.whl (28 kB)\n", + "Collecting deprecated>=1.2.9.3 (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Using cached https://mirrors.ustc.edu.cn/pypi/packages/1d/8f/c7f227eb42cfeaddce3eb0c96c60cbca37797fa7b34f8e1aeadf6c5c0983/Deprecated-1.2.15-py2.py3-none-any.whl (9.9 kB)\n", + "Collecting dirtyjson<2.0.0,>=1.0.8 (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/68/69/1bcf70f81de1b4a9f21b3a62ec0c83bdff991c88d6cc2267d02408457e88/dirtyjson-1.0.8-py3-none-any.whl (25 kB)\n", + "Collecting filetype<2.0.0,>=1.2.0 (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Using cached https://mirrors.ustc.edu.cn/pypi/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl (19 kB)\n", + "Requirement already satisfied: fsspec>=2023.5.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (2024.9.0)\n", + "Requirement already satisfied: httpx in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (0.27.2)\n", + "Requirement already satisfied: nest-asyncio<2.0.0,>=1.5.8 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (1.6.0)\n", + "Requirement already satisfied: networkx>=3.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (3.3)\n", + "Collecting nltk>3.8.1 (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Using cached https://mirrors.ustc.edu.cn/pypi/packages/4d/66/7d9e26593edda06e8cb531874633f7c2372279c3b0f46235539fe546df8b/nltk-3.9.1-py3-none-any.whl (1.5 MB)\n", + "Requirement already satisfied: numpy in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (1.26.4)\n", + "Requirement already satisfied: pillow>=9.0.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (10.4.0)\n", + "Requirement already satisfied: pydantic>=2.8.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (2.8.2)\n", + "Requirement already satisfied: 
requests>=2.31.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (2.32.3)\n", + "Collecting tenacity!=8.4.0,<10.0.0,>=8.2.0 (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/b6/cb/b86984bed139586d01532a587464b5805f12e397594f19f931c4c2fbfa61/tenacity-9.0.0-py3-none-any.whl (28 kB)\n", + "Requirement already satisfied: tiktoken>=0.3.3 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (0.7.0)\n", + "Requirement already satisfied: tqdm<5.0.0,>=4.66.1 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (4.66.5)\n", + "Requirement already satisfied: typing-extensions>=4.5.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (4.12.2)\n", + "Collecting typing-inspect>=0.8.0 (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/65/f3/107a22063bf27bdccf2024833d3445f4eea42b2e598abfbd46f6a63b6cb0/typing_inspect-0.9.0-py3-none-any.whl (8.8 kB)\n", + "Collecting wrapt (from llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Using cached https://mirrors.ustc.edu.cn/pypi/packages/cd/c7/b8c89bf5ca5c4e6a2d0565d149d549cdb4cffb8916d1d1b546b62fb79281/wrapt-1.17.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl (82 kB)\n", + "Requirement already satisfied: anyio<5,>=3.5.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from openai<2.0.0,>=1.57.1->llama-index-llms-openai) (4.4.0)\n", + "Requirement already satisfied: distro<2,>=1.7.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from openai<2.0.0,>=1.57.1->llama-index-llms-openai) (1.9.0)\n", + "Collecting jiter<1,>=0.4.0 (from openai<2.0.0,>=1.57.1->llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/4d/a0/3993cda2e267fe679b45d0bcc2cef0b4504b0aa810659cdae9737d6bace9/jiter-0.8.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (345 kB)\n", + "Requirement already satisfied: sniffio in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from openai<2.0.0,>=1.57.1->llama-index-llms-openai) (1.3.1)\n", + "Requirement already satisfied: aiohappyeyeballs>=2.3.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (2.4.0)\n", + "Requirement already satisfied: aiosignal>=1.1.2 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (1.3.1)\n", + "Requirement already satisfied: attrs>=17.3.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (24.2.0)\n", + "Requirement already satisfied: frozenlist>=1.1.1 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (1.4.1)\n", + "Requirement already satisfied: multidict<7.0,>=4.5 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from 
aiohttp<4.0.0,>=3.8.6->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (6.0.5)\n", + "Requirement already satisfied: yarl<2.0,>=1.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (1.9.7)\n", + "Requirement already satisfied: async-timeout<5.0,>=4.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from aiohttp<4.0.0,>=3.8.6->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (4.0.3)\n", + "Requirement already satisfied: idna>=2.8 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from anyio<5,>=3.5.0->openai<2.0.0,>=1.57.1->llama-index-llms-openai) (3.10)\n", + "Requirement already satisfied: exceptiongroup>=1.0.2 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from anyio<5,>=3.5.0->openai<2.0.0,>=1.57.1->llama-index-llms-openai) (1.2.2)\n", + "Requirement already satisfied: certifi in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from httpx->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (2024.8.30)\n", + "Requirement already satisfied: httpcore==1.* in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from httpx->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (1.0.5)\n", + "Requirement already satisfied: h11<0.15,>=0.13 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from httpcore==1.*->httpx->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (0.14.0)\n", + "Requirement already satisfied: click in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from nltk>3.8.1->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (8.1.7)\n", + "Requirement already satisfied: joblib in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from nltk>3.8.1->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (1.4.2)\n", + "Requirement already satisfied: regex>=2021.8.3 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from nltk>3.8.1->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (2024.7.24)\n", + "Requirement already satisfied: annotated-types>=0.4.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from pydantic>=2.8.0->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (0.7.0)\n", + "Requirement already satisfied: pydantic-core==2.20.1 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from pydantic>=2.8.0->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (2.20.1)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from requests>=2.31.0->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (3.3.2)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from requests>=2.31.0->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (2.2.3)\n", + "Collecting greenlet!=0.4.17 (from SQLAlchemy>=1.4.49->SQLAlchemy[asyncio]>=1.4.49->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/cf/69/79e4d63b9387b48939096e25115b8af7cd8a90397a304f92436bcb21f5b2/greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl (599 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m599.5/599.5 kB\u001b[0m \u001b[31m7.0 
MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting mypy-extensions>=0.3.0 (from typing-inspect>=0.8.0->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl (4.7 kB)\n", + "Collecting marshmallow<4.0.0,>=3.18.0 (from dataclasses-json->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai)\n", + " Downloading https://mirrors.ustc.edu.cn/pypi/packages/ac/a7/a78ff54e67ef92a3d12126b98eb98ab8abab3de4a8c46d240c87e514d6bb/marshmallow-3.23.1-py3-none-any.whl (49 kB)\n", + "Requirement already satisfied: packaging>=17.0 in /home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages (from marshmallow<4.0.0,>=3.18.0->dataclasses-json->llama-index-core<0.13.0,>=0.12.4->llama-index-llms-openai) (24.1)\n", + "\u001b[33mWARNING: Error parsing dependencies of huggingface-hub: [Errno 2] No such file or directory: '/home/zhangyh/.pyenv/versions/3.10.14/lib/python3.10/site-packages/huggingface_hub-0.24.6.dist-info/METADATA'\u001b[0m\u001b[33m\n", + "\u001b[0mInstalling collected packages: filetype, dirtyjson, wrapt, tenacity, nltk, mypy-extensions, marshmallow, jiter, greenlet, typing-inspect, SQLAlchemy, deprecated, openai, dataclasses-json, llama-index-core, llama-index-llms-openai\n", + " Attempting uninstall: openai\n", + " Found existing installation: openai 1.39.0\n", + " Uninstalling openai-1.39.0:\n", + " Successfully uninstalled openai-1.39.0\n", + "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", + "xinference 0.14.4 requires huggingface-hub>=0.19.4, which is not installed.\n", + "xinference 0.14.4 requires openai<1.40,>1, but you have openai 1.57.2 which is incompatible.\u001b[0m\u001b[31m\n", + "\u001b[0mSuccessfully installed SQLAlchemy-2.0.36 dataclasses-json-0.6.7 deprecated-1.2.15 dirtyjson-1.0.8 filetype-1.2.0 greenlet-3.1.1 jiter-0.8.2 llama-index-core-0.12.5 llama-index-llms-openai-0.3.10 marshmallow-3.23.1 mypy-extensions-1.0.0 nltk-3.9.1 openai-1.57.2 tenacity-9.0.0 typing-inspect-0.9.0 wrapt-1.17.0\n", + "\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.2\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.3.1\u001b[0m\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], "source": [ "%pip install llama-index-llms-openai" ] @@ -193,14 +286,13 @@ "In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters\n", "---------------------\n", "Given the context information and not prior knowledge, answer the query.\n", - "Please write the answer in the style of Shakespeare\n", "Query: How many params does llama 2 have\n", "Answer: \n" ] } ], "source": [ - "fmt_prompt = partial_prompt_tmpl.format(\n", + "fmt_prompt = prompt_tmpl.format(\n", " context_str=\"In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters\",\n", " 
query_str=\"How many params does llama 2 have\",\n", ")\n", @@ -294,9 +386,9 @@ ], "metadata": { "kernelspec": { - "display_name": "llama_index_v2", + "display_name": "Python 3", "language": "python", - "name": "llama_index_v2" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/llama-index-core/llama_index/core/__init__.py b/llama-index-core/llama_index/core/__init__.py index d9b1df7e718e5..d1b124f56f4f0 100644 --- a/llama-index-core/llama_index/core/__init__.py +++ b/llama-index-core/llama_index/core/__init__.py @@ -1,6 +1,6 @@ """Init file of LlamaIndex.""" -__version__ = "0.12.6" +__version__ = "0.12.7" import logging from logging import NullHandler diff --git a/llama-index-core/llama_index/core/schema.py b/llama-index-core/llama_index/core/schema.py index 412432c5cbfbc..07a1f57619189 100644 --- a/llama-index-core/llama_index/core/schema.py +++ b/llama-index-core/llama_index/core/schema.py @@ -934,25 +934,38 @@ def __init__(self, **data: Any) -> None: If 'extra_info' was passed, store it in 'metadata'. """ if "doc_id" in data: + value = data.pop("doc_id") if "id_" in data: - msg = "Cannot pass both 'doc_id' and 'id_' to create a Document, use 'id_'" - raise ValueError(msg) - data["id_"] = data.pop("doc_id") + msg = "'doc_id' is deprecated and 'id_' will be used instead" + logging.warning(msg) + else: + data["id_"] = value if "extra_info" in data: + value = data.pop("extra_info") if "metadata" in data: - msg = "Cannot pass both 'extra_info' and 'metadata' to create a Document, use 'metadata'" - raise ValueError(msg) - data["metadata"] = data.pop("extra_info") + msg = "'extra_info' is deprecated and 'metadata' will be used instead" + logging.warning(msg) + else: + data["metadata"] = value if "text" in data: + text = data.pop("text") if "text_resource" in data: - msg = "Cannot pass both 'text' and 'text_resource' to create a Document, use 'text_resource'" - raise ValueError(msg) - data["text_resource"] = MediaResource(text=data.pop("text")) + msg = "'text' is deprecated and 'text_resource' will be used instead" + logging.warning(msg) + else: + data["text_resource"] = MediaResource(text=text) super().__init__(**data) + @model_serializer(mode="wrap") + def custom_model_dump(self, handler: Any) -> Dict[str, Any]: + """For full backward compatibility with the text field, we customize the model serializer.""" + data = super().custom_model_dump(handler) + data["text"] = self.text + return data + @property def text(self) -> str: """Provided for backward compatibility, it returns the content of text_resource.""" diff --git a/llama-index-core/pyproject.toml b/llama-index-core/pyproject.toml index bea2c3679bc0c..9437d85ffc5a7 100644 --- a/llama-index-core/pyproject.toml +++ b/llama-index-core/pyproject.toml @@ -46,7 +46,7 @@ name = "llama-index-core" packages = [{include = "llama_index"}] readme = "README.md" repository = "https://github.com/run-llama/llama_index" -version = "0.12.6" +version = "0.12.7" [tool.poetry.dependencies] SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"} diff --git a/llama-index-core/tests/schema/test_schema.py b/llama-index-core/tests/schema/test_schema.py index 65c8eaa62c61f..1c8792ad495dc 100644 --- a/llama-index-core/tests/schema/test_schema.py +++ b/llama-index-core/tests/schema/test_schema.py @@ -1,4 +1,5 @@ import base64 +import logging from io import BytesIO from pathlib import Path from unittest import mock @@ -100,33 +101,46 @@ def test_build_text_node_text_resource() -> None: assert node.text == "test data" -def 
test_document_init() -> None: - doc = Document(doc_id="test") - assert doc.doc_id == "test" - assert doc.id_ == "test" - with pytest.raises( - ValueError, - match="Cannot pass both 'doc_id' and 'id_' to create a Document, use 'id_'", - ): - doc = Document(id_="test", doc_id="test") - - doc = Document(extra_info={"key": "value"}) - assert doc.metadata == {"key": "value"} - with pytest.raises( - ValueError, - match="Cannot pass both 'extra_info' and 'metadata' to create a Document, use 'metadata'", - ): - doc = Document(extra_info={}, metadata={}) - - doc = Document(text="test") - assert doc.text == "test" - assert doc.text_resource - assert doc.text_resource.text == "test" - with pytest.raises( - ValueError, - match="Cannot pass both 'text' and 'text_resource' to create a Document, use 'text_resource'", - ): - doc = Document(text="test", text_resource="test") +def test_document_init(caplog) -> None: + with caplog.at_level(logging.WARNING): + # Legacy init + doc = Document(doc_id="test") + assert doc.doc_id == "test" + assert doc.id_ == "test" + # Legacy init mixed with new + doc = Document(id_="test", doc_id="legacy_test") + assert "'doc_id' is deprecated and 'id_' will be used instead" in caplog.text + assert doc.id_ == "test" + caplog.clear() + + # Legacy init + doc = Document(extra_info={"key": "value"}) + assert doc.metadata == {"key": "value"} + assert doc.extra_info == {"key": "value"} + # Legacy init mixed with new + doc = Document(extra_info={"old_key": "old_value"}, metadata={"key": "value"}) + assert ( + "'extra_info' is deprecated and 'metadata' will be used instead" + in caplog.text + ) + assert doc.metadata == {"key": "value"} + assert doc.extra_info == {"key": "value"} + caplog.clear() + + # Legacy init + doc = Document(text="test") + assert doc.text == "test" + assert doc.text_resource + assert doc.text_resource.text == "test" + # Legacy init mixed with new + doc = Document(text="legacy_test", text_resource=MediaResource(text="test")) + assert ( + "'text' is deprecated and 'text_resource' will be used instead" + in caplog.text + ) + assert doc.text == "test" + assert doc.text_resource + assert doc.text_resource.text == "test" def test_document_properties(): @@ -145,6 +159,35 @@ def test_document_str(): assert str(doc) == "Doc ID: test_id\nText: Lo..." 
+def test_document_legacy_roundtrip(): + origin = Document(id_="test_id", text="this is a test") + assert origin.model_dump() == { + "id_": "test_id", + "embedding": None, + "metadata": {}, + "excluded_embed_metadata_keys": [], + "excluded_llm_metadata_keys": [], + "relationships": {}, + "metadata_template": "{key}: {value}", + "metadata_separator": "\n", + "text": "this is a test", + "text_resource": { + "embeddings": None, + "text": "this is a test", + "mimetype": None, + "path": None, + "url": None, + }, + "image_resource": None, + "audio_resource": None, + "video_resource": None, + "text_template": "{metadata_str}\n\n{content}", + "class_name": "Document", + } + dest = Document(**origin.model_dump()) + assert dest.text == "this is a test" + + def test_image_document_empty(): doc = ImageDocument(id_="test") assert doc.id_ == "test" diff --git a/llama-index-integrations/agent/llama-index-agent-openai/llama_index/agent/openai/step.py b/llama-index-integrations/agent/llama-index-agent-openai/llama_index/agent/openai/step.py index 2cfab448eaa50..d772782b07382 100644 --- a/llama-index-integrations/agent/llama-index-agent-openai/llama_index/agent/openai/step.py +++ b/llama-index-integrations/agent/llama-index-agent-openai/llama_index/agent/openai/step.py @@ -661,7 +661,6 @@ async def _arun_step( step, task.extra_state["new_memory"], verbose=self._verbose ) - # TODO: see if we want to do step-based inputs tools = self.get_tools(task.input) openai_tools = [tool.metadata.to_openai_tool() for tool in tools] @@ -670,7 +669,6 @@ async def _arun_step( task, mode=mode, **llm_chat_kwargs ) - # TODO: implement _should_continue latest_tool_calls = self.get_latest_tool_calls(task) or [] latest_tool_outputs: List[ToolOutput] = [] @@ -678,32 +676,39 @@ async def _arun_step( latest_tool_calls, task.extra_state["n_function_calls"] ): is_done = True - else: is_done = False + + # Validate all tool calls first for tool_call in latest_tool_calls: - # Some validation if not isinstance(tool_call, get_args(OpenAIToolCall)): raise ValueError("Invalid tool_call object") - if tool_call.type != "function": raise ValueError("Invalid tool type. 
Unsupported by OpenAI") - # TODO: maybe execute this with multi-threading - return_direct = await self._acall_function( - tools, - tool_call, - task.extra_state["new_memory"], - latest_tool_outputs, - ) + # Execute all tool calls in parallel using asyncio.gather + tool_results = await asyncio.gather( + *[ + self._acall_function( + tools, + tool_call, + task.extra_state["new_memory"], + latest_tool_outputs, + ) + for tool_call in latest_tool_calls + ] + ) + + # Process results + for return_direct in tool_results: task.extra_state["sources"].append(latest_tool_outputs[-1]) - # change function call to the default value, if a custom function was given - # as an argument (none and auto are predefined by OpenAI) + # change function call to the default value if a custom function was given if tool_choice not in ("auto", "none"): tool_choice = "auto" task.extra_state["n_function_calls"] += 1 + # If any tool call requests direct return and it's the only call if return_direct and len(latest_tool_calls) == 1: is_done = True response_str = latest_tool_outputs[-1].content @@ -723,7 +728,6 @@ async def _arun_step( [ step.get_next_step( step_id=str(uuid.uuid4()), - # NOTE: input is unused input=None, ) ] diff --git a/llama-index-integrations/agent/llama-index-agent-openai/pyproject.toml b/llama-index-integrations/agent/llama-index-agent-openai/pyproject.toml index a11459db92560..86cf42260e7be 100644 --- a/llama-index-integrations/agent/llama-index-agent-openai/pyproject.toml +++ b/llama-index-integrations/agent/llama-index-agent-openai/pyproject.toml @@ -28,7 +28,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-agent-openai" readme = "README.md" -version = "0.4.0" +version = "0.4.1" [tool.poetry.dependencies] python = ">=3.9,<4.0" diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/llama_index/embeddings/voyageai/base.py b/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/llama_index/embeddings/voyageai/base.py index 860ad1994e84f..065672d84e225 100644 --- a/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/llama_index/embeddings/voyageai/base.py +++ b/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/llama_index/embeddings/voyageai/base.py @@ -1,19 +1,19 @@ """Voyage embeddings file.""" + import logging import os +from io import BytesIO +from pathlib import Path from typing import Any, List, Optional, Union +import voyageai +from PIL import Image + from llama_index.core.base.embeddings.base import Embedding from llama_index.core.bridge.pydantic import PrivateAttr from llama_index.core.callbacks.base import CallbackManager - -import voyageai from llama_index.core.embeddings import MultiModalEmbedding -from io import BytesIO -from pathlib import Path from llama_index.core.schema import ImageType -from PIL import Image - logger = logging.getLogger(__name__) @@ -36,6 +36,8 @@ class VoyageEmbedding(MultiModalEmbedding): _client: voyageai.Client = PrivateAttr(None) _aclient: voyageai.client_async.AsyncClient = PrivateAttr() truncation: Optional[bool] = None + output_dtype: Optional[str] = None + output_dimension: Optional[int] = None def __init__( self, @@ -43,6 +45,8 @@ def __init__( voyage_api_key: Optional[str] = None, embed_batch_size: Optional[int] = None, truncation: Optional[bool] = None, + output_dtype: Optional[str] = None, + output_dimension: Optional[int] = None, callback_manager: Optional[CallbackManager] = None, **kwargs: Any, ): @@ -73,6 +77,8 @@ def __init__( self._client = 
voyageai.Client(api_key=voyage_api_key) self._aclient = voyageai.AsyncClient(api_key=voyage_api_key) self.truncation = truncation + self.output_dtype = output_dtype + self.output_dimension = output_dimension @classmethod def class_name(cls) -> str: @@ -161,6 +167,8 @@ def _embed(self, texts: List[str], input_type: str) -> List[List[float]]: model=self.model_name, input_type=input_type, truncation=self.truncation, + output_dtype=self.output_dtype, + output_dimension=self.output_dimension, ).embeddings async def _aembed(self, texts: List[str], input_type: str) -> List[List[float]]: @@ -177,6 +185,8 @@ async def _aembed(self, texts: List[str], input_type: str) -> List[List[float]]: model=self.model_name, input_type=input_type, truncation=self.truncation, + output_dtype=self.output_dtype, + output_dimension=self.output_dimension, ) return r.embeddings diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/pyproject.toml index 09b4b3cb06d8b..6f29987d6b85f 100644 --- a/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/pyproject.toml +++ b/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/pyproject.toml @@ -27,7 +27,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-embeddings-voyageai" readme = "README.md" -version = "0.3.3" +version = "0.3.4" [tool.poetry.dependencies] python = ">=3.9,<4.0" diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/tests/test_embeddings_voyageai.py b/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/tests/test_embeddings_voyageai.py index 678016600f37f..2fdb09f5cf4d6 100644 --- a/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/tests/test_embeddings_voyageai.py +++ b/llama-index-integrations/embeddings/llama-index-embeddings-voyageai/tests/test_embeddings_voyageai.py @@ -17,6 +17,8 @@ def test_embedding_class_voyage_2(): assert emb.embed_batch_size == 72 assert emb.model_name == "voyage-2" assert emb.truncation + assert emb.output_dimension is None + assert emb.output_dtype is None def test_embedding_class_voyage_2_with_batch_size(): @@ -27,6 +29,36 @@ def test_embedding_class_voyage_2_with_batch_size(): assert emb.embed_batch_size == 49 assert emb.model_name == "voyage-2" assert emb.truncation is None + assert emb.output_dimension is None + assert emb.output_dtype is None + + +def test_embedding_class_voyage_3_large_with_output_dimension(): + emb = VoyageEmbedding( + model_name="voyage-3-large", + voyage_api_key="NOT_A_VALID_KEY", + output_dimension=512, + ) + assert isinstance(emb, BaseEmbedding) + assert emb.embed_batch_size == 7 + assert emb.model_name == "voyage-3-large" + assert emb.truncation is None + assert emb.output_dimension == 512 + assert emb.output_dtype is None + + +def test_embedding_class_voyage_3_large_with_output_dtype(): + emb = VoyageEmbedding( + model_name="voyage-3-large", + voyage_api_key="NOT_A_VALID_KEY", + output_dtype="float", + ) + assert isinstance(emb, BaseEmbedding) + assert emb.embed_batch_size == 7 + assert emb.model_name == "voyage-3-large" + assert emb.truncation is None + assert emb.output_dimension is None + assert emb.output_dtype == "float" def test_voyageai_embedding_class(): diff --git a/llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/pyproject.toml b/llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/pyproject.toml index 72ea7b11a9581..1d83d21d7fdab 100644 --- 
a/llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/pyproject.toml +++ b/llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/pyproject.toml @@ -30,11 +30,10 @@ readme = "README.md" version = "0.1.0" [tool.poetry.dependencies] -python = ">=3.9,<3.12" +python = ">=3.10,<3.12" cognee = "^0.1.20" httpx = "~=0.27.0" llama-index-core = "^0.12.5" -pytest-asyncio = "^0.25.0" [tool.poetry.group.dev.dependencies] ipython = "8.10.0" @@ -43,6 +42,7 @@ mypy = "0.991" pre-commit = "3.2.0" pylint = "2.15.10" pytest = "8.2" +pytest-asyncio = "^0.25.0" pytest-mock = "3.11.1" ruff = "0.0.292" tree-sitter-languages = "^1.8.0" diff --git a/llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/tests/BUILD b/llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/tests/BUILD index dabf212d7e716..1f617359f48aa 100644 --- a/llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/tests/BUILD +++ b/llama-index-integrations/graph_rag/llama-index-graph-rag-cognee/tests/BUILD @@ -1 +1,3 @@ -python_tests() +python_tests( + interpreter_constraints=["==3.10.*", "==3.11.*", "==3.12.*"] +) diff --git a/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/BUILD b/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/BUILD index db46e8d6c978c..2e35f081cdee9 100644 --- a/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/BUILD +++ b/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/BUILD @@ -1 +1,6 @@ python_sources() + +resource( + name="py_typed", + source="py.typed", +) diff --git a/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/py.typed b/llama-index-integrations/llms/llama-index-llms-gemini/llama_index/llms/gemini/py.typed new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml index 95bde81ceb904..5db22b922c3b4 100644 --- a/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml +++ b/llama-index-integrations/llms/llama-index-llms-gemini/pyproject.toml @@ -27,7 +27,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-llms-gemini" readme = "README.md" -version = "0.4.1" +version = "0.4.2" [tool.poetry.dependencies] python = ">=3.9,<4.0" diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/.gitignore b/llama-index-integrations/llms/llama-index-llms-keywordsai/.gitignore new file mode 100644 index 0000000000000..990c18de22908 --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/.gitignore @@ -0,0 +1,153 @@ +llama_index/_static +.DS_Store +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +bin/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +etc/ +include/ +lib/ +lib64/ +parts/ +sdist/ +share/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +.ruff_cache + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints +notebooks/ + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +pyvenv.cfg + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# Jetbrains +.idea +modules/ +*.swp + +# VsCode +.vscode + +# pipenv +Pipfile +Pipfile.lock + +# pyright +pyrightconfig.json diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/BUILD b/llama-index-integrations/llms/llama-index-llms-keywordsai/BUILD new file mode 100644 index 0000000000000..0896ca890d8bf --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/BUILD @@ -0,0 +1,3 @@ +poetry_requirements( + name="poetry", +) diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/Makefile b/llama-index-integrations/llms/llama-index-llms-keywordsai/Makefile new file mode 100644 index 0000000000000..b9eab05aa3706 --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/Makefile @@ -0,0 +1,17 @@ +GIT_ROOT ?= $(shell git rev-parse --show-toplevel) + +help: ## Show all Makefile targets. + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[33m%-30s\033[0m %s\n", $$1, $$2}' + +format: ## Run code autoformatters (black). + pre-commit install + git ls-files | xargs pre-commit run black --files + +lint: ## Run linters: pre-commit (black, ruff, codespell) and mypy + pre-commit install && git ls-files | xargs pre-commit run --show-diff-on-failure --files + +test: ## Run tests via pytest. + pytest tests + +watch-docs: ## Build and watch documentation. + sphinx-autobuild docs/ docs/_build/html --open-browser --watch $(GIT_ROOT)/llama_index/ diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/README.md b/llama-index-integrations/llms/llama-index-llms-keywordsai/README.md new file mode 100644 index 0000000000000..4f1681e848146 --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/README.md @@ -0,0 +1,131 @@ +# LlamaIndex Llms Integration: KeywordsAI + +## Installation + +To install the required package, run: + +```bash +%pip install llama-index-llms-keywordsai +``` + +## Setup + +1. Set your KeywordsAI API key as an environment variable. 
You can replace `"sk-..."` with your actual API key:
+
+```python
+import os
+
+os.environ["KEYWORDSAI_API_KEY"] = "sk-..."
+```
+
+## Basic Usage
+
+### Generate Completions
+
+To generate a completion for a prompt, use the `complete` method:
+
+```python
+from llama_index.llms.keywordsai import KeywordsAI
+
+resp = KeywordsAI().complete("Paul Graham is ")
+print(resp)
+```
+
+### Chat Responses
+
+To send a chat message and receive a response, create a list of `ChatMessage` instances and use the `chat` method:
+
+```python
+from llama_index.core.llms import ChatMessage
+
+messages = [
+    ChatMessage(
+        role="system", content="You are a pirate with a colorful personality."
+    ),
+    ChatMessage(role="user", content="What is your name?"),
+]
+resp = KeywordsAI().chat(messages)
+print(resp)
+```
+
+## Streaming Responses
+
+### Stream Complete
+
+To stream responses for a prompt, use the `stream_complete` method:
+
+```python
+from llama_index.llms.keywordsai import KeywordsAI
+
+llm = KeywordsAI()
+resp = llm.stream_complete("Paul Graham is ")
+for r in resp:
+    print(r.delta, end="")
+```
+
+### Stream Chat
+
+To stream chat responses, use the `stream_chat` method:
+
+```python
+from llama_index.llms.keywordsai import KeywordsAI
+from llama_index.core.llms import ChatMessage
+
+llm = KeywordsAI()
+messages = [
+    ChatMessage(
+        role="system", content="You are a pirate with a colorful personality."
+    ),
+    ChatMessage(role="user", content="What is your name?"),
+]
+resp = llm.stream_chat(messages)
+for r in resp:
+    print(r.delta, end="")
+```
+
+## Configure Model
+
+You can specify a particular model when creating the `KeywordsAI` instance:
+
+```python
+llm = KeywordsAI(model="gpt-3.5-turbo")
+resp = llm.complete("Paul Graham is ")
+print(resp)
+
+messages = [
+    ChatMessage(
+        role="system", content="You are a pirate with a colorful personality."
+ ), + ChatMessage(role="user", content="What is your name?"), +] +resp = llm.chat(messages) +print(resp) +``` + +## Asynchronous Usage + +You can also use asynchronous methods for completion: + +```python +from llama_index.llms.keywordsai import KeywordsAI + +llm = KeywordsAI(model="gpt-3.5-turbo") +resp = await llm.acomplete("Paul Graham is ") +print(resp) +``` + +## Set API Key at a Per-Instance Level + +If desired, you can have separate LLM instances use different API keys: + +```python +from llama_index.llms.keywordsai import KeywordsAI + +llm = KeywordsAI(model="gpt-3.5-turbo", api_key="BAD_KEY") +resp = KeywordsAI().complete("Paul Graham is ") +print(resp) +``` + +### LLM Implementation example + +https://docs.llamaindex.ai/en/stable/examples/llm/keywordsai/ diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/BUILD b/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/BUILD new file mode 100644 index 0000000000000..db46e8d6c978c --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/BUILD @@ -0,0 +1 @@ +python_sources() diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/__init__.py b/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/__init__.py new file mode 100644 index 0000000000000..0be4e8994fb8e --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/__init__.py @@ -0,0 +1,3 @@ +from llama_index.llms.keywordsai.base import KeywordsAI + +__all__ = ["KeywordsAI"] diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/base.py b/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/base.py new file mode 100644 index 0000000000000..0567aac8f474f --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/base.py @@ -0,0 +1,270 @@ +import functools +from typing import ( + Any, + Callable, + Dict, + Optional, + Sequence, +) + +import httpx +from llama_index.core.base.llms.types import ( + ChatMessage, + ChatResponse, + LLMMetadata, + MessageRole, +) +from llama_index.core.bridge.pydantic import Field, PrivateAttr +from llama_index.core.callbacks import CallbackManager +from llama_index.core.constants import ( + DEFAULT_TEMPERATURE, +) +from llama_index.core.types import BaseOutputParser, PydanticProgramMode +from .utils import ( + create_retry_decorator, + is_chat_model, + is_function_calling_model, + keywordsai_modelname_to_contextsize, + resolve_keywordsai_credentials, +) +from openai import OpenAI as SyncOpenAI + +import llama_index.core.instrumentation as instrument + +from llama_index.llms.openai import OpenAI, AsyncOpenAI +from llama_index.llms.openai.utils import O1_MODELS + +dispatcher = instrument.get_dispatcher(__name__) + +DEFAULT_KEYWORDSAI_MODEL = "gpt-4o" + + +def llm_retry_decorator(f: Callable[[Any], Any]) -> Callable[[Any], Any]: + @functools.wraps(f) + def wrapper(self, *args: Any, **kwargs: Any) -> Any: + max_retries = getattr(self, "max_retries", 0) + if max_retries <= 0: + return f(self, *args, **kwargs) + + retry = create_retry_decorator( + max_retries=max_retries, + random_exponential=True, + stop_after_delay_seconds=60, + min_seconds=1, + max_seconds=20, + ) + return retry(f)(self, *args, **kwargs) + + return wrapper + + +def force_single_tool_call(response: ChatResponse) -> None: 
+    tool_calls = response.message.additional_kwargs.get("tool_calls", [])
+    if len(tool_calls) > 1:
+        response.message.additional_kwargs["tool_calls"] = [tool_calls[0]]
+
+
+class KeywordsAI(OpenAI):
+    """
+    KeywordsAI LLM.
+
+    Args:
+        model: name of the KeywordsAI model to use.
+        temperature: a float from 0 to 1 controlling randomness in generation; higher will lead to more creative, less deterministic responses.
+        max_tokens: the maximum number of tokens to generate.
+        customer_identifier: the unique ID of the customer for keywords observability.
+        thread_identifier: the unique ID of the thread for keywords observability.
+        additional_kwargs: Add additional parameters to OpenAI SDK request body.
+        max_retries: How many times to retry the API call if it fails.
+        timeout: How long to wait, in seconds, for an API call before failing.
+        reuse_client: Reuse the KeywordsAI client between requests. When doing anything with large volumes of async API calls, setting this to false can improve stability.
+        api_key: Your KeywordsAI api key
+        api_base: The base URL of the API to call
+        api_version: the version of the API to call
+        callback_manager: the callback manager is used for observability.
+        default_headers: override the default headers for API requests.
+        http_client: pass in your own httpx.Client instance.
+        async_http_client: pass in your own httpx.AsyncClient instance.
+
+    Examples:
+        `pip install llama-index-llms-keywordsai`
+
+        ```python
+        import os
+        import openai
+
+        os.environ["KEYWORDSAI_API_KEY"] = "sk-..."
+        openai.api_key = os.environ["KEYWORDSAI_API_KEY"]
+
+        from llama_index.llms.keywordsai import KeywordsAI
+
+        llm = KeywordsAI(model="gpt-3.5-turbo")
+
+        stream = llm.stream("Hi, write a short story")
+
+        for r in stream:
+            print(r.delta, end="")
+        ```
+    """
+
+    model: str = Field(
+        default=DEFAULT_KEYWORDSAI_MODEL, description="The KeywordsAI model to use."
+    )
+    temperature: float = Field(
+        default=DEFAULT_TEMPERATURE,
+        description="The temperature to use during generation.",
+        ge=0.0,
+        le=1.0,
+    )
+    max_tokens: Optional[int] = Field(
+        description="The maximum number of tokens to generate.",
+        gt=0,
+    )
+    logprobs: Optional[bool] = Field(
+        description="Whether to return logprobs per token.",
+        default=None,
+    )
+    top_logprobs: int = Field(
+        description="The number of top token log probs to return.",
+        default=0,
+        ge=0,
+        le=20,
+    )
+    customer_identifier: Optional[str] = Field(
+        default=None,
+        description="The unique ID of the customer for keywords observability.",
+    )
+    thread_identifier: Optional[str] = Field(
+        default=None,
+        description="The unique ID of the thread for keywords observability.",
+    )
+    additional_kwargs: Dict[str, Any] = Field(
+        default_factory=dict, description="Additional kwargs for the KeywordsAI API."
+    )
+    max_retries: int = Field(
+        default=3,
+        description="The maximum number of API retries.",
+        ge=0,
+    )
+    timeout: float = Field(
+        default=60.0,
+        description="The timeout, in seconds, for API requests.",
+        ge=0,
+    )
+    default_headers: Optional[Dict[str, str]] = Field(
+        default=None, description="The default headers for API requests."
+    )
+    reuse_client: bool = Field(
+        default=True,
+        description=(
+            "Reuse the KeywordsAI client between requests. When doing anything with large "
+            "volumes of async API calls, setting this to false can improve stability."
+ ), + ) + + api_key: str = Field(default=None, description="The KeywordsAI API key.") + api_base: str = Field(description="The base URL for KeywordsAI API.") + api_version: str = Field(description="The API version for KeywordsAI API.") + strict: bool = Field( + default=False, + description="Whether to use strict mode for invoking tools/using schemas.", + ) + + _client: Optional[SyncOpenAI] = PrivateAttr() + _aclient: Optional[AsyncOpenAI] = PrivateAttr() + _http_client: Optional[httpx.Client] = PrivateAttr() + _async_http_client: Optional[httpx.AsyncClient] = PrivateAttr() + + def __init__( + self, + model: str = DEFAULT_KEYWORDSAI_MODEL, + temperature: float = DEFAULT_TEMPERATURE, + max_tokens: Optional[int] = None, + customer_identifier: Optional[str] = None, + thread_identifier: Optional[str] = None, + additional_kwargs: Optional[Dict[str, Any]] = None, + max_retries: int = 3, + timeout: float = 60.0, + reuse_client: bool = True, + api_key: Optional[str] = None, + api_base: Optional[str] = None, + api_version: Optional[str] = None, + callback_manager: Optional[CallbackManager] = None, + default_headers: Optional[Dict[str, str]] = None, + http_client: Optional[httpx.Client] = None, + async_http_client: Optional[httpx.AsyncClient] = None, + openai_client: Optional[SyncOpenAI] = None, + async_openai_client: Optional[AsyncOpenAI] = None, + # base class + system_prompt: Optional[str] = None, + messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, + completion_to_prompt: Optional[Callable[[str], str]] = None, + pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, + output_parser: Optional[BaseOutputParser] = None, + strict: bool = False, + **kwargs: Any, + ) -> None: + additional_kwargs = additional_kwargs or {} + if not additional_kwargs.get("extra_body"): + additional_kwargs["extra_body"] = {} + if customer_identifier: + additional_kwargs["extra_body"]["customer_identifier"] = customer_identifier + if thread_identifier: + additional_kwargs["extra_body"]["thread_identifier"] = thread_identifier + + api_key, api_base, api_version = resolve_keywordsai_credentials( + api_key=api_key, + api_base=api_base, + api_version=api_version, + ) + + # TODO: Temp forced to 1.0 for o1 + if model in O1_MODELS: + temperature = 1.0 + + super().__init__( + model=model, + temperature=temperature, + max_tokens=max_tokens, + additional_kwargs=additional_kwargs, + max_retries=max_retries, + callback_manager=callback_manager, + api_key=api_key, + api_version=api_version, + api_base=api_base, + timeout=timeout, + reuse_client=reuse_client, + default_headers=default_headers, + system_prompt=system_prompt, + messages_to_prompt=messages_to_prompt, + completion_to_prompt=completion_to_prompt, + pydantic_program_mode=pydantic_program_mode, + output_parser=output_parser, + strict=strict, + **kwargs, + ) + + self._client = openai_client + self._aclient = async_openai_client + self._http_client = http_client + self._async_http_client = async_http_client + + @classmethod + def class_name(cls) -> str: + return "keywordsai_llm" + + @property + def metadata(self) -> LLMMetadata: + return LLMMetadata( + context_window=keywordsai_modelname_to_contextsize(self._get_model_name()), + num_output=self.max_tokens or -1, + is_chat_model=is_chat_model(model=self._get_model_name()), + is_function_calling_model=is_function_calling_model( + model=self._get_model_name() + ), + model_name=self.model, + # TODO: Temp for O1 beta + system_role=MessageRole.USER + if self.model in O1_MODELS + else 
MessageRole.SYSTEM, + ) diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/utils.py b/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/utils.py new file mode 100644 index 0000000000000..39b7b768ed025 --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/llama_index/llms/keywordsai/utils.py @@ -0,0 +1,166 @@ +import logging +import os +from typing import Any, Callable, Optional, Union, Tuple +from functools import lru_cache + +from tenacity import ( + before_sleep_log, + retry, + retry_if_exception_type, + stop_after_attempt, + stop_after_delay, + wait_exponential, + wait_random_exponential, +) +from tenacity.stop import stop_base +from llama_index.core.base.llms.generic_utils import get_from_param_or_env +import openai +from openai.types.chat import ChatCompletionMessageToolCall +from openai.types.chat.chat_completion_chunk import ChoiceDeltaToolCall + +DEFAULT_KEYWORDSAI_API_TYPE = "keywords_ai" +DEFAULT_KEYWORDSAI_API_BASE = "https://api.keywordsai.co/api/" +DEFAULT_KEYWORDSAI_API_VERSION = "" + +MISSING_API_KEY_ERROR_MESSAGE = """No API key found for KeywordsAI. +Please set the KEYWORDSAI_API_KEY environment variable. +""" + +logger = logging.getLogger(__name__) + +KeywordsAIToolCall = Union[ChatCompletionMessageToolCall, ChoiceDeltaToolCall] + + +@lru_cache +def get_keywords_models(): + """Get available models from KeywordsAI API. + + Returns: + Dict mapping model names to model configurations with pricing and provider info, + or an empty list if the API call fails after all retries. + """ + import requests + from typing import Dict, List + + def _get_models() -> Dict[str, Dict]: + response = requests.get("https://api.keywordsai.co/api/models/public") + if not response.ok: + raise Exception(f"Failed to fetch models: {response.status_code}") + + data = response.json() + models = [m for m in data["models"] if m.get("input_cost")] + + # Process model data + for model in models: + # Convert costs from per token to per million tokens + model["input_cost"] = model["input_cost"] * 1e6 + model["output_cost"] = model["output_cost"] * 1e6 + + # Normalize Google provider names + if model["provider"]["provider_id"] in [ + "google_palm", + "google_vertex_ai", + "google_gemini_ai", + ]: + model["provider"]["provider_id"] = "google" + model["provider"]["provider_name"] = "Google" + + return {model["model_name"]: model for model in models} + + # Use retry decorator for resilience + retry_decorator = create_retry_decorator( + max_retries=3, random_exponential=True, min_seconds=1, max_seconds=10 + ) + + try: + return retry_decorator(_get_models)() + except Exception as e: + logger.error(f"Failed to fetch models after retries: {e!s}") + return [] + + +def create_retry_decorator( + max_retries: int, + random_exponential: bool = False, + stop_after_delay_seconds: Optional[float] = None, + min_seconds: float = 4, + max_seconds: float = 60, +) -> Callable[[Any], Any]: + wait_strategy = ( + wait_random_exponential(min=min_seconds, max=max_seconds) + if random_exponential + else wait_exponential(multiplier=1, min=min_seconds, max=max_seconds) + ) + + stop_strategy: stop_base = stop_after_attempt(max_retries) + if stop_after_delay_seconds is not None: + stop_strategy = stop_strategy | stop_after_delay(stop_after_delay_seconds) + + return retry( + reraise=True, + stop=stop_strategy, + wait=wait_strategy, + retry=( + retry_if_exception_type( + ( + openai.APIConnectionError, + openai.APITimeoutError, + openai.RateLimitError, +
openai.InternalServerError, + ) + ) + ), + before_sleep=before_sleep_log(logger, logging.WARNING), + ) + + +def keywordsai_modelname_to_contextsize(modelname: str) -> int: + """Calculate the maximum number of tokens possible to generate for a model.""" + models = get_keywords_models() + return models[modelname]["max_context_window"] + + +def is_chat_model(model: str) -> bool: + # TODO: check if they are chat models + return True + + +def is_function_calling_model(model: str) -> bool: + models = get_keywords_models() + return models[model]["function_call"] == 1 + + +def resolve_keywordsai_credentials( + api_key: Optional[str] = None, + api_base: Optional[str] = None, + api_version: Optional[str] = None, +) -> Tuple[Optional[str], str, str]: + """Resolve KeywordsAI credentials. + + The order of precedence is: + 1. param + 2. env + 3. default + """ + # resolve from param or env + api_key = get_from_param_or_env("api_key", api_key, "KEYWORDSAI_API_KEY", "") + api_base = get_from_param_or_env("api_base", api_base, "KEYWORDSAI_API_BASE", "") + api_version = get_from_param_or_env( + "api_version", api_version, "KEYWORDSAI_API_VERSION", "" + ) + + # fall back to defaults + final_api_key = api_key or "" + final_api_base = api_base or DEFAULT_KEYWORDSAI_API_BASE + + return final_api_key, str(final_api_base), api_version + + +def validate_keywordsai_api_key(api_key: Optional[str] = None) -> None: + openai_api_key = api_key or os.environ.get("KEYWORDSAI_API_KEY", "") + + if not openai_api_key: + raise ValueError(MISSING_API_KEY_ERROR_MESSAGE) diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-keywordsai/pyproject.toml new file mode 100644 index 0000000000000..6e7051cc925ec --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/pyproject.toml @@ -0,0 +1,63 @@ +[build-system] +build-backend = "poetry.core.masonry.api" +requires = ["poetry-core"] + +[tool.codespell] +check-filenames = true +check-hidden = true +skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb" + +[tool.llamahub] +contains_example = false +import_path = "llama_index.llms.keywordsai" + +[tool.llamahub.class_authors] +KeywordsAI = "keywordsai" + +[tool.mypy] +disallow_untyped_defs = true +exclude = ["_static", "build", "examples", "notebooks", "venv"] +ignore_missing_imports = true +python_version = "3.8" + +[tool.poetry] +authors = ["llama-index"] +description = "llama-index llms keywordsai integration" +exclude = ["**/BUILD"] +license = "MIT" +name = "llama-index-llms-keywordsai" +readme = "README.md" +version = "1.0.0" + +[tool.poetry.dependencies] +python = ">=3.8.1,<4.0" +llama-index-core = "^0.11.7" +llama-index-llms-openai = "^0.2.16" + +[tool.poetry.group.dev.dependencies] +ipython = "8.10.0" +jupyter = "^1.0.0" +mypy = "0.991" +pre-commit = "3.2.0" +pylint = "2.15.10" +pytest = "7.2.1" +pytest-mock = "3.11.1" +ruff = "0.0.292" +tree-sitter-languages = "^1.8.0" +types-Deprecated = ">=0.1.0" +types-PyYAML = "^6.0.12.12" +types-protobuf = "^4.24.0.4" +types-redis = "4.5.5.0" +types-requests = "2.28.11.8" +types-setuptools = "67.1.0.0" + +[tool.poetry.group.dev.dependencies.black] +extras = ["jupyter"] +version = "<=23.9.1,>=23.7.0" + +[tool.poetry.group.dev.dependencies.codespell] +extras = ["toml"] +version = ">=v2.2.6" + +[[tool.poetry.packages]] +include = "llama_index/" diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/tests/BUILD
b/llama-index-integrations/llms/llama-index-llms-keywordsai/tests/BUILD new file mode 100644 index 0000000000000..dabf212d7e716 --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/tests/BUILD @@ -0,0 +1 @@ +python_tests() diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/tests/__init__.py b/llama-index-integrations/llms/llama-index-llms-keywordsai/tests/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/llama-index-integrations/llms/llama-index-llms-keywordsai/tests/test_llms_keywordsai.py b/llama-index-integrations/llms/llama-index-llms-keywordsai/tests/test_llms_keywordsai.py new file mode 100644 index 0000000000000..9e55ce194956c --- /dev/null +++ b/llama-index-integrations/llms/llama-index-llms-keywordsai/tests/test_llms_keywordsai.py @@ -0,0 +1,7 @@ +from llama_index.core.base.llms.base import BaseLLM +from llama_index.llms.keywordsai import KeywordsAI + + +def test_text_inference_embedding_class(): + names_of_base_classes = [b.__name__ for b in KeywordsAI.__mro__] + assert BaseLLM.__name__ in names_of_base_classes diff --git a/llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/base.py b/llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/base.py index f0df254601e8e..b2413b21517dd 100644 --- a/llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/base.py +++ b/llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/base.py @@ -1,5 +1,5 @@ import json -from typing import Any, Callable, Dict, Optional, Sequence +from typing import Any, Callable, Dict, Optional, Sequence, List, Union, TYPE_CHECKING from llama_index.core.base.llms.types import ( ChatMessage, @@ -12,6 +12,10 @@ LLMMetadata, MessageRole, ) +from llama_index.core.base.llms.generic_utils import ( + chat_to_completion_decorator, + stream_chat_to_completion_decorator, +) from llama_index.core.bridge.pydantic import Field, PrivateAttr from llama_index.core.callbacks import CallbackManager @@ -22,8 +26,7 @@ llm_chat_callback, llm_completion_callback, ) - -from llama_index.core.llms.llm import LLM +from llama_index.core.llms.function_calling import FunctionCallingLLM, ToolSelection from llama_index.core.types import BaseOutputParser, PydanticProgramMode from llama_index.llms.oci_genai.utils import ( @@ -34,40 +37,17 @@ get_completion_generator, get_chat_generator, get_context_size, + _format_oci_tool_calls, + force_single_tool_call, + validate_tool_call, ) +if TYPE_CHECKING: + from llama_index.core.tools.types import BaseTool -# TODO: -# (1) placeholder for future LLMs in utils.py e.g., llama3, command R+ -class OCIGenAI(LLM): - """OCI large language models. - - To authenticate, the OCI client uses the methods described in - https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm - - The authentifcation method is passed through auth_type and should be one of: - API_KEY (default), SECURITY_TOKEN, INSTANCE_PRINCIPAL, RESOURCE_PRINCIPAL - Make sure you have the required policies (profile/roles) to - access the OCI Generative AI service. - If a specific config profile is used, you must pass - the name of the profile (from ~/.oci/config) through auth_profile. - - To use, you must provide the compartment id - along with the endpoint url, and model id - as named parameters to the constructor. - - Example: - .. 
code-block:: python - - from llama_index.llms.oci_genai import OCIGenAI - - llm = OCIGenAI( - model="MY_MODEL_ID", - service_endpoint="https://inference.generativeai.us-chicago-1.oci.oraclecloud.com", - compartment_id="MY_OCID" - ) - """ +class OCIGenAI(FunctionCallingLLM): + """OCI large language models with function calling support.""" model: str = Field(description="Id of the OCI Generative AI model to use.") temperature: float = Field(description="The temperature to use for sampling.") @@ -190,7 +170,6 @@ def __init__( @classmethod def class_name(cls) -> str: - """Get class name.""" return "OCIGenAI_LLM" @property @@ -223,57 +202,27 @@ def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]: def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: - inference_params = self._get_all_kwargs(**kwargs) - inference_params["is_stream"] = False - inference_params["prompt"] = prompt - - request = self._completion_generator( - compartment_id=self.compartment_id, - serving_mode=self._serving_mode, - inference_request=self._provider.oci_completion_request(**inference_params), - ) - - response = self._client.generate_text(request) - return CompletionResponse( - text=self._provider.completion_response_to_text(response), - raw=response.__dict__, - ) + complete_fn = chat_to_completion_decorator(self.chat) + return complete_fn(prompt, **kwargs) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: - inference_params = self._get_all_kwargs(**kwargs) - inference_params["is_stream"] = True - inference_params["prompt"] = prompt - - request = self._completion_generator( - compartment_id=self.compartment_id, - serving_mode=self._serving_mode, - inference_request=self._provider.oci_completion_request(**inference_params), - ) - - response = self._client.generate_text(request) - - def gen() -> CompletionResponseGen: - content = "" - for event in response.data.events(): - content_delta = self._provider.completion_stream_to_text( - json.loads(event.data) - ) - content += content_delta - yield CompletionResponse( - text=content, delta=content_delta, raw=event.__dict__ - ) - - return gen() + stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat) + return stream_complete_fn(prompt, **kwargs) @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: oci_params = self._provider.messages_to_oci_params(messages) oci_params["is_stream"] = False + tools = kwargs.pop("tools", None) all_kwargs = self._get_all_kwargs(**kwargs) chat_params = {**all_kwargs, **oci_params} + if tools: + chat_params["tools"] = [ + self._provider.convert_to_oci_tool(tool) for tool in tools + ] request = self._chat_generator( compartment_id=self.compartment_id, @@ -283,10 +232,20 @@ def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: response = self._client.chat(request) + generation_info = self._provider.chat_generation_info(response) + + llm_output = { + "model_id": response.data.model_id, + "model_version": response.data.model_version, + "request_id": response.request_id, + "content-length": response.headers["content-length"], + } + return ChatResponse( message=ChatMessage( role=MessageRole.ASSISTANT, content=self._provider.chat_response_to_text(response), + additional_kwargs=generation_info, ), raw=response.__dict__, ) @@ -296,8 +255,13 @@ def stream_chat( ) -> ChatResponseGen: oci_params = 
self._provider.messages_to_oci_params(messages) oci_params["is_stream"] = True + tools = kwargs.pop("tools", None) all_kwargs = self._get_all_kwargs(**kwargs) chat_params = {**all_kwargs, **oci_params} + if tools: + chat_params["tools"] = [ + self._provider.convert_to_oci_tool(tool) for tool in tools + ] request = self._chat_generator( compartment_id=self.compartment_id, @@ -309,39 +273,169 @@ def stream_chat( def gen() -> ChatResponseGen: content = "" + tool_calls_accumulated = [] + for event in response.data.events(): content_delta = self._provider.chat_stream_to_text( json.loads(event.data) ) content += content_delta - yield ChatResponse( - message=ChatMessage(role=MessageRole.ASSISTANT, content=content), - delta=content_delta, - raw=event.__dict__, - ) - return gen() + try: + event_data = json.loads(event.data) + + tool_calls_data = None + for key in ["toolCalls", "tool_calls", "functionCalls"]: + if key in event_data: + tool_calls_data = event_data[key] + break + + if tool_calls_data: + new_tool_calls = _format_oci_tool_calls(tool_calls_data) + for tool_call in new_tool_calls: + existing = next( + ( + t + for t in tool_calls_accumulated + if t["name"] == tool_call["name"] + ), + None, + ) + if existing: + existing.update(tool_call) + else: + tool_calls_accumulated.append(tool_call) + + generation_info = self._provider.chat_stream_generation_info( + event_data + ) + if tool_calls_accumulated: + generation_info["tool_calls"] = tool_calls_accumulated + + yield ChatResponse( + message=ChatMessage( + role=MessageRole.ASSISTANT, + content=content, + additional_kwargs=generation_info, + ), + delta=content_delta, + raw=event.__dict__, + ) + + except json.JSONDecodeError: + yield ChatResponse( + message=ChatMessage( + role=MessageRole.ASSISTANT, content=content + ), + delta=content_delta, + raw=event.__dict__, + ) + + except Exception as e: + print(f"Error processing stream chunk: {e}") + yield ChatResponse( + message=ChatMessage( + role=MessageRole.ASSISTANT, content=content + ), + delta=content_delta, + raw=event.__dict__, + ) - async def acomplete( - self, prompt: str, formatted: bool = False, **kwargs: Any - ) -> CompletionResponse: - # do synchronous complete for now - return self.complete(prompt, formatted=formatted, **kwargs) + return gen() async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: - # do synchronous chat for now - return self.chat(messages, **kwargs) + raise NotImplementedError("Async chat is not implemented yet.") + + async def acomplete( + self, prompt: str, formatted: bool = False, **kwargs: Any + ) -> CompletionResponse: + raise NotImplementedError("Async complete is not implemented yet.") async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: - # do synchronous stream chat for now - return self.stream_chat(messages, **kwargs) + raise NotImplementedError("Async stream chat is not implemented yet.") async def astream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseAsyncGen: - # do synchronous stream complete for now - return self.stream_complete(prompt, formatted, **kwargs) + raise NotImplementedError("Async stream complete is not implemented yet.") + + # Function tooling integration methods + def _prepare_chat_with_tools( + self, + tools: Sequence["BaseTool"], + user_msg: Optional[Union[str, ChatMessage]] = None, + chat_history: Optional[List[ChatMessage]] = None, + verbose: bool = False, + allow_parallel_tool_calls: bool = False, + **kwargs: 
Any, + ) -> Dict[str, Any]: + tool_specs = [self._provider.convert_to_oci_tool(tool) for tool in tools] + + if isinstance(user_msg, str): + user_msg = ChatMessage(role=MessageRole.USER, content=user_msg) + + messages = chat_history or [] + if user_msg: + messages.append(user_msg) + + oci_params = self._provider.messages_to_oci_params(messages) + chat_params = self._get_all_kwargs(**kwargs) + + return { + "messages": messages, + "tools": tool_specs, + **oci_params, + **chat_params, + } + + def _validate_chat_with_tools_response( + self, + response: ChatResponse, + tools: List["BaseTool"], + allow_parallel_tool_calls: bool = False, + **kwargs: Any, + ) -> ChatResponse: + """Validate the response from chat_with_tools.""" + if not allow_parallel_tool_calls: + force_single_tool_call(response) + return response + + def get_tool_calls_from_response( + self, + response: "ChatResponse", + error_on_no_tool_call: bool = True, + **kwargs: Any, + ) -> List[ToolSelection]: + """Predict and call the tool.""" + tool_calls = response.message.additional_kwargs.get("tool_calls", []) + + if len(tool_calls) < 1: + if error_on_no_tool_call: + raise ValueError( + f"Expected at least one tool call, but got {len(tool_calls)} tool calls." + ) + else: + return [] + + tool_selections = [] + for tool_call in tool_calls: + validate_tool_call(tool_call) + argument_dict = ( + json.loads(tool_call["input"]) + if isinstance(tool_call["input"], str) + else tool_call["input"] + ) + + tool_selections.append( + ToolSelection( + tool_id=tool_call["toolUseId"], + tool_name=tool_call["name"], + tool_kwargs=argument_dict, + ) + ) + + return tool_selections diff --git a/llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/utils.py b/llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/utils.py index 4cf47c150c586..ee440c3b8b915 100644 --- a/llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/utils.py +++ b/llama-index-integrations/llms/llama-index-llms-oci-genai/llama_index/llms/oci_genai/utils.py @@ -1,7 +1,13 @@ +import inspect +import json +import uuid from abc import ABC, abstractmethod from enum import Enum -from typing import Any, Sequence, Dict -from llama_index.core.base.llms.types import ChatMessage +from typing import Any, Sequence, Dict, Union, Type, Callable, Optional, List +from llama_index.core.base.llms.types import ChatMessage, MessageRole, ChatResponse +from llama_index.core.bridge.pydantic import BaseModel +from llama_index.core.tools import BaseTool +from oci.generative_ai_inference.models import CohereTool class OCIAuthType(Enum): @@ -15,30 +21,69 @@ class OCIAuthType(Enum): CUSTOM_ENDPOINT_PREFIX = "ocid1.generativeaiendpoint" -COMPLETION_MODELS = { - "cohere.command": 4096, - "cohere.command-light": 4096, - "meta.llama-2-70b-chat": 4096, -} +COMPLETION_MODELS = {} # completion endpoint has been deprecated CHAT_MODELS = { "cohere.command-r-16k": 16000, - "cohere.command-r-plus": 128000, # placeholder for future support + "cohere.command-r-plus": 128000, + "cohere.command-r-08-2024": 128000, + "cohere.command-r-plus-08-2024": 128000, "meta.llama-3-70b-instruct": 8192, + "meta.llama-3.1-70b-instruct": 128000, + "meta.llama-3.1-405b-instruct": 128000, + "meta.llama-3.2-90b-vision-instruct": 128000, } OCIGENAI_LLMS = {**COMPLETION_MODELS, **CHAT_MODELS} -STREAMING_MODELS = { - "cohere.command", - "cohere.command-light", - "meta.llama-2-70b-chat", - "cohere.command-r-16k", - "cohere.command-r-plus", - 
"meta.llama-3-70b-instruct", +JSON_TO_PYTHON_TYPES = { + "string": "str", + "number": "float", + "boolean": "bool", + "integer": "int", + "array": "List", + "object": "Dict", } +def _format_oci_tool_calls( + tool_calls: Optional[List[Any]] = None, +) -> List[Dict]: + """ + Formats an OCI GenAI API response into the tool call format used in LlamaIndex. + Handles both dictionary and object formats. + """ + if not tool_calls: + return [] + + formatted_tool_calls = [] + for tool_call in tool_calls: + # Handle both object and dict formats + if isinstance(tool_call, dict): + name = tool_call.get("name", tool_call.get("functionName")) + parameters = tool_call.get( + "parameters", tool_call.get("functionParameters") + ) + else: + name = getattr(tool_call, "name", getattr(tool_call, "functionName", None)) + parameters = getattr( + tool_call, "parameters", getattr(tool_call, "functionParameters", None) + ) + + if name and parameters: + formatted_tool_calls.append( + { + "toolUseId": uuid.uuid4().hex[:], + "name": name, + "input": json.dumps(parameters) + if isinstance(parameters, dict) + else parameters, + } + ) + + return formatted_tool_calls + + def create_client(auth_type, auth_profile, service_endpoint): """OCI Gen AI client. @@ -166,10 +211,25 @@ def chat_response_to_text(self, response: Any) -> str: def chat_stream_to_text(self, event_data: Dict) -> str: ... + @abstractmethod + def chat_generation_info(self, response: Any) -> Dict[str, Any]: + ... + + @abstractmethod + def chat_stream_generation_info(self, event_data: Dict) -> Dict[str, Any]: + ... + @abstractmethod def messages_to_oci_params(self, messages: Sequence[ChatMessage]) -> Dict[str, Any]: ... + @abstractmethod + def convert_to_oci_tool( + self, + tool: Union[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]], + ) -> Dict[str, Any]: + ... + class CohereProvider(Provider): def __init__(self) -> None: @@ -184,10 +244,14 @@ def __init__(self) -> None: self.oci_completion_request = models.CohereLlmInferenceRequest self.oci_chat_request = models.CohereChatRequest + self.oci_tool = models.CohereTool + self.oci_tool_param = models.CohereParameterDefinition + self.oci_tool_result = models.CohereToolResult + self.oci_tool_call = models.CohereToolCall self.oci_chat_message = { "USER": models.CohereUserMessage, - "SYSTEM": models.CohereSystemMessage, "CHATBOT": models.CohereChatBotMessage, + "SYSTEM": models.CohereSystemMessage, "TOOL": models.CohereToolMessage, } self.chat_api_format = models.BaseChatRequest.API_FORMAT_COHERE @@ -202,31 +266,274 @@ def chat_response_to_text(self, response: Any) -> str: return response.data.chat_response.text def chat_stream_to_text(self, event_data: Dict) -> str: - if "text" in event_data and "finishReason" not in event_data: - return event_data["text"] - else: - return "" + if "text" in event_data: + if "finishedReason" in event_data or "toolCalls" in event_data: + return "" + else: + return event_data["text"] + return "" + + def chat_generation_info(self, response: Any) -> Dict[str, Any]: + generation_info: Dict[str, Any] = { + "finish_reason": response.data.chat_response.finish_reason, + "documents": response.data.chat_response.documents, + "citations": response.data.chat_response.citations, + "search_queries": response.data.chat_response.search_queries, + "is_search_required": response.data.chat_response.is_search_required, + } + if response.data.chat_response.tool_calls: + # Only populate tool_calls when 1) present on the response and + # 2) has one or more calls. 
+ generation_info["tool_calls"] = _format_oci_tool_calls( + response.data.chat_response.tool_calls + ) + + return generation_info + + def chat_stream_generation_info(self, event_data: Dict) -> Dict[str, Any]: + """Extract generation info from a streaming chat response.""" + generation_info: Dict[str, Any] = { + "finish_reason": event_data.get("finishReason"), + "documents": event_data.get("documents", []), + "citations": event_data.get("citations", []), + "search_queries": event_data.get("searchQueries", []), + "is_search_required": event_data.get("isSearchRequired", False), + } + + # Handle tool calls if present + if "toolCalls" in event_data: + generation_info["tool_calls"] = _format_oci_tool_calls( + event_data["toolCalls"] + ) + + return {k: v for k, v in generation_info.items() if v is not None} def messages_to_oci_params(self, messages: Sequence[ChatMessage]) -> Dict[str, Any]: role_map = { "user": "USER", "system": "SYSTEM", - "chatbot": "CHATBOT", "assistant": "CHATBOT", "tool": "TOOL", + "function": "TOOL", + "chatbot": "CHATBOT", + "model": "CHATBOT", } - oci_chat_history = [ - self.oci_chat_message[role_map[msg.role]](message=msg.content) - for msg in messages[:-1] - ] + oci_chat_history = [] + + for msg in messages[:-1]: + role = role_map[msg.role.value] + + # Handle tool calls for AI/Assistant messages + if role == "CHATBOT" and "tool_calls" in msg.additional_kwargs: + tool_calls = [] + for tool_call in msg.additional_kwargs.get("tool_calls", []): + validate_tool_call(tool_call) + tool_calls.append( + self.oci_tool_call( + name=tool_call.get("name"), + parameters=json.loads(tool_call["input"]) + if isinstance(tool_call["input"], str) + else tool_call["input"], + ) + ) + + oci_chat_history.append( + self.oci_chat_message[role]( + message=msg.content if msg.content else " ", + tool_calls=tool_calls if tool_calls else None, + ) + ) + elif role == "TOOL": + # tool message only has tool results field and no message field + continue + else: + oci_chat_history.append( + self.oci_chat_message[role](message=msg.content or " ") + ) - return { - "message": messages[-1].content, + # Handling the current chat turn, especially the latest message + current_chat_turn_messages = [] + for message in messages[::-1]: + current_chat_turn_messages.append(message) + if message.role == MessageRole.USER: + break + current_chat_turn_messages = current_chat_turn_messages[::-1] + + oci_tool_results = [] + for message in current_chat_turn_messages: + if message.role == MessageRole.TOOL: + tool_message = message + previous_ai_msgs = [ + message + for message in current_chat_turn_messages + if message.role == MessageRole.ASSISTANT + and "tool_calls" in message.additional_kwargs + ] + if previous_ai_msgs: + previous_ai_msg = previous_ai_msgs[-1] + for li_tool_call in previous_ai_msg.additional_kwargs.get( + "tool_calls", [] + ): + validate_tool_call(li_tool_call) + if li_tool_call[ + "toolUseId" + ] == tool_message.additional_kwargs.get("tool_call_id"): + tool_result = self.oci_tool_result() + tool_result.call = self.oci_tool_call( + name=li_tool_call.get("name"), + parameters=json.loads(li_tool_call["input"]) + if isinstance(li_tool_call["input"], str) + else li_tool_call["input"], + ) + tool_result.outputs = [{"output": tool_message.content}] + oci_tool_results.append(tool_result) + + if not oci_tool_results: + oci_tool_results = None + + message_str = "" if oci_tool_results or not messages else messages[-1].content + + oci_params = { + "message": message_str, "chat_history": oci_chat_history, + 
"tool_results": oci_tool_results, "api_format": self.chat_api_format, } + return {k: v for k, v in oci_params.items() if v is not None} + + def convert_to_oci_tool( + self, + tool: Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool], + ) -> CohereTool: + """ + Convert a Pydantic class, JSON schema dict, callable, or BaseTool to a CohereTool format for OCI. + + Args: + tool: The tool to convert, which can be a Pydantic class, a callable, or a JSON schema dictionary. + + Returns: + A CohereTool representing the tool in the OCI API format. + """ + if isinstance(tool, BaseTool): + # Extract tool name and description for BaseTool + tool_name, tool_description = getattr(tool, "name", None), getattr( + tool, "description", None + ) + if not tool_name or not tool_description: + tool_name = getattr(tool.metadata, "name", None) + if tool_fn := getattr(tool, "fn", None): + tool_description = tool_fn.__doc__ + if not tool_name: + tool_name = tool_fn.__name__ + else: + tool_description = getattr(tool.metadata, "description", None) + if not tool_name or not tool_description: + raise ValueError( + f"Tool {tool} does not have a name or description." + ) + + return self.oci_tool( + name=tool_name, + description=tool_description, + parameter_definitions={ + p_name: self.oci_tool_param( + type=JSON_TO_PYTHON_TYPES.get( + p_def.get("type"), p_def.get("type") + ), + description=p_def.get("description", ""), + is_required=p_name + in tool.metadata.get_parameters_dict().get("required", []), + ) + for p_name, p_def in tool.metadata.get_parameters_dict() + .get("properties", {}) + .items() + }, + ) + + elif isinstance(tool, dict): + # Ensure dict-based tools follow a standard schema format + if not all(k in tool for k in ("title", "description", "properties")): + raise ValueError( + "Unsupported dict type. Tool must be passed in as a BaseTool instance, JSON schema dict, or BaseModel type." 
+ ) + return self.oci_tool( + name=tool.get("title"), + description=tool.get("description"), + parameter_definitions={ + p_name: self.oci_tool_param( + type=JSON_TO_PYTHON_TYPES.get( + p_def.get("type"), p_def.get("type") + ), + description=p_def.get("description", ""), + is_required=p_name in tool.get("required", []), + ) + for p_name, p_def in tool.get("properties", {}).items() + }, + ) + + elif isinstance(tool, type) and issubclass(tool, BaseModel): + # Handle Pydantic BaseModel tools + schema = tool.model_json_schema() + properties = schema.get("properties", {}) + return self.oci_tool( + name=schema.get("title", tool.__name__), + description=schema.get("description", tool.__name__), + parameter_definitions={ + p_name: self.oci_tool_param( + type=JSON_TO_PYTHON_TYPES.get( + p_def.get("type"), p_def.get("type") + ), + description=p_def.get("description", ""), + is_required=p_name in schema.get("required", []), + ) + for p_name, p_def in properties.items() + }, + ) + + elif callable(tool): + # Use inspect to extract callable signature and arguments + signature = inspect.signature(tool) + parameters = {} + for param_name, param in signature.parameters.items(): + param_type = ( + param.annotation if param.annotation != inspect._empty else "string" + ) + param_default = ( + param.default if param.default != inspect._empty else None + ) + + # Convert type to JSON schema type (or leave as default) + json_type = JSON_TO_PYTHON_TYPES.get( + param_type, + param_type.__name__ if isinstance(param_type, type) else "string", + ) + + parameters[param_name] = { + "type": json_type, + "description": f"Parameter: {param_name}", + "is_required": param_default is None, + } + + return self.oci_tool( + name=tool.__name__, + description=tool.__doc__ or f"Callable function: {tool.__name__}", + parameter_definitions={ + param_name: self.oci_tool_param( + type=param_data["type"], + description=param_data["description"], + is_required=param_data["is_required"], + ) + for param_name, param_data in parameters.items() + }, + ) + + else: + raise ValueError( + f"Unsupported tool type {type(tool)}. Tool must be passed in as a BaseTool instance, JSON schema dict, or BaseModel type." 
+ ) + class MetaProvider(Provider): def __init__(self) -> None: @@ -264,6 +571,17 @@ def chat_stream_to_text(self, event_data: Dict) -> str: else: return "" + def chat_generation_info(self, response: Any) -> Dict[str, Any]: + return { + "finish_reason": response.data.chat_response.choices[0].finish_reason, + "time_created": str(response.data.chat_response.time_created), + } + + def chat_stream_generation_info(self, event_data: Dict) -> Dict[str, Any]: + return { + "finish_reason": event_data["finishReason"], + } + def messages_to_oci_params(self, messages: Sequence[ChatMessage]) -> Dict[str, Any]: role_map = { "user": "USER", @@ -285,6 +603,14 @@ def messages_to_oci_params(self, messages: Sequence[ChatMessage]) -> Dict[str, A "top_k": -1, } + def convert_to_oci_tool( + self, + tool: Union[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]], + ) -> Dict[str, Any]: + raise NotImplementedError( + "Tools not supported for OCI Generative AI Meta models" + ) + PROVIDERS = { "cohere": CohereProvider(), @@ -327,3 +653,18 @@ def get_context_size(model: str, context_size: int = None) -> int: ) from e else: return context_size + + +def validate_tool_call(tool_call: Dict[str, Any]): + if ( + "input" not in tool_call + or "toolUseId" not in tool_call + or "name" not in tool_call + ): + raise ValueError("Invalid tool call.") + + +def force_single_tool_call(response: ChatResponse) -> None: + tool_calls = response.message.additional_kwargs.get("tool_calls", []) + if len(tool_calls) > 1: + response.message.additional_kwargs["tool_calls"] = [tool_calls[0]] diff --git a/llama-index-integrations/llms/llama-index-llms-oci-genai/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-oci-genai/pyproject.toml index 339543872b5bc..d17ae35716acc 100644 --- a/llama-index-integrations/llms/llama-index-llms-oci-genai/pyproject.toml +++ b/llama-index-integrations/llms/llama-index-llms-oci-genai/pyproject.toml @@ -31,11 +31,11 @@ license = "MIT" name = "llama-index-llms-oci-genai" packages = [{include = "llama_index/"}] readme = "README.md" -version = "0.3.0" +version = "0.4.0" [tool.poetry.dependencies] -python = ">=3.9,<4.0" -oci = "^2.128.0" +python = ">=3.8.1,<4.0" +oci = "^2.134.0" llama-index-core = "^0.12.0" [tool.poetry.group.dev.dependencies] diff --git a/llama-index-integrations/llms/llama-index-llms-oci-genai/tests/test_oci_genai.py b/llama-index-integrations/llms/llama-index-llms-oci-genai/tests/test_oci_genai.py index 32c9cc1cbba32..43b841e6403d3 100644 --- a/llama-index-integrations/llms/llama-index-llms-oci-genai/tests/test_oci_genai.py +++ b/llama-index-integrations/llms/llama-index-llms-oci-genai/tests/test_oci_genai.py @@ -1,4 +1,5 @@ -###Test OCI Generative AI LLM service +# Test OCI Generative AI LLM service + from unittest.mock import MagicMock from typing import Any @@ -7,6 +8,8 @@ from llama_index.llms.oci_genai import OCIGenAI from llama_index.core.base.llms.types import ChatMessage, ChatResponse, MessageRole +from llama_index.core.tools import FunctionTool +import json class MockResponseDict(dict): @@ -14,9 +17,7 @@ def __getattr__(self, val) -> Any: # type: ignore[no-untyped-def] return self[val] -@pytest.mark.parametrize( - "test_model_id", ["cohere.command", "cohere.command-light", "meta.llama-2-70b-chat"] -) +@pytest.mark.parametrize("test_model_id", []) def test_llm_complete(monkeypatch: MonkeyPatch, test_model_id: str) -> None: """Test valid completion call to OCI Generative AI LLM service.""" oci_gen_ai_client = MagicMock() @@ -79,7 +80,13 @@ def mocked_response(*args): # 
type: ignore[no-untyped-def] @pytest.mark.parametrize( - "test_model_id", ["cohere.command-r-16k", "meta.llama-3-70b-instruct"] + "test_model_id", + [ + "cohere.command-r-16k", + "cohere.command-r-plus", + "meta.llama-3-70b-instruct", + "meta.llama-3.1-70b-instruct", + ], ) def test_llm_chat(monkeypatch: MonkeyPatch, test_model_id: str) -> None: """Test valid chat call to OCI Generative AI LLM service.""" @@ -100,10 +107,20 @@ def mocked_response(*args): # type: ignore[no-untyped-def] "chat_response": MockResponseDict( { "text": response_text, + "finish_reason": "stop", + "documents": [], + "citations": [], + "search_queries": [], + "is_search_required": False, + "tool_calls": None, } - ) + ), + "model_id": "cohere.command-r-16k", + "model_version": "1.0", } ), + "request_id": "req-1234567890", + "headers": {"content-length": "1234"}, } ) elif provider == "MetaProvider": @@ -127,14 +144,20 @@ def mocked_response(*args): # type: ignore[no-untyped-def] ) ] } - ) + ), + "finish_reason": "stop", } ) - ] + ], + "time_created": "2024-11-03T12:00:00Z", } - ) + ), + "model_id": "meta.llama-3-70b-instruct", + "model_version": "1.0", } ), + "request_id": "req-0987654321", + "headers": {"content-length": "1234"}, } ) return response @@ -145,12 +168,161 @@ def mocked_response(*args): # type: ignore[no-untyped-def] ChatMessage(role="user", content="User message"), ] + # For Meta provider, we expect fewer fields in additional_kwargs + if provider == "MetaProvider": + additional_kwargs = { + "finish_reason": "stop", + "time_created": "2024-11-03T12:00:00Z", + } + else: + additional_kwargs = { + "finish_reason": "stop", + "documents": [], + "citations": [], + "search_queries": [], + "is_search_required": False, + } + expected = ChatResponse( message=ChatMessage( - role=MessageRole.ASSISTANT, content="Assistant chat reply." + role=MessageRole.ASSISTANT, + content="Assistant chat reply.", + additional_kwargs=additional_kwargs, ), - raw=llm._client.chat.__dict__, + raw={}, # Mocked raw data + additional_kwargs={ + "model_id": test_model_id, + "model_version": "1.0", + "request_id": "req-1234567890" + if test_model_id == "cohere.command-r-16k" + else "req-0987654321", + "content-length": "1234", + }, ) actual = llm.chat(messages, temperature=0.2) - assert actual == expected + assert actual.message.content == expected.message.content + + +@pytest.mark.parametrize( + "test_model_id", ["cohere.command-r-16k", "cohere.command-r-plus"] +) +def test_llm_chat_with_tools(monkeypatch: MonkeyPatch, test_model_id: str) -> None: + """Test chat_with_tools call to OCI Generative AI LLM service with tool calling.""" + oci_gen_ai_client = MagicMock() + llm = OCIGenAI(model=test_model_id, client=oci_gen_ai_client) + + provider = llm._provider.__class__.__name__ + + def mock_tool_function(param1: str) -> str: + """Mock tool function that takes a string parameter.""" + return f"Mock tool function called with {param1}" + + # Create proper FunctionTool + mock_tool = FunctionTool.from_defaults(fn=mock_tool_function) + tools = [mock_tool] + + messages = [ + ChatMessage(role="user", content="User message"), + ] + + # Mock the client response + def mocked_response(*args, **kwargs): + response_text = "Assistant chat reply." 
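+        # The mocked payload below mirrors the fields CohereProvider reads from a chat response
+        # (text, finish_reason, documents, citations, search_queries, is_search_required, tool_calls),
+        # so llm.chat() can surface the tool call through chat_generation_info and _format_oci_tool_calls.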
+ tool_calls = [ + MockResponseDict( + { + "name": "mock_tool_function", + "parameters": {"param1": "test"}, + } + ) + ] + response = None + if provider == "CohereProvider": + response = MockResponseDict( + { + "status": 200, + "data": MockResponseDict( + { + "chat_response": MockResponseDict( + { + "text": response_text, + "finish_reason": "stop", + "documents": [], + "citations": [], + "search_queries": [], + "is_search_required": False, + "tool_calls": tool_calls, + } + ), + "model_id": test_model_id, + "model_version": "1.0", + } + ), + "request_id": "req-1234567890", + "headers": {"content-length": "1234"}, + } + ) + else: + # MetaProvider does not support tools + raise NotImplementedError("Tools not supported for this provider.") + return response + + monkeypatch.setattr(llm._client, "chat", mocked_response) + + actual_response = llm.chat( + messages=messages, + tools=tools, + ) + + # Expected response structure + expected_tool_calls = [ + { + "name": "mock_tool_function", + "toolUseId": actual_response.message.additional_kwargs["tool_calls"][0][ + "toolUseId" + ], + "input": json.dumps({"param1": "test"}), + } + ] + + expected_response = ChatResponse( + message=ChatMessage( + role=MessageRole.ASSISTANT, + content="Assistant chat reply.", + additional_kwargs={ + "finish_reason": "stop", + "documents": [], + "citations": [], + "search_queries": [], + "is_search_required": False, + "tool_calls": expected_tool_calls, + }, + ), + raw={}, + ) + + # Compare everything except the toolUseId which is randomly generated + assert actual_response.message.role == expected_response.message.role + assert actual_response.message.content == expected_response.message.content + + actual_kwargs = actual_response.message.additional_kwargs + expected_kwargs = expected_response.message.additional_kwargs + + # Check all non-tool_calls fields + for key in [k for k in expected_kwargs if k != "tool_calls"]: + assert actual_kwargs[key] == expected_kwargs[key] + + # Check tool calls separately + actual_tool_calls = actual_kwargs["tool_calls"] + assert len(actual_tool_calls) == len(expected_tool_calls) + + for actual_tc, expected_tc in zip(actual_tool_calls, expected_tool_calls): + assert actual_tc["name"] == expected_tc["name"] + assert actual_tc["input"] == expected_tc["input"] + assert "toolUseId" in actual_tc + assert isinstance(actual_tc["toolUseId"], str) + assert len(actual_tc["toolUseId"]) > 0 + + # Check additional_kwargs + assert actual_response.additional_kwargs == expected_response.additional_kwargs diff --git a/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/BUILD b/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/BUILD index db46e8d6c978c..2e35f081cdee9 100644 --- a/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/BUILD +++ b/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/BUILD @@ -1 +1,6 @@ python_sources() + +resource( + name="py_typed", + source="py.typed", +) diff --git a/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/py.typed b/llama-index-integrations/llms/llama-index-llms-openai/llama_index/llms/openai/py.typed new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/base.py b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/base.py index 
090a91de9a7b9..7a6cf670b39f0 100644 --- a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/base.py +++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/llama_index/postprocessor/voyageai_rerank/base.py @@ -26,8 +26,8 @@ class VoyageAIRerank(BaseNodePostprocessor): def __init__( self, - api_key: str, model: str, + api_key: Optional[str] = None, top_n: Optional[int] = None, truncation: Optional[bool] = None, # deprecated diff --git a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/pyproject.toml b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/pyproject.toml index 7f1d23dbc9af2..d15dd97efe8f9 100644 --- a/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/pyproject.toml +++ b/llama-index-integrations/postprocessor/llama-index-postprocessor-voyageai-rerank/pyproject.toml @@ -30,11 +30,11 @@ license = "MIT" name = "llama-index-postprocessor-voyageai-rerank" packages = [{include = "llama_index/"}] readme = "README.md" -version = "0.3.0" +version = "0.3.1" [tool.poetry.dependencies] python = ">=3.9,<4.0" -voyageai = "^0.2.1" +voyageai = {python = ">=3.9,<3.13", version = ">=0.3.2,<0.4.0"} llama-index-core = "^0.12.0" [tool.poetry.group.dev.dependencies] diff --git a/llama-index-integrations/readers/llama-index-readers-docling/tests/test_readers_docling.py b/llama-index-integrations/readers/llama-index-readers-docling/tests/test_readers_docling.py index e3d825fbe7002..7116862948ab9 100644 --- a/llama-index-integrations/readers/llama-index-readers-docling/tests/test_readers_docling.py +++ b/llama-index-integrations/readers/llama-index-readers-docling/tests/test_readers_docling.py @@ -79,6 +79,7 @@ "video_resource": None, "text_template": "{metadata_str}\n\n{content}", "class_name": "Document", + "text": '{"schema_name": "DoclingDocument", "version": "1.0.0", "name": "sample", "origin": {"mimetype": "text/html", "binary_hash": 42, "filename": "sample.html"}, "furniture": {"self_ref": "#/furniture", "children": [], "name": "_root_", "label": "unspecified"}, "body": {"self_ref": "#/body", "children": [{"$ref": "#/texts/0"}, {"$ref": "#/texts/1"}], "name": "_root_", "label": "unspecified"}, "groups": [], "texts": [{"self_ref": "#/texts/0", "parent": {"$ref": "#/body"}, "children": [], "label": "paragraph", "prov": [], "orig": "Some text", "text": "Some text"}, {"self_ref": "#/texts/1", "parent": {"$ref": "#/body"}, "children": [], "label": "paragraph", "prov": [], "orig": "Another paragraph", "text": "Another paragraph"}], "pictures": [], "tables": [], "key_value_items": [], "pages": {}}', } ] } @@ -106,6 +107,7 @@ "video_resource": None, "text_template": "{metadata_str}\n\n{content}", "class_name": "Document", + "text": "Some text\n\nAnother paragraph", } ] } diff --git a/llama-index-integrations/readers/llama-index-readers-gitlab/README.md b/llama-index-integrations/readers/llama-index-readers-gitlab/README.md index 92a8684417c4d..66d751fc21518 100644 --- a/llama-index-integrations/readers/llama-index-readers-gitlab/README.md +++ b/llama-index-integrations/readers/llama-index-readers-gitlab/README.md @@ -15,7 +15,7 @@ This reader will read through files in a repo based on branch or commit. 
```python import gitlab -from llama_index.readers.github import GitLabRepositoryReader +from llama_index.readers.gitlab import GitLabRepositoryReader gitlab_client = gitlab.Gitlab("https://gitlab.com") @@ -32,7 +32,7 @@ docs = project_repo_reader.load_data(file_path="README.rst", ref="develop") ```python import gitlab -from llama_index.readers.github import GitLabIssuesReader +from llama_index.readers.gitlab import GitLabIssuesReader gitlab_client = gitlab.Gitlab("https://gitlab.com") diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-azureaisearch/llama_index/vector_stores/azureaisearch/base.py b/llama-index-integrations/vector_stores/llama-index-vector-stores-azureaisearch/llama_index/vector_stores/azureaisearch/base.py index 0601a408fc41c..4bac8671043b1 100644 --- a/llama-index-integrations/vector_stores/llama-index-vector-stores-azureaisearch/llama_index/vector_stores/azureaisearch/base.py +++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-azureaisearch/llama_index/vector_stores/azureaisearch/base.py @@ -4,7 +4,7 @@ import json import logging from enum import auto -from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Any, Callable, Dict, List, Optional, Tuple, Union from azure.search.documents import SearchClient from azure.search.documents.aio import SearchClient as AsyncSearchClient @@ -523,12 +523,17 @@ async def _avalidate_index(self, index_name: Optional[str]) -> None: def __init__( self, - search_or_index_client: Any, + search_or_index_client: Union[ + SearchClient, SearchIndexClient, AsyncSearchClient, AsyncSearchIndexClient + ], id_field_key: str, chunk_field_key: str, embedding_field_key: str, metadata_string_field_key: str, doc_id_field_key: str, + async_search_or_index_client: Optional[ + Union[AsyncSearchClient, AsyncSearchIndexClient] + ] = None, filterable_metadata_field_keys: Optional[ Union[ List[str], @@ -617,13 +622,6 @@ def __init__( self._user_agent = ( f"{base_user_agent} {user_agent}" if user_agent else base_user_agent ) - - self._index_client: SearchIndexClient = cast(SearchIndexClient, None) - self._async_index_client: AsyncSearchIndexClient = cast( - AsyncSearchIndexClient, None - ) - self._search_client: SearchClient = cast(SearchClient, None) - self._async_search_client: AsyncSearchClient = cast(AsyncSearchClient, None) self._embedding_dimensionality = embedding_dimensionality self._index_name = index_name @@ -639,11 +637,22 @@ def __init__( self._language_analyzer = language_analyzer self._compression_type = compression_type.lower() - # Validate search_or_index_client + # Initialize clients to None + self._index_client = None + self._async_index_client = None + self._search_client = None + self._async_search_client = None + + if search_or_index_client and async_search_or_index_client is None: + logger.warning( + "async_search_or_index_client is None. Depending on the client type passed " + "in, sync or async functions may not work." 
+ ) + + # Validate sync search_or_index_client if search_or_index_client is not None: if isinstance(search_or_index_client, SearchIndexClient): - # If SearchIndexClient is supplied so must index_name - self._index_client = cast(SearchIndexClient, search_or_index_client) + self._index_client = search_or_index_client self._index_client._client._config.user_agent_policy.add_user_agent( self._user_agent ) @@ -660,18 +669,32 @@ def __init__( self._user_agent ) - elif isinstance(search_or_index_client, AsyncSearchIndexClient): - # If SearchIndexClient is supplied so must index_name - self._async_index_client = cast( - AsyncSearchIndexClient, search_or_index_client + elif isinstance(search_or_index_client, SearchClient): + self._search_client = search_or_index_client + self._search_client._client._config.user_agent_policy.add_user_agent( + self._user_agent ) + # Validate index_name + if index_name: + raise ValueError( + "index_name cannot be supplied if search_or_index_client " + "is of type azure.search.documents.SearchClient" + ) + + # Validate async search_or_index_client -- if not provided, assume the search_or_index_client could be async + async_search_or_index_client = ( + async_search_or_index_client or search_or_index_client + ) + if async_search_or_index_client is not None: + if isinstance(async_search_or_index_client, AsyncSearchIndexClient): + self._async_index_client = async_search_or_index_client self._async_index_client._client._config.user_agent_policy.add_user_agent( self._user_agent ) if not index_name: raise ValueError( - "index_name must be supplied if search_or_index_client is of " + "index_name must be supplied if async_search_or_index_client is of " "type azure.search.documents.aio.SearchIndexClient" ) @@ -682,22 +705,8 @@ def __init__( self._user_agent ) - elif isinstance(search_or_index_client, SearchClient): - self._search_client = cast(SearchClient, search_or_index_client) - self._search_client._client._config.user_agent_policy.add_user_agent( - self._user_agent - ) - # Validate index_name - if index_name: - raise ValueError( - "index_name cannot be supplied if search_or_index_client " - "is of type azure.search.documents.SearchClient" - ) - - elif isinstance(search_or_index_client, AsyncSearchClient): - self._async_search_client = cast( - AsyncSearchClient, search_or_index_client - ) + elif isinstance(async_search_or_index_client, AsyncSearchClient): + self._async_search_client = async_search_or_index_client self._async_search_client._client._config.user_agent_policy.add_user_agent( self._user_agent ) @@ -705,35 +714,31 @@ def __init__( # Validate index_name if index_name: raise ValueError( - "index_name cannot be supplied if search_or_index_client " - "is of type azure.search.documents.SearchClient" + "index_name cannot be supplied if async_search_or_index_client " + "is of type azure.search.documents.aio.SearchClient" ) - if isinstance(search_or_index_client, AsyncSearchIndexClient): - if not self._async_index_client and not self._async_search_client: - raise ValueError( - "search_or_index_client must be of type " - "azure.search.documents.SearchIndexClient or " - "azure.search.documents.SearchClient" - ) - - if isinstance(search_or_index_client, SearchIndexClient): - if not self._index_client and not self._search_client: - raise ValueError( - "search_or_index_client must be of type " - "azure.search.documents.SearchIndexClient or " - "azure.search.documents.SearchClient" - ) - else: - raise ValueError("search_or_index_client not specified") + # Validate that at least 
one client was provided + if not any( + [ + self._search_client, + self._async_search_client, + self._index_client, + self._async_index_client, + ] + ): + raise ValueError( + "Either search_or_index_client or async_search_or_index_client must be provided" + ) + # Validate index management requirements if index_management == IndexManagement.CREATE_IF_NOT_EXISTS and not ( self._index_client or self._async_index_client ): raise ValueError( "index_management has value of IndexManagement.CREATE_IF_NOT_EXISTS " - "but search_or_index_client is not of type " - "azure.search.documents.SearchIndexClient or azure.search.documents.aio.SearchIndexClient " + "but neither search_or_index_client nor async_search_or_index_client is of type " + "azure.search.documents.SearchIndexClient or azure.search.documents.aio.SearchIndexClient" ) self._index_management = index_management @@ -1161,20 +1166,36 @@ def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResul odata_filter = self._create_odata_filter(query.filters) azure_query_result_search: AzureQueryResultSearchBase = ( AzureQueryResultSearchDefault( - query, self._field_mapping, odata_filter, self._search_client + query, + self._field_mapping, + odata_filter, + self._search_client, + self._async_search_client, ) ) if query.mode == VectorStoreQueryMode.SPARSE: azure_query_result_search = AzureQueryResultSearchSparse( - query, self._field_mapping, odata_filter, self._search_client + query, + self._field_mapping, + odata_filter, + self._search_client, + self._async_search_client, ) elif query.mode == VectorStoreQueryMode.HYBRID: azure_query_result_search = AzureQueryResultSearchHybrid( - query, self._field_mapping, odata_filter, self._search_client + query, + self._field_mapping, + odata_filter, + self._search_client, + self._async_search_client, ) elif query.mode == VectorStoreQueryMode.SEMANTIC_HYBRID: azure_query_result_search = AzureQueryResultSearchSemanticHybrid( - query, self._field_mapping, odata_filter, self._search_client + query, + self._field_mapping, + odata_filter, + self._search_client, + self._async_search_client, ) return azure_query_result_search.search() @@ -1193,20 +1214,36 @@ async def aquery( azure_query_result_search: AzureQueryResultSearchBase = ( AzureQueryResultSearchDefault( - query, self._field_mapping, odata_filter, self._async_search_client + query, + self._field_mapping, + odata_filter, + self._search_client, + self._async_search_client, ) ) if query.mode == VectorStoreQueryMode.SPARSE: azure_query_result_search = AzureQueryResultSearchSparse( - query, self._field_mapping, odata_filter, self._async_search_client + query, + self._field_mapping, + odata_filter, + self._search_client, + self._async_search_client, ) elif query.mode == VectorStoreQueryMode.HYBRID: azure_query_result_search = AzureQueryResultSearchHybrid( - query, self._field_mapping, odata_filter, self._async_search_client + query, + self._field_mapping, + odata_filter, + self._search_client, + self._async_search_client, ) elif query.mode == VectorStoreQueryMode.SEMANTIC_HYBRID: azure_query_result_search = AzureQueryResultSearchSemanticHybrid( - query, self._field_mapping, odata_filter, self._async_search_client + query, + self._field_mapping, + odata_filter, + self._search_client, + self._async_search_client, ) return await azure_query_result_search.asearch() @@ -1339,12 +1376,14 @@ def __init__( query: VectorStoreQuery, field_mapping: Dict[str, str], odata_filter: Optional[str], - search_client: Any, + search_client: SearchClient, + 
async_search_client: AsyncSearchClient, ) -> None: self._query = query self._field_mapping = field_mapping self._odata_filter = odata_filter self._search_client = search_client + self._async_search_client = async_search_client @property def _select_fields(self) -> List[str]: @@ -1417,7 +1456,7 @@ def _create_query_result( async def _acreate_query_result( self, search_query: str, vectors: Optional[List[Any]] ) -> VectorStoreQueryResult: - results = await self._search_client.search( + results = await self._async_search_client.search( search_text=search_query, vector_queries=vectors, top=self._query.similarity_top_k, diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-azureaisearch/pyproject.toml b/llama-index-integrations/vector_stores/llama-index-vector-stores-azureaisearch/pyproject.toml index 73798a43b5b61..85bfd1c674ef9 100644 --- a/llama-index-integrations/vector_stores/llama-index-vector-stores-azureaisearch/pyproject.toml +++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-azureaisearch/pyproject.toml @@ -28,7 +28,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-vector-stores-azureaisearch" readme = "README.md" -version = "0.3.0" +version = "0.3.1" [tool.poetry.dependencies] python = ">=3.9,<4.0" diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosmongo/llama_index/vector_stores/azurecosmosmongo/base.py b/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosmongo/llama_index/vector_stores/azurecosmosmongo/base.py index 0a89fac8e1ca0..f0103bc50aa59 100644 --- a/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosmongo/llama_index/vector_stores/azurecosmosmongo/base.py +++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosmongo/llama_index/vector_stores/azurecosmosmongo/base.py @@ -7,7 +7,6 @@ import logging import os from typing import Any, Dict, List, Optional, cast -from datetime import date import pymongo from llama_index.core.bridge.pydantic import PrivateAttr @@ -261,7 +260,6 @@ def add( self._embedding_key: node.get_embedding(), self._text_key: node.get_content(metadata_mode=MetadataMode.NONE) or "", self._metadata_key: metadata, - "timeStamp": date.today(), } data_to_insert.append(entry) ids.append(node.node_id) diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosmongo/pyproject.toml b/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosmongo/pyproject.toml index 43e1ebc8df9ff..cbe300ac472ca 100644 --- a/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosmongo/pyproject.toml +++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosmongo/pyproject.toml @@ -27,7 +27,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-vector-stores-azurecosmosmongo" readme = "README.md" -version = "0.3.0" +version = "0.4.0" [tool.poetry.dependencies] python = ">=3.9,<4.0" diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosnosql/llama_index/vector_stores/azurecosmosnosql/base.py b/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosnosql/llama_index/vector_stores/azurecosmosnosql/base.py index 1ab8275d0f409..11d4ba21772f7 100644 --- a/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosnosql/llama_index/vector_stores/azurecosmosnosql/base.py +++ 
b/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosnosql/llama_index/vector_stores/azurecosmosnosql/base.py @@ -5,7 +5,6 @@ """ import logging from typing import Any, Optional, Dict, cast, List -from datetime import date from azure.identity import ClientSecretCredential from azure.cosmos import CosmosClient @@ -289,7 +288,6 @@ def add( self._embedding_key: node.get_embedding(), self._text_key: node.get_content(metadata_mode=MetadataMode.NONE) or "", self._metadata_key: metadata, - "timeStamp": date.today(), } data_to_insert.append(entry) ids.append(node.node_id) diff --git a/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosnosql/pyproject.toml b/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosnosql/pyproject.toml index 41b0347816aa4..fb554a00a95e7 100644 --- a/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosnosql/pyproject.toml +++ b/llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosnosql/pyproject.toml @@ -27,7 +27,7 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-vector-stores-azurecosmosnosql" readme = "README.md" -version = "1.2.0" +version = "1.3.0" [tool.poetry.dependencies] python = ">=3.9,<4.0" diff --git a/poetry.lock b/poetry.lock index 3ba1832f8d7ae..1db9926019c88 100644 --- a/poetry.lock +++ b/poetry.lock @@ -13,87 +13,87 @@ files = [ [[package]] name = "aiohttp" -version = "3.11.10" +version = "3.11.11" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" files = [ - {file = "aiohttp-3.11.10-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cbad88a61fa743c5d283ad501b01c153820734118b65aee2bd7dbb735475ce0d"}, - {file = "aiohttp-3.11.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80886dac673ceaef499de2f393fc80bb4481a129e6cb29e624a12e3296cc088f"}, - {file = "aiohttp-3.11.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:61b9bae80ed1f338c42f57c16918853dc51775fb5cb61da70d590de14d8b5fb4"}, - {file = "aiohttp-3.11.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e2e576caec5c6a6b93f41626c9c02fc87cd91538b81a3670b2e04452a63def6"}, - {file = "aiohttp-3.11.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02c13415b5732fb6ee7ff64583a5e6ed1c57aa68f17d2bda79c04888dfdc2769"}, - {file = "aiohttp-3.11.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfce37f31f20800a6a6620ce2cdd6737b82e42e06e6e9bd1b36f546feb3c44f"}, - {file = "aiohttp-3.11.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3bbbfff4c679c64e6e23cb213f57cc2c9165c9a65d63717108a644eb5a7398df"}, - {file = "aiohttp-3.11.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49c7dbbc1a559ae14fc48387a115b7d4bbc84b4a2c3b9299c31696953c2a5219"}, - {file = "aiohttp-3.11.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:68386d78743e6570f054fe7949d6cb37ef2b672b4d3405ce91fafa996f7d9b4d"}, - {file = "aiohttp-3.11.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9ef405356ba989fb57f84cac66f7b0260772836191ccefbb987f414bcd2979d9"}, - {file = "aiohttp-3.11.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5d6958671b296febe7f5f859bea581a21c1d05430d1bbdcf2b393599b1cdce77"}, - {file = "aiohttp-3.11.10-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:99b7920e7165be5a9e9a3a7f1b680f06f68ff0d0328ff4079e5163990d046767"}, - {file = 
"aiohttp-3.11.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0dc49f42422163efb7e6f1df2636fe3db72713f6cd94688e339dbe33fe06d61d"}, - {file = "aiohttp-3.11.10-cp310-cp310-win32.whl", hash = "sha256:40d1c7a7f750b5648642586ba7206999650208dbe5afbcc5284bcec6579c9b91"}, - {file = "aiohttp-3.11.10-cp310-cp310-win_amd64.whl", hash = "sha256:68ff6f48b51bd78ea92b31079817aff539f6c8fc80b6b8d6ca347d7c02384e33"}, - {file = "aiohttp-3.11.10-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:77c4aa15a89847b9891abf97f3d4048f3c2d667e00f8a623c89ad2dccee6771b"}, - {file = "aiohttp-3.11.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:909af95a72cedbefe5596f0bdf3055740f96c1a4baa0dd11fd74ca4de0b4e3f1"}, - {file = "aiohttp-3.11.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:386fbe79863eb564e9f3615b959e28b222259da0c48fd1be5929ac838bc65683"}, - {file = "aiohttp-3.11.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3de34936eb1a647aa919655ff8d38b618e9f6b7f250cc19a57a4bf7fd2062b6d"}, - {file = "aiohttp-3.11.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c9527819b29cd2b9f52033e7fb9ff08073df49b4799c89cb5754624ecd98299"}, - {file = "aiohttp-3.11.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65a96e3e03300b41f261bbfd40dfdbf1c301e87eab7cd61c054b1f2e7c89b9e8"}, - {file = "aiohttp-3.11.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f5635f7b74bcd4f6f72fcd85bea2154b323a9f05226a80bc7398d0c90763b0"}, - {file = "aiohttp-3.11.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:03b6002e20938fc6ee0918c81d9e776bebccc84690e2b03ed132331cca065ee5"}, - {file = "aiohttp-3.11.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6362cc6c23c08d18ddbf0e8c4d5159b5df74fea1a5278ff4f2c79aed3f4e9f46"}, - {file = "aiohttp-3.11.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3691ed7726fef54e928fe26344d930c0c8575bc968c3e239c2e1a04bd8cf7838"}, - {file = "aiohttp-3.11.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31d5093d3acd02b31c649d3a69bb072d539d4c7659b87caa4f6d2bcf57c2fa2b"}, - {file = "aiohttp-3.11.10-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:8b3cf2dc0f0690a33f2d2b2cb15db87a65f1c609f53c37e226f84edb08d10f52"}, - {file = "aiohttp-3.11.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:fbbaea811a2bba171197b08eea288b9402faa2bab2ba0858eecdd0a4105753a3"}, - {file = "aiohttp-3.11.10-cp311-cp311-win32.whl", hash = "sha256:4b2c7ac59c5698a7a8207ba72d9e9c15b0fc484a560be0788b31312c2c5504e4"}, - {file = "aiohttp-3.11.10-cp311-cp311-win_amd64.whl", hash = "sha256:974d3a2cce5fcfa32f06b13ccc8f20c6ad9c51802bb7f829eae8a1845c4019ec"}, - {file = "aiohttp-3.11.10-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b78f053a7ecfc35f0451d961dacdc671f4bcbc2f58241a7c820e9d82559844cf"}, - {file = "aiohttp-3.11.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab7485222db0959a87fbe8125e233b5a6f01f4400785b36e8a7878170d8c3138"}, - {file = "aiohttp-3.11.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cf14627232dfa8730453752e9cdc210966490992234d77ff90bc8dc0dce361d5"}, - {file = "aiohttp-3.11.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:076bc454a7e6fd646bc82ea7f98296be0b1219b5e3ef8a488afbdd8e81fbac50"}, - {file = "aiohttp-3.11.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:482cafb7dc886bebeb6c9ba7925e03591a62ab34298ee70d3dd47ba966370d2c"}, - 
{file = "aiohttp-3.11.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf3d1a519a324af764a46da4115bdbd566b3c73fb793ffb97f9111dbc684fc4d"}, - {file = "aiohttp-3.11.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24213ba85a419103e641e55c27dc7ff03536c4873470c2478cce3311ba1eee7b"}, - {file = "aiohttp-3.11.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b99acd4730ad1b196bfb03ee0803e4adac371ae8efa7e1cbc820200fc5ded109"}, - {file = "aiohttp-3.11.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:14cdb5a9570be5a04eec2ace174a48ae85833c2aadc86de68f55541f66ce42ab"}, - {file = "aiohttp-3.11.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7e97d622cb083e86f18317282084bc9fbf261801b0192c34fe4b1febd9f7ae69"}, - {file = "aiohttp-3.11.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:012f176945af138abc10c4a48743327a92b4ca9adc7a0e078077cdb5dbab7be0"}, - {file = "aiohttp-3.11.10-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44224d815853962f48fe124748227773acd9686eba6dc102578defd6fc99e8d9"}, - {file = "aiohttp-3.11.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c87bf31b7fdab94ae3adbe4a48e711bfc5f89d21cf4c197e75561def39e223bc"}, - {file = "aiohttp-3.11.10-cp312-cp312-win32.whl", hash = "sha256:06a8e2ee1cbac16fe61e51e0b0c269400e781b13bcfc33f5425912391a542985"}, - {file = "aiohttp-3.11.10-cp312-cp312-win_amd64.whl", hash = "sha256:be2b516f56ea883a3e14dda17059716593526e10fb6303189aaf5503937db408"}, - {file = "aiohttp-3.11.10-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8cc5203b817b748adccb07f36390feb730b1bc5f56683445bfe924fc270b8816"}, - {file = "aiohttp-3.11.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5ef359ebc6949e3a34c65ce20230fae70920714367c63afd80ea0c2702902ccf"}, - {file = "aiohttp-3.11.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9bca390cb247dbfaec3c664326e034ef23882c3f3bfa5fbf0b56cad0320aaca5"}, - {file = "aiohttp-3.11.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811f23b3351ca532af598405db1093f018edf81368e689d1b508c57dcc6b6a32"}, - {file = "aiohttp-3.11.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddf5f7d877615f6a1e75971bfa5ac88609af3b74796ff3e06879e8422729fd01"}, - {file = "aiohttp-3.11.10-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6ab29b8a0beb6f8eaf1e5049252cfe74adbaafd39ba91e10f18caeb0e99ffb34"}, - {file = "aiohttp-3.11.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c49a76c1038c2dd116fa443eba26bbb8e6c37e924e2513574856de3b6516be99"}, - {file = "aiohttp-3.11.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f3dc0e330575f5b134918976a645e79adf333c0a1439dcf6899a80776c9ab39"}, - {file = "aiohttp-3.11.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:efb15a17a12497685304b2d976cb4939e55137df7b09fa53f1b6a023f01fcb4e"}, - {file = "aiohttp-3.11.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:db1d0b28fcb7f1d35600150c3e4b490775251dea70f894bf15c678fdd84eda6a"}, - {file = "aiohttp-3.11.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:15fccaf62a4889527539ecb86834084ecf6e9ea70588efde86e8bc775e0e7542"}, - {file = "aiohttp-3.11.10-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:593c114a2221444f30749cc5e5f4012488f56bd14de2af44fe23e1e9894a9c60"}, - {file = "aiohttp-3.11.10-cp313-cp313-musllinux_1_2_x86_64.whl", 
hash = "sha256:7852bbcb4d0d2f0c4d583f40c3bc750ee033265d80598d0f9cb6f372baa6b836"}, - {file = "aiohttp-3.11.10-cp313-cp313-win32.whl", hash = "sha256:65e55ca7debae8faaffee0ebb4b47a51b4075f01e9b641c31e554fd376595c6c"}, - {file = "aiohttp-3.11.10-cp313-cp313-win_amd64.whl", hash = "sha256:beb39a6d60a709ae3fb3516a1581777e7e8b76933bb88c8f4420d875bb0267c6"}, - {file = "aiohttp-3.11.10-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0580f2e12de2138f34debcd5d88894786453a76e98febaf3e8fe5db62d01c9bf"}, - {file = "aiohttp-3.11.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a55d2ad345684e7c3dd2c20d2f9572e9e1d5446d57200ff630e6ede7612e307f"}, - {file = "aiohttp-3.11.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04814571cb72d65a6899db6099e377ed00710bf2e3eafd2985166f2918beaf59"}, - {file = "aiohttp-3.11.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e44a9a3c053b90c6f09b1bb4edd880959f5328cf63052503f892c41ea786d99f"}, - {file = "aiohttp-3.11.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:502a1464ccbc800b4b1995b302efaf426e8763fadf185e933c2931df7db9a199"}, - {file = "aiohttp-3.11.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:613e5169f8ae77b1933e42e418a95931fb4867b2991fc311430b15901ed67079"}, - {file = "aiohttp-3.11.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cca22a61b7fe45da8fc73c3443150c3608750bbe27641fc7558ec5117b27fdf"}, - {file = "aiohttp-3.11.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86a5dfcc39309470bd7b68c591d84056d195428d5d2e0b5ccadfbaf25b026ebc"}, - {file = "aiohttp-3.11.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:77ae58586930ee6b2b6f696c82cf8e78c8016ec4795c53e36718365f6959dc82"}, - {file = "aiohttp-3.11.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:78153314f26d5abef3239b4a9af20c229c6f3ecb97d4c1c01b22c4f87669820c"}, - {file = "aiohttp-3.11.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:98283b94cc0e11c73acaf1c9698dea80c830ca476492c0fe2622bd931f34b487"}, - {file = "aiohttp-3.11.10-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:53bf2097e05c2accc166c142a2090e4c6fd86581bde3fd9b2d3f9e93dda66ac1"}, - {file = "aiohttp-3.11.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5532f0441fc09c119e1dca18fbc0687e64fbeb45aa4d6a87211ceaee50a74c4"}, - {file = "aiohttp-3.11.10-cp39-cp39-win32.whl", hash = "sha256:47ad15a65fb41c570cd0ad9a9ff8012489e68176e7207ec7b82a0940dddfd8be"}, - {file = "aiohttp-3.11.10-cp39-cp39-win_amd64.whl", hash = "sha256:c6b9e6d7e41656d78e37ce754813fa44b455c3d0d0dced2a047def7dc5570b74"}, - {file = "aiohttp-3.11.10.tar.gz", hash = "sha256:b1fc6b45010a8d0ff9e88f9f2418c6fd408c99c211257334aff41597ebece42e"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a60804bff28662cbcf340a4d61598891f12eea3a66af48ecfdc975ceec21e3c8"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b4fa1cb5f270fb3eab079536b764ad740bb749ce69a94d4ec30ceee1b5940d5"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:731468f555656767cda219ab42e033355fe48c85fbe3ba83a349631541715ba2"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb23d8bb86282b342481cad4370ea0853a39e4a32a0042bb52ca6bdde132df43"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:f047569d655f81cb70ea5be942ee5d4421b6219c3f05d131f64088c73bb0917f"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd7659baae9ccf94ae5fe8bfaa2c7bc2e94d24611528395ce88d009107e00c6d"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af01e42ad87ae24932138f154105e88da13ce7d202a6de93fafdafb2883a00ef"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5854be2f3e5a729800bac57a8d76af464e160f19676ab6aea74bde18ad19d438"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6526e5fb4e14f4bbf30411216780c9967c20c5a55f2f51d3abd6de68320cc2f3"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:85992ee30a31835fc482468637b3e5bd085fa8fe9392ba0bdcbdc1ef5e9e3c55"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:88a12ad8ccf325a8a5ed80e6d7c3bdc247d66175afedbe104ee2aaca72960d8e"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0a6d3fbf2232e3a08c41eca81ae4f1dff3d8f1a30bae415ebe0af2d2458b8a33"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84a585799c58b795573c7fa9b84c455adf3e1d72f19a2bf498b54a95ae0d194c"}, + {file = "aiohttp-3.11.11-cp310-cp310-win32.whl", hash = "sha256:bfde76a8f430cf5c5584553adf9926534352251d379dcb266ad2b93c54a29745"}, + {file = "aiohttp-3.11.11-cp310-cp310-win_amd64.whl", hash = "sha256:0fd82b8e9c383af11d2b26f27a478640b6b83d669440c0a71481f7c865a51da9"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ba74ec819177af1ef7f59063c6d35a214a8fde6f987f7661f4f0eecc468a8f76"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4af57160800b7a815f3fe0eba9b46bf28aafc195555f1824555fa2cfab6c1538"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffa336210cf9cd8ed117011085817d00abe4c08f99968deef0013ea283547204"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81b8fe282183e4a3c7a1b72f5ade1094ed1c6345a8f153506d114af5bf8accd9"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af41686ccec6a0f2bdc66686dc0f403c41ac2089f80e2214a0f82d001052c03"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70d1f9dde0e5dd9e292a6d4d00058737052b01f3532f69c0c65818dac26dc287"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:249cc6912405917344192b9f9ea5cd5b139d49e0d2f5c7f70bdfaf6b4dbf3a2e"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0eb98d90b6690827dcc84c246811feeb4e1eea683c0eac6caed7549be9c84665"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec82bf1fda6cecce7f7b915f9196601a1bd1a3079796b76d16ae4cce6d0ef89b"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9fd46ce0845cfe28f108888b3ab17abff84ff695e01e73657eec3f96d72eef34"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:bd176afcf8f5d2aed50c3647d4925d0db0579d96f75a31e77cbaf67d8a87742d"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:ec2aa89305006fba9ffb98970db6c8221541be7bee4c1d027421d6f6df7d1ce2"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:92cde43018a2e17d48bb09c79e4d4cb0e236de5063ce897a5e40ac7cb4878773"}, + {file = "aiohttp-3.11.11-cp311-cp311-win32.whl", hash = "sha256:aba807f9569455cba566882c8938f1a549f205ee43c27b126e5450dc9f83cc62"}, + {file = "aiohttp-3.11.11-cp311-cp311-win_amd64.whl", hash = "sha256:ae545f31489548c87b0cced5755cfe5a5308d00407000e72c4fa30b19c3220ac"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e595c591a48bbc295ebf47cb91aebf9bd32f3ff76749ecf282ea7f9f6bb73886"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3ea1b59dc06396b0b424740a10a0a63974c725b1c64736ff788a3689d36c02d2"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8811f3f098a78ffa16e0ea36dffd577eb031aea797cbdba81be039a4169e242c"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7227b87a355ce1f4bf83bfae4399b1f5bb42e0259cb9405824bd03d2f4336a"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d40f9da8cabbf295d3a9dae1295c69975b86d941bc20f0a087f0477fa0a66231"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffb3dc385f6bb1568aa974fe65da84723210e5d9707e360e9ecb51f59406cd2e"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8f5f7515f3552d899c61202d99dcb17d6e3b0de777900405611cd747cecd1b8"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3499c7ffbfd9c6a3d8d6a2b01c26639da7e43d47c7b4f788016226b1e711caa8"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8e2bf8029dbf0810c7bfbc3e594b51c4cc9101fbffb583a3923aea184724203c"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b6212a60e5c482ef90f2d788835387070a88d52cf6241d3916733c9176d39eab"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d119fafe7b634dbfa25a8c597718e69a930e4847f0b88e172744be24515140da"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:6fba278063559acc730abf49845d0e9a9e1ba74f85f0ee6efd5803f08b285853"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:92fc484e34b733704ad77210c7957679c5c3877bd1e6b6d74b185e9320cc716e"}, + {file = "aiohttp-3.11.11-cp312-cp312-win32.whl", hash = "sha256:9f5b3c1ed63c8fa937a920b6c1bec78b74ee09593b3f5b979ab2ae5ef60d7600"}, + {file = "aiohttp-3.11.11-cp312-cp312-win_amd64.whl", hash = "sha256:1e69966ea6ef0c14ee53ef7a3d68b564cc408121ea56c0caa2dc918c1b2f553d"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:541d823548ab69d13d23730a06f97460f4238ad2e5ed966aaf850d7c369782d9"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:929f3ed33743a49ab127c58c3e0a827de0664bfcda566108989a14068f820194"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0882c2820fd0132240edbb4a51eb8ceb6eef8181db9ad5291ab3332e0d71df5f"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b63de12e44935d5aca7ed7ed98a255a11e5cb47f83a9fded7a5e41c40277d104"}, + {file = 
"aiohttp-3.11.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa54f8ef31d23c506910c21163f22b124facb573bff73930735cf9fe38bf7dff"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a344d5dc18074e3872777b62f5f7d584ae4344cd6006c17ba12103759d407af3"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7fb429ab1aafa1f48578eb315ca45bd46e9c37de11fe45c7f5f4138091e2f1"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c341c7d868750e31961d6d8e60ff040fb9d3d3a46d77fd85e1ab8e76c3e9a5c4"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed9ee95614a71e87f1a70bc81603f6c6760128b140bc4030abe6abaa988f1c3d"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:de8d38f1c2810fa2a4f1d995a2e9c70bb8737b18da04ac2afbf3971f65781d87"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a9b7371665d4f00deb8f32208c7c5e652059b0fda41cf6dbcac6114a041f1cc2"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:620598717fce1b3bd14dd09947ea53e1ad510317c85dda2c9c65b622edc96b12"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bf8d9bfee991d8acc72d060d53860f356e07a50f0e0d09a8dfedea1c554dd0d5"}, + {file = "aiohttp-3.11.11-cp313-cp313-win32.whl", hash = "sha256:9d73ee3725b7a737ad86c2eac5c57a4a97793d9f442599bea5ec67ac9f4bdc3d"}, + {file = "aiohttp-3.11.11-cp313-cp313-win_amd64.whl", hash = "sha256:c7a06301c2fb096bdb0bd25fe2011531c1453b9f2c163c8031600ec73af1cc99"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3e23419d832d969f659c208557de4a123e30a10d26e1e14b73431d3c13444c2e"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:21fef42317cf02e05d3b09c028712e1d73a9606f02467fd803f7c1f39cc59add"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1f21bb8d0235fc10c09ce1d11ffbd40fc50d3f08a89e4cf3a0c503dc2562247a"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1642eceeaa5ab6c9b6dfeaaa626ae314d808188ab23ae196a34c9d97efb68350"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2170816e34e10f2fd120f603e951630f8a112e1be3b60963a1f159f5699059a6"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8be8508d110d93061197fd2d6a74f7401f73b6d12f8822bbcd6d74f2b55d71b1"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eed954b161e6b9b65f6be446ed448ed3921763cc432053ceb606f89d793927e"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6c9af134da4bc9b3bd3e6a70072509f295d10ee60c697826225b60b9959acdd"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:44167fc6a763d534a6908bdb2592269b4bf30a03239bcb1654781adf5e49caf1"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:479b8c6ebd12aedfe64563b85920525d05d394b85f166b7873c8bde6da612f9c"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:10b4ff0ad793d98605958089fabfa350e8e62bd5d40aa65cdc69d6785859f94e"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = 
"sha256:b540bd67cfb54e6f0865ceccd9979687210d7ed1a1cc8c01f8e67e2f1e883d28"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dac54e8ce2ed83b1f6b1a54005c87dfed139cf3f777fdc8afc76e7841101226"}, + {file = "aiohttp-3.11.11-cp39-cp39-win32.whl", hash = "sha256:568c1236b2fde93b7720f95a890741854c1200fba4a3471ff48b2934d2d93fd3"}, + {file = "aiohttp-3.11.11-cp39-cp39-win_amd64.whl", hash = "sha256:943a8b052e54dfd6439fd7989f67fc6a7f2138d0a2cf0a7de5f18aa4fe7eb3b1"}, + {file = "aiohttp-3.11.11.tar.gz", hash = "sha256:bb49c7f1e6ebf3821a42d81d494f538107610c3a705987f53068546b0e90303e"}, ] [package.dependencies] @@ -1681,13 +1681,13 @@ llama-index-llms-openai = ">=0.3.0,<0.4.0" [[package]] name = "llama-index-core" -version = "0.12.6" +version = "0.12.7" description = "Interface between LLMs and your data" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "llama_index_core-0.12.6-py3-none-any.whl", hash = "sha256:31d72b61c9f582bb879bf110e09a97b0106acdee155ce92cbbeed4df74ce0b8c"}, - {file = "llama_index_core-0.12.6.tar.gz", hash = "sha256:fb380db6472d1d5e25297676b4e82a7644890fcc4d264ab10ac12490ebf359d4"}, + {file = "llama_index_core-0.12.7-py3-none-any.whl", hash = "sha256:691493915598c09b636f964e85b8baca630faa362a4a8ea130ddea8584ab8d0a"}, + {file = "llama_index_core-0.12.7.tar.gz", hash = "sha256:9935b249c08f87c124962a8ea1e301e1b5bfa7e3ffd6771b6cb59a0de9bb8cb5"}, ] [package.dependencies] @@ -1747,28 +1747,28 @@ llama-index-core = ">=0.12.0,<0.13.0" [[package]] name = "llama-index-llms-openai" -version = "0.3.10" +version = "0.3.11" description = "llama-index llms openai integration" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "llama_index_llms_openai-0.3.10-py3-none-any.whl", hash = "sha256:ceb344f4a1424970096652135d9c2663a50fa53eb6d24d146c7f989d8c098e2e"}, - {file = "llama_index_llms_openai-0.3.10.tar.gz", hash = "sha256:fda3345aa4ba41b25d92817ee8a5699931e79c7c879243f172d6e0e6cbc5c5b3"}, + {file = "llama_index_llms_openai-0.3.11-py3-none-any.whl", hash = "sha256:83c916f9dad3b7b573c16e48244c5e6e7dcc48cb936904a9f0eb0be213655d5c"}, + {file = "llama_index_llms_openai-0.3.11.tar.gz", hash = "sha256:3c603f842dd59b28ee0088633d26ebb83265d278680ef3b88c0255969ed85745"}, ] [package.dependencies] llama-index-core = ">=0.12.4,<0.13.0" -openai = ">=1.57.1,<2.0.0" +openai = ">=1.58.1,<2.0.0" [[package]] name = "llama-index-multi-modal-llms-openai" -version = "0.4.0" +version = "0.4.1" description = "llama-index multi-modal-llms openai integration" optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "llama_index_multi_modal_llms_openai-0.4.0-py3-none-any.whl", hash = "sha256:c5bda1b3c6d14eee87a819ba72b122d82877829695dce8f90a8c600ac16ce243"}, - {file = "llama_index_multi_modal_llms_openai-0.4.0.tar.gz", hash = "sha256:11c3ac7e2d7ace9dbcdd9a662f27bca5fefce98c5682abaffb7dd01d59776658"}, + {file = "llama_index_multi_modal_llms_openai-0.4.1-py3-none-any.whl", hash = "sha256:3ae94185f5090f2a36accba1f63d4059f8276e5eddf2ab61902f0ea18f2dc1ef"}, + {file = "llama_index_multi_modal_llms_openai-0.4.1.tar.gz", hash = "sha256:6cdc137b5ce0c9a00642e6812a9d3d21aebd1c21477bdb13b3ad14b49053ccaa"}, ] [package.dependencies] @@ -1845,13 +1845,13 @@ llama-parse = ">=0.5.0" [[package]] name = "llama-parse" -version = "0.5.17" +version = "0.5.18" description = "Parse files into RAG-Optimized formats." 
optional = false python-versions = "<4.0,>=3.9" files = [ - {file = "llama_parse-0.5.17-py3-none-any.whl", hash = "sha256:0981c7e4ac21bc3b314830c6e9eaed4b20db874ec8f8868540707cf2cca07051"}, - {file = "llama_parse-0.5.17.tar.gz", hash = "sha256:2ba2700ca3b15e84ef072fedcbbfeae2fc13ce7b63fee20fcd5249e6fcdbd381"}, + {file = "llama_parse-0.5.18-py3-none-any.whl", hash = "sha256:b3432a94e2507bd37cd86a25911c3f8dc74ff4293fa8e1583a2b3677129793dc"}, + {file = "llama_parse-0.5.18.tar.gz", hash = "sha256:5e8da9a3a02c79b609d378f32f6a29bca7c3f20b3fe1df6ccb8b4e775b656f42"}, ] [package.dependencies] @@ -1973,13 +1973,13 @@ files = [ [[package]] name = "marshmallow" -version = "3.23.1" +version = "3.23.2" description = "A lightweight library for converting complex datatypes to and from native Python datatypes." optional = false python-versions = ">=3.9" files = [ - {file = "marshmallow-3.23.1-py3-none-any.whl", hash = "sha256:fece2eb2c941180ea1b7fcbd4a83c51bfdd50093fdd3ad2585ee5e1df2508491"}, - {file = "marshmallow-3.23.1.tar.gz", hash = "sha256:3a8dfda6edd8dcdbf216c0ede1d1e78d230a6dc9c5a088f58c4083b974a0d468"}, + {file = "marshmallow-3.23.2-py3-none-any.whl", hash = "sha256:bcaf2d6fd74fb1459f8450e85d994997ad3e70036452cbfa4ab685acb19479b3"}, + {file = "marshmallow-3.23.2.tar.gz", hash = "sha256:c448ac6455ca4d794773f00bae22c2f351d62d739929f761dce5eacb5c468d7f"}, ] [package.dependencies] @@ -3127,18 +3127,18 @@ files = [ [[package]] name = "pydantic" -version = "2.10.3" +version = "2.10.4" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.10.3-py3-none-any.whl", hash = "sha256:be04d85bbc7b65651c5f8e6b9976ed9c6f41782a55524cef079a34a0bb82144d"}, - {file = "pydantic-2.10.3.tar.gz", hash = "sha256:cb5ac360ce894ceacd69c403187900a02c4b20b693a9dd1d643e1effab9eadf9"}, + {file = "pydantic-2.10.4-py3-none-any.whl", hash = "sha256:597e135ea68be3a37552fb524bc7d0d66dcf93d395acd93a00682f1efcb8ee3d"}, + {file = "pydantic-2.10.4.tar.gz", hash = "sha256:82f12e9723da6de4fe2ba888b5971157b3be7ad914267dea8f05f82b28254f06"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.27.1" +pydantic-core = "2.27.2" typing-extensions = ">=4.12.2" [package.extras] @@ -3147,111 +3147,111 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.27.1" +version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, - {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"}, - {file = 
"pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"}, - {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"}, - {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"}, - {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"}, - {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"}, - {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"}, - {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"}, - {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"}, - {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"}, - {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"}, - {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"}, - {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"}, - {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"}, - {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"}, - {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"}, - {file = 
"pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"}, - {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"}, - {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"}, - {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"}, - {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"}, - {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"}, - {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"}, - {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"}, - {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"}, - {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"}, - {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"}, - {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"}, - {file = 
"pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"}, - {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = 
"pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, ] [package.dependencies] @@ -4827,4 +4827,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "ea0986450834a66f9deca7907e09a071322ab7f608cdea83c5d8af9ad78bd55f" +content-hash = "9f4ebc2a5f945731cb5aff1f2d0b3282dcf31e50d7d0d06fd03830f2d219dd52" diff --git a/pyproject.toml b/pyproject.toml index 27aff3fec993a..1d796b958dba5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,7 +45,7 @@ name = "llama-index" packages = [{from = "_llama-index", include = "llama_index"}] readme = 
"README.md" repository = "https://github.com/run-llama/llama_index" -version = "0.12.6" +version = "0.12.7" [tool.poetry.dependencies] python = ">=3.9,<4.0" @@ -57,7 +57,7 @@ llama-index-agent-openai = "^0.4.0" llama-index-readers-file = "^0.4.0" llama-index-readers-llama-parse = ">=0.4.0" llama-index-indices-managed-llama-cloud = ">=0.4.0" -llama-index-core = "^0.12.6" +llama-index-core = "^0.12.7" llama-index-multi-modal-llms-openai = "^0.4.0" llama-index-cli = "^0.4.0" nltk = ">3.8.1" # avoids a CVE, temp until next release, should be in llama-index-core