diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ff3df05c2473..076f398a3db1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,75 @@ # ChangeLog +## [2024-11-05] + +### `llama-index-core` [0.11.22] + +- bring back support for prompt templates in context chat engines (#16821) +- Fixed the JSON Format of Generated Sub-Question (double curly brackets) (#16820) +- markdown splitter improve metadata (#16789) +- fix empty index + generation synthesizer (#16785) + +### `llama-index-embeddings-azure-inference` [0.2.4] + +- Support for api_version and Azure AI model inference service (#16802) + +### `llama-index-embeddings-gemini` [0.2.2] + +- fix await-async-embeddings (#16790) + +### `llama-index-embeddings-siliconflow` [0.1.0] + +- add siliconflow embedding class (#16753) + +### `llama-index-indices-managed-vectara` [0.2.4] + +- Hotfix: Chain Query Configuration (#16818) + +### `llama-index-llms-anthropic` [0.3.9] + +- Add Anthropic Claude Haiku 3.5 to the list of supported Claude models (#16823) + +### `llama-index-llms-azure-inference` [0.2.4] + +- Support for api_version and Azure AI model inference service (#16802) + +### `llama-index-llms-bedrock` [0.2.6] + +- Add Anthropic Claude Haiku 3.5 to the list of supported Claude models for bedrock and bedrock-converse integrations (#16825) + +### `llama-index-llms-bedrock-converse` [0.3.7] + +- Add Anthropic Claude Haiku 3.5 to the list of supported Claude models for bedrock and bedrock-converse integrations (#16825) + +### `llama-index-llms-dashscope` [0.2.5] + +- More tolerant definition of LLMMetadata information (#16830) +- Fix abstract method signature error (#16809) + +### `llama-index-llms-vllm` [0.3.0] + +- remove beam search param for latest vllm (#16817) + +### `llama-index-postprocessor-colpali-rerank` [0.1.0] + +- Add ColPali as reranker (#16829) + +### `llama-index-postprocessor-siliconflow-rerank` [0.1.0] + +- add siliconflow rerank class (#16737) + +### `llama-index-readers-microsoft-onedrive` [0.2.2] + +- fix: add required_exts for one drive reader (#16822) + +### `llama-index-vector-stores-chroma` [0.3.0] + +- Support breaking changes to filter syntax in latest chroma (#16806) + +### `llama-index-vector-stores-pinecone` [0.3.0] + +- support sparse embedding models, fix delete for serverless for pinecone (#16819) + ## [2024-10-31] ### `llama-index-core` [0.11.21] diff --git a/docs/docs/CHANGELOG.md b/docs/docs/CHANGELOG.md index 966d9a9540eff..076f398a3db1c 100644 --- a/docs/docs/CHANGELOG.md +++ b/docs/docs/CHANGELOG.md @@ -1,5 +1,75 @@ # ChangeLog +## [2024-11-05] + +### `llama-index-core` [0.11.22] + +- bring back support for prompt templates in context chat engines (#16821) +- Fixed the JSON Format of Generated Sub-Question (double curly brackets) (#16820) +- markdown splitter improve metadata (#16789) +- fix empty index + generation synthesizer (#16785) + +### `llama-index-embeddings-azure-inference` [0.2.4] + +- Support for api_version and Azure AI model inference service (#16802) + +### `llama-index-embeddings-gemini` [0.2.2] + +- fix await-async-embeddings (#16790) + +### `llama-index-embeddings-siliconflow` [0.1.0] + +- add siliconflow embedding class (#16753) + +### `llama-index-indices-managed-vectara` [0.2.4] + +- Hotfix: Chain Query Configuration (#16818) + +### `llama-index-llms-anthropic` [0.3.9] + +- Add Anthropic Claude Haiku 3.5 to the list of supported Claude models (#16823) + +### `llama-index-llms-azure-inference` [0.2.4] + +- Support for api_version and Azure AI model inference service (#16802) + +### 
`llama-index-llms-bedrock` [0.2.6] + +- Add Anthropic Claude Haiku 3.5 to the list of supported Claude models for bedrock and bedrock-converse integrations (#16825) + +### `llama-index-llms-bedrock-converse` [0.3.7] + +- Add Anthropic Claude Haiku 3.5 to the list of supported Claude models for bedrock and bedrock-converse integrations (#16825) + +### `llama-index-llms-dashscope` [0.2.5] + +- More tolerant definition of LLMMetadata information (#16830) +- Fix abstract method signature error (#16809) + +### `llama-index-llms-vllm` [0.3.0] + +- remove beam search param for latest vllm (#16817) + +### `llama-index-postprocessor-colpali-rerank` [0.1.0] + +- Add ColPali as reranker (#16829) + +### `llama-index-postprocessor-siliconflow-rerank` [0.1.0] + +- add siliconflow rerank class (#16737) + +### `llama-index-readers-microsoft-onedrive` [0.2.2] + +- fix: add required_exts for one drive reader (#16822) + +### `llama-index-vector-stores-chroma` [0.3.0] + +- Support breaking changes to filter syntax in latest chroma (#16806) + +### `llama-index-vector-stores-pinecone` [0.3.0] + +- support sparse embedding models, fix delete for serverless for pinecone (#16819) + ## [2024-10-31] ### `llama-index-core` [0.11.21] @@ -7,7 +77,7 @@ - Fixed issue with default value set as None for workflow `ctx.get()` (#16756) - fix various issues with react agent streaming (#16755) - add unit test for query pipeline (#16749) -- Fix _merge_ref_doc_kv_pairs duped for-loop (#16739) +- Fix \_merge_ref_doc_kv_pairs duped for-loop (#16739) - bugfix: determine if nodes is none when creating index (#16703) - fixes LLMRerank default_parse_choice_select_answer_fn parsing issue (#16736) - fix return type check on workflows (#16724) diff --git a/docs/docs/api_reference/callbacks/opentelemetry.md b/docs/docs/api_reference/callbacks/opentelemetry.md new file mode 100644 index 0000000000000..199b9dbb732fd --- /dev/null +++ b/docs/docs/api_reference/callbacks/opentelemetry.md @@ -0,0 +1,6 @@ +::: llama_index.callbacks.opentelemetry + options: + members: + - OpenTelemetryEventHandler + - OpenTelemetrySpanHandler + - instrument_opentelemetry diff --git a/docs/docs/api_reference/llms/sambanovacloud.md b/docs/docs/api_reference/llms/sambanovacloud.md new file mode 100644 index 0000000000000..c117d24b4c541 --- /dev/null +++ b/docs/docs/api_reference/llms/sambanovacloud.md @@ -0,0 +1,4 @@ +::: llama_index.llms.sambanovacloud + options: + members: + - SambaNovaCloud diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 21d3aea8c27c0..9fe396f4a1acf 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -852,6 +852,7 @@ nav: - ./api_reference/callbacks/literalai.md - ./api_reference/callbacks/llama_debug.md - ./api_reference/callbacks/openinference.md + - ./api_reference/callbacks/opentelemetry.md - ./api_reference/callbacks/opik.md - ./api_reference/callbacks/promptlayer.md - ./api_reference/callbacks/token_counter.md @@ -1038,6 +1039,7 @@ nav: - ./api_reference/llms/rungpt.md - ./api_reference/llms/sagemaker_endpoint.md - ./api_reference/llms/sambanova.md + - ./api_reference/llms/sambanovacloud.md - ./api_reference/llms/solar.md - ./api_reference/llms/text_generation_inference.md - ./api_reference/llms/together.md @@ -2302,6 +2304,8 @@ plugins: - ../llama-index-integrations/embeddings/llama-index-embeddings-siliconflow - ../llama-index-integrations/memory/llama-index-memory-mem0 - ../llama-index-integrations/postprocessor/llama-index-postprocessor-siliconflow-rerank + - 
../llama-index-integrations/callbacks/llama-index-callbacks-opentelemetry + - ../llama-index-integrations/llms/llama-index-llms-sambanovacloud - redirects: redirect_maps: ./api/llama_index.vector_stores.MongoDBAtlasVectorSearch.html: api_reference/storage/vector_store/mongodb.md diff --git a/llama-index-core/llama_index/core/__init__.py b/llama-index-core/llama_index/core/__init__.py index 0cccfb08d3b3b..3fa9696b93f68 100644 --- a/llama-index-core/llama_index/core/__init__.py +++ b/llama-index-core/llama_index/core/__init__.py @@ -1,6 +1,6 @@ """Init file of LlamaIndex.""" -__version__ = "0.11.21" +__version__ = "0.11.22" import logging from logging import NullHandler diff --git a/llama-index-core/pyproject.toml b/llama-index-core/pyproject.toml index ae6877ff51af9..b329818e88b10 100644 --- a/llama-index-core/pyproject.toml +++ b/llama-index-core/pyproject.toml @@ -46,7 +46,7 @@ name = "llama-index-core" packages = [{include = "llama_index"}] readme = "README.md" repository = "https://github.com/run-llama/llama_index" -version = "0.11.21" +version = "0.11.22" [tool.poetry.dependencies] SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"} diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-siliconflow/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-siliconflow/pyproject.toml index fff927e601285..0d3cb58d4da04 100644 --- a/llama-index-integrations/embeddings/llama-index-embeddings-siliconflow/pyproject.toml +++ b/llama-index-integrations/embeddings/llama-index-embeddings-siliconflow/pyproject.toml @@ -27,11 +27,12 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-embeddings-siliconflow" readme = "README.md" -version = "0.1.0" +version = "0.1.1" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" llama-index-core = "^0.11.0" +aiohttp = "*" [tool.poetry.group.dev.dependencies] ipython = "8.10.0" diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-textembed/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-textembed/pyproject.toml index 4821723a20921..d3f2f1618dc75 100644 --- a/llama-index-integrations/embeddings/llama-index-embeddings-textembed/pyproject.toml +++ b/llama-index-integrations/embeddings/llama-index-embeddings-textembed/pyproject.toml @@ -27,11 +27,12 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-embeddings-textembed" readme = "README.md" -version = "0.1.0" +version = "0.1.1" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" llama-index-core = "^0.11.0" +aiohttp = "*" [tool.poetry.group.dev.dependencies] ipython = "8.10.0" diff --git a/llama-index-integrations/embeddings/llama-index-embeddings-xinference/pyproject.toml b/llama-index-integrations/embeddings/llama-index-embeddings-xinference/pyproject.toml index a87209bee4861..93d06739b9fad 100644 --- a/llama-index-integrations/embeddings/llama-index-embeddings-xinference/pyproject.toml +++ b/llama-index-integrations/embeddings/llama-index-embeddings-xinference/pyproject.toml @@ -27,11 +27,12 @@ exclude = ["**/BUILD"] license = "MIT" name = "llama-index-embeddings-xinference" readme = "README.md" -version = "0.1.0" +version = "0.1.1" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" llama-index-core = "^0.11.0" +aiohttp = "*" [tool.poetry.group.dev.dependencies] ipython = "8.10.0" diff --git a/llama-index-integrations/llms/llama-index-llms-sambanovacloud/pyproject.toml b/llama-index-integrations/llms/llama-index-llms-sambanovacloud/pyproject.toml index 0b6052ce6e3e4..518b1623a2f4f 100644 --- 
a/llama-index-integrations/llms/llama-index-llms-sambanovacloud/pyproject.toml +++ b/llama-index-integrations/llms/llama-index-llms-sambanovacloud/pyproject.toml @@ -25,12 +25,13 @@ authors = ["Your Name "] description = "llama-index llms sambanova cloud integration" name = "llama-index-llms-sambanovacloud" readme = "README.md" -version = "0.3.1" +version = "0.3.2" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" llama-index-core = "^0.11.0" python-dotenv = "^1.0.1" +aiohttp = "*" [tool.poetry.group.dev.dependencies] ipython = "8.10.0" diff --git a/poetry.lock b/poetry.lock index f5923d61de7df..d85df0670361a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1732,13 +1732,13 @@ llama-index-llms-openai = ">=0.2.0,<0.3.0" [[package]] name = "llama-index-core" -version = "0.11.21" +version = "0.11.22" description = "Interface between LLMs and your data" optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_index_core-0.11.21-py3-none-any.whl", hash = "sha256:08d0a605d022127f2eee45d2000b19d1b95fc6f1f3387c8424d924dfa795882d"}, - {file = "llama_index_core-0.11.21.tar.gz", hash = "sha256:720b6e57e5350a72a22657caa69a8a871fa3da3b37edc2adf4a0bde8e5790ad3"}, + {file = "llama_index_core-0.11.22-py3-none-any.whl", hash = "sha256:5c59d95dec9bb0727f25b03de89392c69076b2e4aaa6acbd8773de1f07502e9e"}, + {file = "llama_index_core-0.11.22.tar.gz", hash = "sha256:ddc30b9c873495de40ad8278d0c894ba09f32f6aa7fc638012b1b22b74c32553"}, ] [package.dependencies] @@ -1933,13 +1933,13 @@ llama-parse = ">=0.5.0" [[package]] name = "llama-parse" -version = "0.5.12" +version = "0.5.13" description = "Parse files into RAG-Optimized formats." optional = false python-versions = "<4.0,>=3.8.1" files = [ - {file = "llama_parse-0.5.12-py3-none-any.whl", hash = "sha256:6011feb49da5db4bcbeea1cc6688b6ff24b483877fda80b03fe59239cd08b907"}, - {file = "llama_parse-0.5.12.tar.gz", hash = "sha256:e241606cf3574425df76c0f5d01a31a95c792c6fbef80aaf72f8ed6448bd1715"}, + {file = "llama_parse-0.5.13-py3-none-any.whl", hash = "sha256:d7f14549a5a6a5944f06372a7244c7683209a4a946a41844467a1d6eb5bbf066"}, + {file = "llama_parse-0.5.13.tar.gz", hash = "sha256:80fe0e6f184ca4fae642d15ccfa927771c04f518bd160b084ed789201469e768"}, ] [package.dependencies] @@ -2252,13 +2252,13 @@ pygments = ">2.12.0" [[package]] name = "mkdocs-material" -version = "9.5.43" +version = "9.5.44" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.43-py3-none-any.whl", hash = "sha256:4aae0664c456fd12837a3192e0225c17960ba8bf55d7f0a7daef7e4b0b914a34"}, - {file = "mkdocs_material-9.5.43.tar.gz", hash = "sha256:83be7ff30b65a1e4930dfa4ab911e75780a3afc9583d162692e434581cb46979"}, + {file = "mkdocs_material-9.5.44-py3-none-any.whl", hash = "sha256:47015f9c167d58a5ff5e682da37441fc4d66a1c79334bfc08d774763cacf69ca"}, + {file = "mkdocs_material-9.5.44.tar.gz", hash = "sha256:f3a6c968e524166b3f3ed1fb97d3ed3e0091183b0545cedf7156a2a6804c56c0"}, ] [package.dependencies] @@ -2703,13 +2703,13 @@ files = [ [[package]] name = "openai" -version = "1.53.0" +version = "1.54.1" description = "The official Python library for the openai API" optional = false -python-versions = ">=3.7.1" +python-versions = ">=3.8" files = [ - {file = "openai-1.53.0-py3-none-any.whl", hash = "sha256:20f408c32fc5cb66e60c6882c994cdca580a5648e10045cd840734194f033418"}, - {file = "openai-1.53.0.tar.gz", hash = "sha256:be2c4e77721b166cce8130e544178b7d579f751b4b074ffbaade3854b6f85ec5"}, + {file = 
"openai-1.54.1-py3-none-any.whl", hash = "sha256:3cb49ccb6bfdc724ad01cc397d323ef8314fc7d45e19e9de2afdd6484a533324"}, + {file = "openai-1.54.1.tar.gz", hash = "sha256:5b832bf82002ba8c4f6e5e25c1c0f5d468c22f043711544c716eaffdb30dd6f1"}, ] [package.dependencies] @@ -4841,4 +4841,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "5b8fe2b40c288f486eb28300b535e00433437571fa29e600eb1640bbf0b86f48" +content-hash = "b6f14e55241cdf2331dd5b90155c000df6eb358be19ca8304fad70e616b1628e" diff --git a/pyproject.toml b/pyproject.toml index 13108181df806..7fcd4012a840a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,7 +45,7 @@ name = "llama-index" packages = [{from = "_llama-index", include = "llama_index"}] readme = "README.md" repository = "https://github.com/run-llama/llama_index" -version = "0.11.21" +version = "0.11.22" [tool.poetry.dependencies] python = ">=3.8.1,<4.0" @@ -58,7 +58,7 @@ llama-index-agent-openai = "^0.3.4" llama-index-readers-file = "^0.2.0" llama-index-readers-llama-parse = ">=0.3.0" llama-index-indices-managed-llama-cloud = ">=0.3.0" -llama-index-core = "^0.11.20" +llama-index-core = "^0.11.22" llama-index-multi-modal-llms-openai = "^0.2.0" llama-index-cli = "^0.3.1" nltk = ">3.8.1" # avoids a CVE, temp until next release, should be in llama-index-core