Skip to content

Commit

Permalink
Merge branch 'cosmosb-inserting-bugfix' of https://github.com/Jeffrey…
Browse files Browse the repository at this point in the history
…English/llama_index into cosmosb-inserting-bugfix
  • Loading branch information
JeffreyEnglish committed Dec 17, 2024
2 parents 19e0b47 + 53ca05b commit 399801d
Show file tree
Hide file tree
Showing 5 changed files with 71 additions and 40 deletions.
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""Voyage embeddings file."""
import logging
import os
from typing import Any, List, Optional, Union

from llama_index.core.base.embeddings.base import Embedding
Expand All @@ -8,7 +9,6 @@

import voyageai
from llama_index.core.embeddings import MultiModalEmbedding
import base64
from io import BytesIO
from pathlib import Path
from llama_index.core.schema import ImageType
Expand Down Expand Up @@ -83,36 +83,29 @@ def _validate_image_format(file_type: str) -> bool:
"""Validate image format."""
return file_type.lower() in SUPPORTED_IMAGE_FORMATS

def _text_to_content(self, input_str: str) -> dict:
return {"type": "text", "text": input_str}

def _texts_to_content(self, input_strs: List[str]) -> List[dict]:
@classmethod
def _texts_to_content(cls, input_strs: List[str]) -> List[dict]:
    """Wrap each input string in the content structure expected by the
    Voyage multimodal embedding API: one dict per input, each holding a
    single-element ``content`` list with a typed text entry.
    """
    return [{"content": [{"type": "text", "text": x}]} for x in input_strs]

def _image_to_content(self, image_input: Union[str, Path, BytesIO]) -> dict:
def _image_to_content(self, image_input: Union[str, Path, BytesIO]) -> Image:
"""Convert an image to a base64 Data URL."""
if isinstance(image_input, (str, Path)):
image = Image.open(str(image_input))
# If it's a string or Path, assume it's a file path
content = {"type": "image_url", "image_url": image_input}
image_path = str(image_input)
file_extension = os.path.splitext(image_path)[1][1:].lower()
elif isinstance(image_input, BytesIO):
# If it's a BytesIO, use it directly
image = Image.open(image_input)
file_extension = image.format.lower()
image_input.seek(0) # Reset the BytesIO stream to the beginning
image_data = image_input.read()

if self._validate_image_format(file_extension):
enc_img = base64.b64encode(image_data).decode("utf-8")
content = {
"type": "image_base64",
"image_base64": f"data:image/{file_extension};base64,{enc_img}",
}
else:
raise ValueError(f"Unsupported image format: {file_extension}")
else:
raise ValueError("Unsupported input type. Must be a file path or BytesIO.")

return {"content": [content]}
if self._validate_image_format(file_extension):
return image
else:
raise ValueError(f"Unsupported image format: {file_extension}")

def _embed_image(
self, image_path: ImageType, input_type: Optional[str] = None
Expand All @@ -125,7 +118,7 @@ def _embed_image(
processed_image = self._image_to_content(image_path)
return self._client.multimodal_embed(
model=self.model_name,
inputs=[processed_image],
inputs=[[processed_image]],
input_type=input_type,
truncation=self.truncation,
).embeddings[0]
Expand All @@ -142,7 +135,7 @@ async def _aembed_image(
return (
await self._aclient.multimodal_embed(
model=self.model_name,
inputs=[processed_image],
inputs=[[processed_image]],
input_type=input_type,
truncation=self.truncation,
)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ exclude = ["**/BUILD"]
license = "MIT"
name = "llama-index-embeddings-voyageai"
readme = "README.md"
version = "0.3.2"
version = "0.3.3"

[tool.poetry.dependencies]
python = ">=3.9,<4.0"
Expand Down
38 changes: 38 additions & 0 deletions llama-index-integrations/tools/llama-index-tools-google/README.md
Original file line number Diff line number Diff line change
@@ -1 +1,39 @@
# LlamaIndex Tools Integration: Google

### Provides a set of tools to interact with Google services.

- You need to enable each of the services below in your Google Cloud console, under the same API key, in
  order to use them.

### Quick Start:

```python
# pip install llama-index-tools-google
from llama_index.tools.google import GmailToolSpec
from llama_index.tools.google import GoogleCalendarToolSpec
from llama_index.tools.google import GoogleSearchToolSpec
```

#### [custom search service](https://developers.google.com/custom-search/v1/overview)

```python
google_spec = GoogleSearchToolSpec(key="your-key", engine="your-engine")
```

- `key` collected from your service console
- `engine`: the search engine to use; you can create a custom search
  engine [here](https://cse.google.com/cse/all)

#### [calendar read, create]()

- requires OAuth 2.0 credentials, you can create them [here](https://console.developers.google.com/apis/credentials)
- store the OAuth `credentials.json` file in the same directory as the runnable agent.
- you will need to manually approve the OAuth consent every time this tool is invoked

#### [gmail read, create]()

- same as calendar

### known defects

- the calendar tool's create action is not able to generate an event if the agent cannot infer the timezone
Original file line number Diff line number Diff line change
Expand Up @@ -20,12 +20,16 @@ Here's an example usage of the TavilyToolSpec.

```python
from llama_index.tools.tavily_research import TavilyToolSpec
from llama_index.agent.openai import OpenAIAgent
from llama_index.core.agent import FunctionCallingAgent
from llama_index.llms.openai import OpenAI

tavily_tool = TavilyToolSpec(
api_key="your-key",
)
agent = OpenAIAgent.from_tools(tavily_tool.to_tool_list())
agent = FunctionCallingAgent.from_tools(
tavily_tool.to_tool_list(),
llm=OpenAI(model="gpt-4o"),
)

agent.chat("What happened in the latest Burning Man festival?")
```
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,19 +23,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install llama-index llama-hub tavily-python"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ce6c6e5d-f61e-461e-8e8d-99d27fb3fcaf",
"metadata": {},
"outputs": [],
"source": [
"# Set up OpenAI\n",
"import openai\n",
"from llama_index.agent import OpenAIAgent"
"%pip install llama-index-tools-tavily-research llama-index"
]
},
{
Expand All @@ -45,7 +33,11 @@
"metadata": {},
"outputs": [],
"source": [
"openai.api_key = \"sk-api-key\""
"# set your openai key, if using openai\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = \"sk-...\"\n",
"os.environ[\"TAVILY_API_KEY\"] = \"...\""
]
},
{
Expand Down Expand Up @@ -125,8 +117,12 @@
"metadata": {},
"outputs": [],
"source": [
"agent = OpenAIAgent.from_tools(\n",
"from llama_index.core.agent import FunctionCallingAgent\n",
"from llama_index.llms.openai import OpenAI\n",
"\n",
"agent = FunctionCallingAgent.from_tools(\n",
" tavily_tool_list,\n",
" llm=OpenAI(model=\"gpt-4o\"),\n",
")"
]
},
Expand Down Expand Up @@ -176,9 +172,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "llama_hub",
"display_name": "llama-index-caVs7DDe-py3.10",
"language": "python",
"name": "llama_hub"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
Expand Down

0 comments on commit 399801d

Please sign in to comment.