diff --git a/.github/workflows/contrib-openai.yml b/.github/workflows/contrib-openai.yml index 92216cd8fc4e..c60a45b3ad1b 100644 --- a/.github/workflows/contrib-openai.yml +++ b/.github/workflows/contrib-openai.yml @@ -41,11 +41,15 @@ jobs: pip install -e . python -c "import autogen" pip install coverage pytest-asyncio + - name: Install PostgreSQL + run: | + sudo apt install postgresql -y + - name: Start PostgreSQL service + run: sudo service postgresql start - name: Install packages for test when needed run: | pip install docker - pip install qdrant_client[fastembed] - pip install -e .[retrievechat] + pip install -e .[retrievechat-qdrant,retrievechat-pgvector] - name: Coverage env: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} @@ -53,7 +57,7 @@ jobs: AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }} OAI_CONFIG_LIST: ${{ secrets.OAI_CONFIG_LIST }} run: | - coverage run -a -m pytest test/agentchat/contrib/test_retrievechat.py::test_retrievechat test/agentchat/contrib/test_qdrant_retrievechat.py::test_retrievechat + coverage run -a -m pytest test/agentchat/contrib/test_retrievechat.py::test_retrievechat test/agentchat/contrib/test_qdrant_retrievechat.py::test_retrievechat test/agentchat/contrib/test_pgvector_retrievechat.py::test_retrievechat coverage xml - name: Upload coverage to Codecov uses: codecov/codecov-action@v3 diff --git a/.github/workflows/contrib-tests.yml b/.github/workflows/contrib-tests.yml index 872c2c5981a3..4e042b458e08 100644 --- a/.github/workflows/contrib-tests.yml +++ b/.github/workflows/contrib-tests.yml @@ -42,16 +42,20 @@ jobs: - name: Install qdrant_client when python-version is 3.10 if: matrix.python-version == '3.10' run: | - pip install qdrant_client[fastembed] + pip install .[retrievechat-qdrant] - name: Install unstructured when python-version is 3.9 and on linux - if: matrix.python-version == '3.9' && matrix.os == 'ubuntu-latest' run: | sudo apt-get update sudo apt-get install -y tesseract-ocr poppler-utils pip install unstructured[all-docs]==0.13.0 - - name: Install packages and dependencies for RetrieveChat + - name: Install and Start PostgreSQL + runs-on: ubuntu-latest run: | - pip install -e .[retrievechat] + sudo apt install postgresql -y + sudo service postgresql start + - name: Install packages and dependencies for PGVector + run: | + pip install -e .[retrievechat-pgvector] - name: Set AUTOGEN_USE_DOCKER based on OS shell: bash run: | diff --git a/autogen/agentchat/contrib/vectordb/base.py b/autogen/agentchat/contrib/vectordb/base.py index 187d0d6acbbe..29a080086193 100644 --- a/autogen/agentchat/contrib/vectordb/base.py +++ b/autogen/agentchat/contrib/vectordb/base.py @@ -185,7 +185,7 @@ class VectorDBFactory: Factory class for creating vector databases. """ - PREDEFINED_VECTOR_DB = ["chroma"] + PREDEFINED_VECTOR_DB = ["chroma", "pgvector"] @staticmethod def create_vector_db(db_type: str, **kwargs) -> VectorDB: @@ -203,6 +203,10 @@ def create_vector_db(db_type: str, **kwargs) -> VectorDB: from .chromadb import ChromaVectorDB return ChromaVectorDB(**kwargs) + if db_type.lower() in ["pgvector", "pgvectordb"]: + from .pgvectordb import PGVectorDB + + return PGVectorDB(**kwargs) else: raise ValueError( f"Unsupported vector database type: {db_type}. Valid types are {VectorDBFactory.PREDEFINED_VECTOR_DB}." 
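To make the new `pgvector` factory branch concrete, here is a minimal usage sketch based on the signatures introduced in this PR (`VectorDBFactory.create_vector_db`, `PGVectorDB.insert_docs`, `PGVectorDB.retrieve_docs`). The connection string, collection name, ids, and document contents are illustrative placeholders; a reachable PostgreSQL server with the pgvector extension is assumed, and the backing table is assumed to already exist (for example, created via `create_collection`). The `retrievechat-pgvector` extra added to the workflows above is expected to pull in the `pgvector` and `psycopg` packages this snippet relies on.

```python
from autogen.agentchat.contrib.vectordb.base import VectorDBFactory

# "pgvector" (or "pgvectordb") now dispatches to the new PGVectorDB backend.
# The connection string is a placeholder for a local PostgreSQL instance
# that has the pgvector extension available.
vector_db = VectorDBFactory.create_vector_db(
    db_type="pgvector",
    connection_string="postgresql://postgres:postgres@localhost:5432/postgres",
)

# Insert a couple of documents (each needs at least an id and content); ids are kept
# to 8 characters because the table schema uses CHAR(8) primary keys.
# Assumes the "autogen_docs" collection/table already exists.
vector_db.insert_docs(
    [
        {"id": "doc00001", "content": "AutoGen offers conversable agents powered by LLMs."},
        {"id": "doc00002", "content": "RetrieveChat adds retrieval augmentation to agent chats."},
    ],
    collection_name="autogen_docs",
)

# Query the collection; results are one list per query of (Document, distance) pairs.
results = vector_db.retrieve_docs(
    queries=["What is RetrieveChat?"], collection_name="autogen_docs", n_results=2
)
for doc, distance in results[0]:
    print(doc["id"], distance)
```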
diff --git a/autogen/agentchat/contrib/vectordb/pgvectordb.py b/autogen/agentchat/contrib/vectordb/pgvectordb.py new file mode 100644 index 000000000000..ae9d5cbbbecc --- /dev/null +++ b/autogen/agentchat/contrib/vectordb/pgvectordb.py @@ -0,0 +1,736 @@ +import os +import re +from typing import Callable, List + +import numpy as np +from sentence_transformers import SentenceTransformer + +from .base import Document, ItemID, QueryResults, VectorDB +from .utils import get_logger + +try: + import pgvector + from pgvector.psycopg import register_vector +except ImportError: + raise ImportError("Please install pgvector: `pip install pgvector`") + +try: + import psycopg +except ImportError: + raise ImportError("Please install psycopg: `pip install psycopg`") + +PGVECTOR_MAX_BATCH_SIZE = os.environ.get("PGVECTOR_MAX_BATCH_SIZE", 40000) +logger = get_logger(__name__) + + +class Collection: + """ + A Collection object for PGVector. + + Attributes: + client: The PGVector client. + collection_name (str): The name of the collection. Default is "autogen-docs". + embedding_function (Callable): The embedding function used to generate the vector representation. + metadata (Optional[dict]): The metadata of the collection. + get_or_create (Optional): The flag indicating whether to get or create the collection. + + """ + + def __init__( + self, + client=None, + collection_name: str = "autogen-docs", + embedding_function: Callable = None, + metadata=None, + get_or_create=None, + ): + """ + Initialize the Collection object. + + Args: + client: The PostgreSQL client. + collection_name: The name of the collection. Default is "autogen-docs". + embedding_function: The embedding function used to generate the vector representation. + metadata: The metadata of the collection. + get_or_create: The flag indicating whether to get or create the collection. + + Returns: + None + """ + self.client = client + self.embedding_function = embedding_function + self.name = self.set_collection_name(collection_name) + self.require_embeddings_or_documents = False + self.ids = [] + self.embedding_function = ( + SentenceTransformer("all-MiniLM-L6-v2") if embedding_function is None else embedding_function + ) + self.metadata = metadata if metadata else {"hnsw:space": "ip", "hnsw:construction_ef": 32, "hnsw:M": 16} + self.documents = "" + self.get_or_create = get_or_create + + def set_collection_name(self, collection_name): + name = re.sub("-", "_", collection_name) + self.name = name + return self.name + + def add(self, ids: List[ItemID], embeddings: List, metadatas: List, documents: List): + """ + Add documents to the collection. + + Args: + ids (List[ItemID]): A list of document IDs. + embeddings (List): A list of document embeddings. + metadatas (List): A list of document metadatas. + documents (List): A list of documents. + + Returns: + None + """ + cursor = self.client.cursor() + + sql_values = [] + for doc_id, embedding, metadata, document in zip(ids, embeddings, metadatas, documents): + sql_values.append((doc_id, embedding, metadata, document)) + sql_string = f"INSERT INTO {self.name} (id, embedding, metadatas, documents) " f"VALUES (%s, %s, %s, %s);" + cursor.executemany(sql_string, sql_values) + cursor.close() + + def upsert(self, ids: List[ItemID], documents: List, embeddings: List = None, metadatas: List = None) -> None: + """ + Upsert documents into the collection. + + Args: + ids (List[ItemID]): A list of document IDs. + documents (List): A list of documents. + embeddings (List): A list of document embeddings.
+ metadatas (List): A list of document metadatas. + + Returns: + None + """ + cursor = self.client.cursor() + sql_values = [] + if embeddings is not None and metadatas is not None: + for doc_id, embedding, metadata, document in zip(ids, embeddings, metadatas, documents): + metadata = re.sub("'", '"', str(metadata)) + sql_values.append((doc_id, embedding, metadata, document, embedding, metadata, document)) + sql_string = ( + f"INSERT INTO {self.name} (id, embedding, metadatas, documents)\n" + f"VALUES (%s, %s, %s, %s)\n" + f"ON CONFLICT (id)\n" + f"DO UPDATE SET embedding = %s,\n" + f"metadatas = %s, documents = %s;\n" + ) + elif embeddings is not None: + for doc_id, embedding, document in zip(ids, embeddings, documents): + sql_values.append((doc_id, embedding, document, embedding, document)) + sql_string = ( + f"INSERT INTO {self.name} (id, embedding, documents) " + f"VALUES (%s, %s, %s) ON CONFLICT (id)\n" + f"DO UPDATE SET embedding = %s, documents = %s;\n" + ) + elif metadatas is not None: + for doc_id, metadata, document in zip(ids, metadatas, documents): + metadata = re.sub("'", '"', str(metadata)) + embedding = self.embedding_function.encode(document) + sql_values.append((doc_id, metadata, embedding, document, metadata, document, embedding)) + sql_string = ( + f"INSERT INTO {self.name} (id, metadatas, embedding, documents)\n" + f"VALUES (%s, %s, %s, %s)\n" + f"ON CONFLICT (id)\n" + f"DO UPDATE SET metadatas = %s, documents = %s, embedding = %s;\n" + ) + else: + for doc_id, document in zip(ids, documents): + embedding = self.embedding_function.encode(document) + sql_values.append((doc_id, document, embedding, document)) + sql_string = ( + f"INSERT INTO {self.name} (id, documents, embedding)\n" + f"VALUES (%s, %s, %s)\n" + f"ON CONFLICT (id)\n" + f"DO UPDATE SET documents = %s;\n" + ) + logger.debug(f"Upsert SQL String:\n{sql_string}\n{sql_values}") + cursor.executemany(sql_string, sql_values) + cursor.close() + + def count(self): + """ + Get the total number of documents in the collection. + + Returns: + int: The total number of documents. + """ + cursor = self.client.cursor() + query = f"SELECT COUNT(*) FROM {self.name}" + cursor.execute(query) + total = cursor.fetchone()[0] + cursor.close() + try: + total = int(total) + except (TypeError, ValueError): + total = None + return total + + def get(self, ids=None, include=None, where=None, limit=None, offset=None): + """ + Retrieve documents from the collection. + + Args: + ids (Optional[List]): A list of document IDs. + include (Optional): The fields to include. + where (Optional): Additional filtering criteria. + limit (Optional): The maximum number of documents to retrieve. + offset (Optional): The offset for pagination. + + Returns: + List: The retrieved documents. 
+ """ + cursor = self.client.cursor() + if include: + query = f'SELECT (id, {", ".join(map(str, include))}, embedding) FROM {self.name}' + else: + query = f"SELECT * FROM {self.name}" + if ids: + query = f"{query} WHERE id IN {ids}" + elif where: + query = f"{query} WHERE {where}" + if offset: + query = f"{query} OFFSET {offset}" + if limit: + query = f"{query} LIMIT {limit}" + retrieved_documents = [] + try: + cursor.execute(query) + retrieval = cursor.fetchall() + for retrieved_document in retrieval: + retrieved_documents.append( + Document( + id=retrieved_document[0][0], + metadata=retrieved_document[0][1], + content=retrieved_document[0][2], + embedding=retrieved_document[0][3], + ) + ) + except (psycopg.errors.UndefinedTable, psycopg.errors.UndefinedColumn): + logger.info(f"Error executing select on non-existent table: {self.name}. Creating it instead.") + self.create_collection(collection_name=self.name) + logger.info(f"Created table {self.name}") + cursor.close() + return retrieved_documents + + def update(self, ids: List, embeddings: List, metadatas: List, documents: List): + """ + Update documents in the collection. + + Args: + ids (List): A list of document IDs. + embeddings (List): A list of document embeddings. + metadatas (List): A list of document metadatas. + documents (List): A list of documents. + + Returns: + None + """ + cursor = self.client.cursor() + sql_values = [] + for doc_id, embedding, metadata, document in zip(ids, embeddings, metadatas, documents): + sql_values.append((doc_id, embedding, metadata, document, doc_id, embedding, metadata, document)) + sql_string = ( + f"INSERT INTO {self.name} (id, embedding, metadatas, documents) " + f"VALUES (%s, %s, %s, %s) " + f"ON CONFLICT (id) " + f"DO UPDATE SET id = %s, embedding = %s, " + f"metadatas = %s, documents = %s;\n" + ) + logger.debug(f"Update SQL String:\n{sql_string}\n") + cursor.executemany(sql_string, sql_values) + cursor.close() + + @staticmethod + def euclidean_distance(arr1: List[float], arr2: List[float]) -> float: + """ + Calculate the Euclidean distance between two vectors. + + Parameters: + - arr1 (List[float]): The first vector. + - arr2 (List[float]): The second vector. + + Returns: + - float: The Euclidean distance between arr1 and arr2. + """ + dist = np.linalg.norm(arr1 - arr2) + return dist + + @staticmethod + def cosine_distance(arr1: List[float], arr2: List[float]) -> float: + """ + Calculate the cosine distance between two vectors. + + Parameters: + - arr1 (List[float]): The first vector. + - arr2 (List[float]): The second vector. + + Returns: + - float: The cosine distance between arr1 and arr2. + """ + dist = 1 - np.dot(arr1, arr2) / (np.linalg.norm(arr1) * np.linalg.norm(arr2)) + return dist + + @staticmethod + def inner_product_distance(arr1: List[float], arr2: List[float]) -> float: + """ + Calculate the inner product distance between two vectors. + + Parameters: + - arr1 (List[float]): The first vector. + - arr2 (List[float]): The second vector. + + Returns: + - float: The inner product of arr1 and arr2. + """ + dist = np.dot(arr1, arr2) + return dist + + def query( + self, + query_texts: List[str], + collection_name: str = None, + n_results: int = 10, + distance_type: str = "euclidean", + distance_threshold: float = -1, + ) -> QueryResults: + """ + Query documents in the collection. + + Args: + query_texts (List[str]): A list of query texts. + collection_name (Optional[str]): The name of the collection. + n_results (int): The maximum number of results to return.
+ distance_type (Optional[str]): Distance search type - euclidean or cosine + distance_threshold (Optional[float]): Distance threshold to limit searches + Returns: + QueryResults: The query results. + """ + if collection_name: + self.name = collection_name + + if distance_threshold == -1: + distance_threshold = "" + elif distance_threshold > 0: + distance_threshold = f"< {distance_threshold}" + + cursor = self.client.cursor() + results = [] + for query in query_texts: + vector = self.embedding_function.encode(query, convert_to_tensor=False).tolist() + if distance_type.lower() == "cosine": + index_function = "<=>" + elif distance_type.lower() == "euclidean": + index_function = "<->" + elif distance_type.lower() == "inner-product": + index_function = "<#>" + else: + index_function = "<->" + + query = ( + f"SELECT id, documents, embedding, metadatas FROM {self.name}\n" + f"ORDER BY embedding {index_function} '{str(vector)}'::vector {distance_threshold}\n" + f"LIMIT {n_results}" + ) + cursor.execute(query) + for row in cursor.fetchall(): + fetched_document = Document(id=row[0], content=row[1], embedding=row[2], metadata=row[3]) + fetched_document_array = self.convert_string_to_array(array_string=fetched_document.get("embedding")) + if distance_type.lower() == "cosine": + distance = self.cosine_distance(fetched_document_array, vector) + elif distance_type.lower() == "euclidean": + distance = self.euclidean_distance(fetched_document_array, vector) + elif distance_type.lower() == "inner-product": + distance = self.inner_product_distance(fetched_document_array, vector) + else: + distance = self.euclidean_distance(fetched_document_array, vector) + results.append((fetched_document, distance)) + cursor.close() + results = [results] + logger.debug(f"Query Results: {results}") + return results + + @staticmethod + def convert_string_to_array(array_string) -> List[float]: + """ + Convert a string representation of an array to a list of floats. + + Parameters: + - array_string (str): The string representation of the array. + + Returns: + - list: A list of floats parsed from the input string. If the input is + not a string, it returns the input itself. + """ + if not isinstance(array_string, str): + return array_string + array_string = array_string.strip("[]") + array = [float(num) for num in array_string.split()] + return array + + def modify(self, metadata, collection_name: str = None): + """ + Modify metadata for the collection. + + Args: + collection_name: The name of the collection. + metadata: The new metadata. + + Returns: + None + """ + if collection_name: + self.name = collection_name + cursor = self.client.cursor() + cursor.execute( + "UPDATE collections" "SET metadata = '%s'" "WHERE collection_name = '%s';", (metadata, self.name) + ) + cursor.close() + + def delete(self, ids: List[ItemID], collection_name: str = None): + """ + Delete documents from the collection. + + Args: + ids (List[ItemID]): A list of document IDs to delete. + collection_name (str): The name of the collection to delete. + + Returns: + None + """ + if collection_name: + self.name = collection_name + cursor = self.client.cursor() + cursor.execute(f"DELETE FROM {self.name} WHERE id IN ({ids});") + cursor.close() + + def delete_collection(self, collection_name: str = None): + """ + Delete the entire collection. + + Args: + collection_name (Optional[str]): The name of the collection to delete. 
+ + Returns: + None + """ + if collection_name: + self.name = collection_name + cursor = self.client.cursor() + cursor.execute(f"DROP TABLE IF EXISTS {self.name}") + cursor.close() + + def create_collection(self, collection_name: str = None): + """ + Create a new collection. + + Args: + collection_name (Optional[str]): The name of the new collection. + + Returns: + None + """ + if collection_name: + self.name = collection_name + cursor = self.client.cursor() + cursor.execute( + f"CREATE TABLE {self.name} (" + f"documents text, id CHAR(8) PRIMARY KEY, metadatas JSONB, embedding vector(384));" + f"CREATE INDEX " + f'ON {self.name} USING hnsw (embedding vector_l2_ops) WITH (m = {self.metadata["hnsw:M"]}, ' + f'ef_construction = {self.metadata["hnsw:construction_ef"]});' + f"CREATE INDEX " + f'ON {self.name} USING hnsw (embedding vector_cosine_ops) WITH (m = {self.metadata["hnsw:M"]}, ' + f'ef_construction = {self.metadata["hnsw:construction_ef"]});' + f"CREATE INDEX " + f'ON {self.name} USING hnsw (embedding vector_ip_ops) WITH (m = {self.metadata["hnsw:M"]}, ' + f'ef_construction = {self.metadata["hnsw:construction_ef"]});' + ) + cursor.close() + + +class PGVectorDB(VectorDB): + """ + A vector database that uses PGVector as the backend. + """ + + def __init__( + self, + *, + connection_string: str = None, + host: str = None, + port: int = None, + dbname: str = None, + connect_timeout: int = 10, + embedding_function: Callable = None, + metadata: dict = None, + ) -> None: + """ + Initialize the vector database. + + Note: connection_string or host + port + dbname must be specified + + Args: + connection_string: "postgresql://username:password@hostname:port/database" | The PGVector connection string. Default is None. + host: str | The host to connect to. Default is None. + port: int | The port to connect to. Default is None. + dbname: str | The database name to connect to. Default is None. + connect_timeout: int | The timeout to set for the connection. Default is 10. + embedding_function: Callable | The embedding function used to generate the vector representation + of the documents. Default is None. + metadata: dict | The metadata of the vector database. Default is None. If None, it will use this + setting: {"hnsw:space": "ip", "hnsw:construction_ef": 30, "hnsw:M": 16}. Creates Index on table + using hnsw (embedding vector_l2_ops) WITH (m = hnsw:M) ef_construction = "hnsw:construction_ef". + For more info: https://github.com/pgvector/pgvector?tab=readme-ov-file#hnsw + kwargs: dict | Additional keyword arguments. + + Returns: + None + """ + if connection_string: + self.client = psycopg.connect(conninfo=connection_string, autocommit=True) + elif host and port and dbname: + self.client = psycopg.connect( + host=host, port=port, dbname=dbname, connect_timeout=connect_timeout, autocommit=True + ) + self.embedding_function = ( + SentenceTransformer("all-MiniLM-L6-v2") if embedding_function is None else embedding_function + ) + self.metadata = metadata + self.client.execute("CREATE EXTENSION IF NOT EXISTS vector") + register_vector(self.client) + self.active_collection = None + + def create_collection( + self, collection_name: str, overwrite: bool = False, get_or_create: bool = True + ) -> Collection: + """ + Create a collection in the vector database. + Case 1. if the collection does not exist, create the collection. + Case 2. the collection exists, if overwrite is True, it will overwrite the collection. + Case 3. 
the collection exists and overwrite is False, if get_or_create is True, it will get the collection, + otherwise it raise a ValueError. + + Args: + collection_name: str | The name of the collection. + overwrite: bool | Whether to overwrite the collection if it exists. Default is False. + get_or_create: bool | Whether to get the collection if it exists. Default is True. + + Returns: + Collection | The collection object. + """ + try: + if self.active_collection and self.active_collection.name == collection_name: + collection = self.active_collection + else: + collection = self.get_collection(collection_name) + except ValueError: + collection = None + if collection is None: + collection = Collection( + collection_name=collection_name, + embedding_function=self.embedding_function, + get_or_create=get_or_create, + metadata=self.metadata, + ) + collection.set_collection_name(collection_name=collection_name) + collection.create_collection(collection_name=collection_name) + return collection + elif overwrite: + self.delete_collection(collection_name) + collection = Collection( + collection_name=collection_name, + embedding_function=self.embedding_function, + get_or_create=get_or_create, + metadata=self.metadata, + ) + collection.set_collection_name(collection_name=collection_name) + collection.create_collection(collection_name=collection_name) + return collection + elif get_or_create: + return collection + else: + raise ValueError(f"Collection {collection_name} already exists.") + + def get_collection(self, collection_name: str = None) -> Collection: + """ + Get the collection from the vector database. + + Args: + collection_name: str | The name of the collection. Default is None. If None, return the + current active collection. + + Returns: + Collection | The collection object. + """ + if collection_name is None: + if self.active_collection is None: + raise ValueError("No collection is specified.") + else: + logger.debug( + f"No collection is specified. Using current active collection {self.active_collection.name}." + ) + else: + self.active_collection = Collection( + client=self.client, collection_name=collection_name, embedding_function=self.embedding_function + ) + return self.active_collection + + def delete_collection(self, collection_name: str) -> None: + """ + Delete the collection from the vector database. + + Args: + collection_name: str | The name of the collection. + + Returns: + None + """ + self.active_collection.delete_collection(collection_name) + if self.active_collection and self.active_collection.name == collection_name: + self.active_collection = None + + def _batch_insert( + self, collection: Collection, embeddings=None, ids=None, metadatas=None, documents=None, upsert=False + ): + batch_size = int(PGVECTOR_MAX_BATCH_SIZE) + default_metadata = {"hnsw:space": "ip", "hnsw:construction_ef": 32, "hnsw:M": 16} + default_metadatas = [default_metadata] + for i in range(0, len(documents), min(batch_size, len(documents))): + end_idx = i + min(batch_size, len(documents) - i) + collection_kwargs = { + "documents": documents[i:end_idx], + "ids": ids[i:end_idx], + "metadatas": metadatas[i:end_idx] if metadatas else default_metadatas, + "embeddings": embeddings[i:end_idx] if embeddings else None, + } + if upsert: + collection.upsert(**collection_kwargs) + else: + collection.add(**collection_kwargs) + + def insert_docs(self, docs: List[Document], collection_name: str = None, upsert: bool = False) -> None: + """ + Insert documents into the collection of the vector database. 
+ + Args: + docs: List[Document] | A list of documents. Each document is a TypedDict `Document`. + collection_name: str | The name of the collection. Default is None. + upsert: bool | Whether to update the document if it exists. Default is False. + kwargs: Dict | Additional keyword arguments. + + Returns: + None + """ + if not docs: + return + if docs[0].get("content") is None: + raise ValueError("The document content is required.") + if docs[0].get("id") is None: + raise ValueError("The document id is required.") + documents = [doc.get("content") for doc in docs] + ids = [doc.get("id") for doc in docs] + + collection = self.get_collection(collection_name) + if docs[0].get("embedding") is None: + logger.debug( + "No content embedding is provided. " + "Will use the VectorDB's embedding function to generate the content embedding." + ) + embeddings = None + else: + embeddings = [doc.get("embedding") for doc in docs] + if docs[0].get("metadata") is None: + metadatas = None + else: + metadatas = [doc.get("metadata") for doc in docs] + + self._batch_insert(collection, embeddings, ids, metadatas, documents, upsert) + + def update_docs(self, docs: List[Document], collection_name: str = None) -> None: + """ + Update documents in the collection of the vector database. + + Args: + docs: List[Document] | A list of documents. + collection_name: str | The name of the collection. Default is None. + + Returns: + None + """ + self.insert_docs(docs, collection_name, upsert=True) + + def delete_docs(self, ids: List[ItemID], collection_name: str = None) -> None: + """ + Delete documents from the collection of the vector database. + + Args: + ids: List[ItemID] | A list of document ids. Each id is a typed `ItemID`. + collection_name: str | The name of the collection. Default is None. + kwargs: Dict | Additional keyword arguments. + + Returns: + None + """ + collection = self.get_collection(collection_name) + collection.delete(ids=ids, collection_name=collection_name) + + def retrieve_docs( + self, + queries: List[str], + collection_name: str = None, + n_results: int = 10, + distance_threshold: float = -1, + ) -> QueryResults: + """ + Retrieve documents from the collection of the vector database based on the queries. + + Args: + queries: List[str] | A list of queries. Each query is a string. + collection_name: str | The name of the collection. Default is None. + n_results: int | The number of relevant documents to return. Default is 10. + distance_threshold: float | The threshold for the distance score, only distance smaller than it will be + returned. Don't filter with it if < 0. Default is -1. + kwargs: Dict | Additional keyword arguments. + + Returns: + QueryResults | The query results. Each query result is a list of list of tuples containing the document and + the distance. + """ + collection = self.get_collection(collection_name) + if isinstance(queries, str): + queries = [queries] + results = collection.query( + query_texts=queries, + n_results=n_results, + distance_threshold=distance_threshold, + ) + logger.debug(f"Retrieve Docs Results:\n{results}") + return results + + def get_docs_by_ids(self, ids: List[ItemID], collection_name: str = None, include=None, **kwargs) -> List[Document]: + """ + Retrieve documents from the collection of the vector database based on the ids. + + Args: + ids: List[ItemID] | A list of document ids. + collection_name: str | The name of the collection. Default is None. + include: List[str] | The fields to include. Default is None. 
+ If None, will include ["metadatas", "documents"], ids will always be included. + kwargs: dict | Additional keyword arguments. + + Returns: + List[Document] | The results. + """ + collection = self.get_collection(collection_name) + include = include if include else ["metadatas", "documents"] + results = collection.get(ids, include=include, **kwargs) + logger.debug(f"Retrieve Documents by ID Results:\n{results}") + return results diff --git a/notebook/agentchat_pgvector_RetrieveChat.ipynb b/notebook/agentchat_pgvector_RetrieveChat.ipynb new file mode 100644 index 000000000000..c0c681350f2f --- /dev/null +++ b/notebook/agentchat_pgvector_RetrieveChat.ipynb @@ -0,0 +1,2848 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using RetrieveChat for Retrieve Augmented Code Generation and Question Answering\n", + "\n", + "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", + "Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "\n", + "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `RetrieveAssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). 
Essentially, `RetrieveAssistantAgent` and `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n", + "\n", + "## Table of Contents\n", + "We'll demonstrate six examples of using RetrieveChat for code generation and question answering:\n", + "\n", + "- [Example 1: Generate code based off docstrings w/o human feedback](#example-1)\n", + "- [Example 2: Answer a question based off docstrings w/o human feedback](#example-2)\n", + "- [Example 3: Generate code based off docstrings w/ human feedback](#example-3)\n", + "- [Example 4: Answer a question based off docstrings w/ human feedback](#example-4)\n", + "- [Example 5: Solve comprehensive QA problems with RetrieveChat's unique feature `Update Context`](#example-5)\n", + "- [Example 6: Solve comprehensive QA problems with customized prompt and few-shot learning](#example-6)\n", + "\n", + "\n", + "````{=mdx}\n", + ":::info Requirements\n", + "Some extra dependencies are needed for this notebook, which can be installed via pip:\n", + "\n", + "```bash\n", + "pip install pyautogen[retrievechat] flaml[automl]\n", + "```\n", + "\n", + "For more information, please refer to the [installation guide](/docs/installation/).\n", + ":::\n", + "````" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Set your API Endpoint\n", + "\n", + "The [`config_list_from_json`](https://microsoft.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "models to use: ['gpt-35-turbo', 'gpt-35-turbo-0613']\n" + ] + } + ], + "source": [ + "import json\n", + "import os\n", + "\n", + "import chromadb\n", + "\n", + "import autogen\n", + "from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent\n", + "from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent\n", + "\n", + "# Accepted file formats for that can be stored in\n", + "# a vector database instance\n", + "from autogen.retrieve_utils import TEXT_FORMATS\n", + "\n", + "config_list = autogen.config_list_from_json(env_or_file=\"OAI_CONFIG_LIST\")\n", + "\n", + "assert len(config_list) > 0\n", + "print(\"models to use: \", [config_list[i][\"model\"] for i in range(len(config_list))])" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "````{=mdx}\n", + ":::tip\n", + "Learn more about configuring LLMs for agents [here](/docs/topics/llm_configuration).\n", + ":::\n", + "````\n", + "\n", + "## Construct agents for RetrieveChat\n", + "\n", + "We start by initializing the `RetrieveAssistantAgent` and `RetrieveUserProxyAgent`. The system message needs to be set to \"You are a helpful assistant.\" for RetrieveAssistantAgent. The detailed instructions are given in the user message. Later we will use the `RetrieveUserProxyAgent.message_generator` to combine the instructions and a retrieval augmented generation task for an initial prompt to be sent to the LLM assistant." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accepted file formats for `docs_path`:\n", + "['ppt', 'jsonl', 'csv', 'yaml', 'rst', 'htm', 'pdf', 'tsv', 'doc', 'docx', 'pptx', 'msg', 'yml', 'xml', 'md', 'json', 'txt', 'epub', 'org', 'xlsx', 'log', 'html', 'odt', 'rtf']\n" + ] + } + ], + "source": [ + "print(\"Accepted file formats for `docs_path`:\")\n", + "print(TEXT_FORMATS)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/lijiang1/anaconda3/envs/autogen/lib/python3.10/site-packages/transformers/utils/generic.py:311: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.\n", + " torch.utils._pytree._register_pytree_node(\n" + ] + } + ], + "source": [ + "# 1. create an RetrieveAssistantAgent instance named \"assistant\"\n", + "assistant = RetrieveAssistantAgent(\n", + " name=\"assistant\",\n", + " system_message=\"You are a helpful assistant.\",\n", + " llm_config={\n", + " \"timeout\": 600,\n", + " \"cache_seed\": 42,\n", + " \"config_list\": config_list,\n", + " },\n", + ")\n", + "\n", + "# 2. create the RetrieveUserProxyAgent instance named \"ragproxyagent\"\n", + "# By default, the human_input_mode is \"ALWAYS\", which means the agent will ask for human input at every step. We set it to \"NEVER\" here.\n", + "# `docs_path` is the path to the docs directory. It can also be the path to a single file, or the url to a single file. By default,\n", + "# it is set to None, which works only if the collection is already created.\n", + "# `task` indicates the kind of task we're working on. In this example, it's a `code` task.\n", + "# `chunk_token_size` is the chunk token size for the retrieve chat. By default, it is set to `max_tokens * 0.6`, here we set it to 2000.\n", + "# `custom_text_types` is a list of file types to be processed. Default is `autogen.retrieve_utils.TEXT_FORMATS`.\n", + "# This only applies to files under the directories in `docs_path`. Explicitly included files and urls will be chunked regardless of their types.\n", + "# In this example, we set it to [\"non-existent-type\"] to only process markdown files. Since no \"non-existent-type\" files are included in the `websit/docs`,\n", + "# no files there will be processed. 
However, the explicitly included urls will still be processed.\n", + "ragproxyagent = RetrieveUserProxyAgent(\n", + " name=\"ragproxyagent\",\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=3,\n", + " retrieve_config={\n", + " \"task\": \"code\",\n", + " \"docs_path\": [\n", + " \"https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Examples/Integrate%20-%20Spark.md\",\n", + " \"https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Research.md\",\n", + " os.path.join(os.path.abspath(\"\"), \"..\", \"website\", \"docs\"),\n", + " ],\n", + " \"custom_text_types\": [\"non-existent-type\"],\n", + " \"chunk_token_size\": 2000,\n", + " \"model\": config_list[0][\"model\"],\n", + " # \"client\": chromadb.PersistentClient(path=\"/tmp/chromadb\"), # deprecated, use \"vector_db\" instead\n", + " \"vector_db\": \"chroma\", # to use the deprecated `client` parameter, set to None and uncomment the line above\n", + " \"overwrite\": False, # set to True if you want to overwrite an existing collection\n", + " },\n", + " code_execution_config=False, # set to False if you don't want to execute the code\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example 1\n", + "\n", + "[Back to top](#table-of-contents)\n", + "\n", + "Use RetrieveChat to help generate sample code and automatically run the code and fix errors if there is any.\n", + "\n", + "Problem: Which API should I use if I want to use FLAML for a classification task and I want to train the model in 30 seconds. Use spark to parallel the training. Force cancel jobs if time limit is reached." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2024-04-07 17:30:56,955 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - \u001b[32mUse the existing collection `autogen-docs`.\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trying to create collection.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2024-04-07 17:30:59,609 - autogen.agentchat.contrib.retrieve_user_proxy_agent - INFO - Found 2 chunks.\u001b[0m\n", + "Number of requested results 20 is greater than number of elements in index 2, updating n_results = 2\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "VectorDB returns doc_ids: [['bdfbc921']]\n", + "\u001b[32mAdding content of doc bdfbc921 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented coding assistant. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "For code generation, you must obey the following rules:\n", + "Rule 1. You MUST NOT install any packages because all the packages needed are already installed.\n", + "Rule 2. You must follow the formats below to write your code:\n", + "```language\n", + "# your code\n", + "```\n", + "\n", + "User's question is: How can I use FLAML to perform a classification task and use spark to do parallel training. Train 30 seconds and force cancel jobs if time limit is reached.\n", + "\n", + "Context is: # Integrate - Spark\n", + "\n", + "FLAML has integrated Spark for distributed training. 
There are two main aspects of integration with Spark:\n", + "\n", + "- Use Spark ML estimators for AutoML.\n", + "- Use Spark to run training in parallel spark jobs.\n", + "\n", + "## Spark ML Estimators\n", + "\n", + "FLAML integrates estimators based on Spark ML models. These models are trained in parallel using Spark, so we called them Spark estimators. To use these models, you first need to organize your data in the required format.\n", + "\n", + "### Data\n", + "\n", + "For Spark estimators, AutoML only consumes Spark data. FLAML provides a convenient function `to_pandas_on_spark` in the `flaml.automl.spark.utils` module to convert your data into a pandas-on-spark (`pyspark.pandas`) dataframe/series, which Spark estimators require.\n", + "\n", + "This utility function takes data in the form of a `pandas.Dataframe` or `pyspark.sql.Dataframe` and converts it into a pandas-on-spark dataframe. It also takes `pandas.Series` or `pyspark.sql.Dataframe` and converts it into a [pandas-on-spark](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/index.html) series. If you pass in a `pyspark.pandas.Dataframe`, it will not make any changes.\n", + "\n", + "This function also accepts optional arguments `index_col` and `default_index_type`.\n", + "\n", + "- `index_col` is the column name to use as the index, default is None.\n", + "- `default_index_type` is the default index type, default is \"distributed-sequence\". More info about default index type could be found on Spark official [documentation](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/options.html#default-index-type)\n", + "\n", + "Here is an example code snippet for Spark Data:\n", + "\n", + "```python\n", + "import pandas as pd\n", + "from flaml.automl.spark.utils import to_pandas_on_spark\n", + "\n", + "# Creating a dictionary\n", + "data = {\n", + " \"Square_Feet\": [800, 1200, 1800, 1500, 850],\n", + " \"Age_Years\": [20, 15, 10, 7, 25],\n", + " \"Price\": [100000, 200000, 300000, 240000, 120000],\n", + "}\n", + "\n", + "# Creating a pandas DataFrame\n", + "dataframe = pd.DataFrame(data)\n", + "label = \"Price\"\n", + "\n", + "# Convert to pandas-on-spark dataframe\n", + "psdf = to_pandas_on_spark(dataframe)\n", + "```\n", + "\n", + "To use Spark ML models you need to format your data appropriately. 
Specifically, use [`VectorAssembler`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.VectorAssembler.html) to merge all feature columns into a single vector column.\n", + "\n", + "Here is an example of how to use it:\n", + "\n", + "```python\n", + "from pyspark.ml.feature import VectorAssembler\n", + "\n", + "columns = psdf.columns\n", + "feature_cols = [col for col in columns if col != label]\n", + "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n", + "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n", + "```\n", + "\n", + "Later in conducting the experiment, use your pandas-on-spark data like non-spark data and pass them using `X_train, y_train` or `dataframe, label`.\n", + "\n", + "### Estimators\n", + "\n", + "#### Model List\n", + "\n", + "- `lgbm_spark`: The class for fine-tuning Spark version LightGBM models, using [SynapseML](https://microsoft.github.io/SynapseML/docs/features/lightgbm/about/) API.\n", + "\n", + "#### Usage\n", + "\n", + "First, prepare your data in the required format as described in the previous section.\n", + "\n", + "By including the models you intend to try in the `estimators_list` argument to `flaml.automl`, FLAML will start trying configurations for these models. If your input is Spark data, FLAML will also use estimators with the `_spark` postfix by default, even if you haven't specified them.\n", + "\n", + "Here is an example code snippet using SparkML models in AutoML:\n", + "\n", + "```python\n", + "import flaml\n", + "\n", + "# prepare your data in pandas-on-spark format as we previously mentioned\n", + "\n", + "automl = flaml.AutoML()\n", + "settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"estimator_list\": [\"lgbm_spark\"], # this setting is optional\n", + " \"task\": \"regression\",\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=psdf,\n", + " label=label,\n", + " **settings,\n", + ")\n", + "```\n", + "\n", + "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb)\n", + "\n", + "## Parallel Spark Jobs\n", + "\n", + "You can activate Spark as the parallel backend during parallel tuning in both [AutoML](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning) and [Hyperparameter Tuning](/docs/Use-Cases/Tune-User-Defined-Function#parallel-tuning), by setting the `use_spark` to `true`. FLAML will dispatch your job to the distributed Spark backend using [`joblib-spark`](https://github.com/joblib/joblib-spark).\n", + "\n", + "Please note that you should not set `use_spark` to `true` when applying AutoML and Tuning for Spark Data. This is because only SparkML models will be used for Spark Data in AutoML and Tuning. As SparkML models run in parallel, there is no need to distribute them with `use_spark` again.\n", + "\n", + "All the Spark-related arguments are stated below. These arguments are available in both Hyperparameter Tuning and AutoML:\n", + "\n", + "- `use_spark`: boolean, default=False | Whether to use spark to run the training in parallel spark jobs. This can be used to accelerate training on large models and large datasets, but will incur more overhead in time and thus slow down training in some cases. GPU training is not supported yet when use_spark is True. For Spark clusters, by default, we will launch one trial per executor. 
However, sometimes we want to launch more trials than the number of executors (e.g., local mode). In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override the detected `num_executors`. The final number of concurrent trials will be the minimum of `n_concurrent_trials` and `num_executors`.\n", + "- `n_concurrent_trials`: int, default=1 | The number of concurrent trials. When n_concurrent_trials > 1, FLAML performes parallel tuning.\n", + "- `force_cancel`: boolean, default=False | Whether to forcely cancel Spark jobs if the search time exceeded the time budget. Spark jobs include parallel tuning jobs and Spark-based model training jobs.\n", + "\n", + "An example code snippet for using parallel Spark jobs:\n", + "\n", + "```python\n", + "import flaml\n", + "\n", + "automl_experiment = flaml.AutoML()\n", + "automl_settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"task\": \"regression\",\n", + " \"n_concurrent_trials\": 2,\n", + " \"use_spark\": True,\n", + " \"force_cancel\": True, # Activating the force_cancel option can immediately halt Spark jobs once they exceed the allocated time_budget.\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=dataframe,\n", + " label=label,\n", + " **automl_settings,\n", + ")\n", + "```\n", + "\n", + "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb)\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "To perform a classification task using FLAML and use Spark to do parallel training for 30 seconds and force cancel jobs if the time limit is reached, you can follow these steps:\n", + "\n", + "1. First, convert your data into Spark dataframe format using `to_pandas_on_spark` function from `flaml.automl.spark.utils` module.\n", + "2. Then, format your data for use SparkML models by using `VectorAssembler`.\n", + "3. Define your AutoML settings, including the `metric`, `time_budget`, and `task`.\n", + "4. Use `AutoML` from `flaml` to run AutoML with SparkML models by setting `use_spark` to `true`, and `estimator_list` to a list of spark-based estimators, like `[\"lgbm_spark\"]`.\n", + "5. 
Set `n_concurrent_trials` to the desired number of parallel jobs and `force_cancel` to `True` to cancel the jobs if the time limit is reached.\n", + "\n", + "Here's an example code snippet for performing classification using FLAML and Spark:\n", + "\n", + "```python\n", + "import pandas as pd\n", + "from flaml.automl.spark.utils import to_pandas_on_spark\n", + "from pyspark.ml.feature import VectorAssembler\n", + "import flaml\n", + "\n", + "# Creating a dictionary\n", + "data = {\n", + " \"sepal_length\": [5.1, 4.9, 4.7, 4.6, 5.0],\n", + " \"sepal_width\": [3.5, 3.0, 3.2, 3.1, 3.6],\n", + " \"petal_length\": [1.4, 1.4, 1.3, 1.5, 1.4],\n", + " \"petal_width\": [0.2, 0.2, 0.2, 0.2, 0.2],\n", + " \"species\": [\"setosa\", \"setosa\", \"setosa\", \"setosa\", \"setosa\"]\n", + "}\n", + "\n", + "# Creating a pandas DataFrame\n", + "dataframe = pd.DataFrame(data)\n", + "label = \"species\"\n", + "\n", + "# Convert to pandas-on-spark dataframe\n", + "psdf = to_pandas_on_spark(dataframe)\n", + "\n", + "# Format data for SparkML models\n", + "columns = psdf.columns\n", + "feature_cols = [col for col in columns if col != label]\n", + "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n", + "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n", + "\n", + "# Define AutoML settings\n", + "settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"accuracy\",\n", + " \"task\": \"classification\",\n", + "}\n", + "\n", + "# Use AutoML with SparkML models and parallel jobs\n", + "automl = flaml.AutoML()\n", + "automl.fit(\n", + " dataframe=psdf,\n", + " label=label,\n", + " estimator_list=[\"lgbm_spark\"],\n", + " use_spark=True,\n", + " n_concurrent_trials=2,\n", + " force_cancel=True,\n", + " **settings,\n", + ")\n", + "```\n", + "\n", + "Note that the above code assumes the data is small enough to train within 30 seconds. 
If you have a larger dataset, you may need to increase the `time_budget` and adjust the number of parallel jobs accordingly.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "UPDATE CONTEXT\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32mUpdating context and resetting conversation.\u001b[0m\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Number of requested results 60 is greater than number of elements in index 2, updating n_results = 2\n", + "Number of requested results 100 is greater than number of elements in index 2, updating n_results = 2\n", + "Number of requested results 140 is greater than number of elements in index 2, updating n_results = 2\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "VectorDB returns doc_ids: [['bdfbc921']]\n", + "VectorDB returns doc_ids: [['bdfbc921']]\n", + "VectorDB returns doc_ids: [['bdfbc921']]\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Number of requested results 180 is greater than number of elements in index 2, updating n_results = 2\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "VectorDB returns doc_ids: [['bdfbc921']]\n", + "\u001b[32mNo more context, will terminate.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "TERMINATE\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + }, + { + "data": { + "text/plain": [ + "ChatResult(chat_id=None, chat_history=[{'content': 'TERMINATE', 'role': 'assistant'}], summary='', cost=({'total_cost': 0.007691, 'gpt-35-turbo': {'cost': 0.007691, 'prompt_tokens': 4242, 'completion_tokens': 664, 'total_tokens': 4906}}, {'total_cost': 0}), human_input=[])" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# reset the assistant. Always reset the assistant before starting a new conversation.\n", + "assistant.reset()\n", + "\n", + "# given a problem, we use the ragproxyagent to generate a prompt to be sent to the assistant as the initial message.\n", + "# the assistant receives the message and generates a response. The response will be sent back to the ragproxyagent for processing.\n", + "# The conversation continues until the termination condition is met, in RetrieveChat, the termination condition when no human-in-loop is no code block detected.\n", + "# With human-in-loop, the conversation will continue until the user says \"exit\".\n", + "code_problem = \"How can I use FLAML to perform a classification task and use spark to do parallel training. Train 30 seconds and force cancel jobs if time limit is reached.\"\n", + "ragproxyagent.initiate_chat(\n", + " assistant, message=ragproxyagent.message_generator, problem=code_problem, search_string=\"spark\"\n", + ") # search_string is used as an extra filter for the embeddings search, in this case, we only want to search documents that contain \"spark\"." 
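The `ragproxyagent` in this notebook was constructed with the `chroma` backend above; since this notebook accompanies the new PGVector support, the same agent could presumably be pointed at PostgreSQL instead. The sketch below is an assumption-labeled variant: the `db_config` key and its `connection_string` field are inferred from the `PGVectorDB` constructor in this PR rather than confirmed `retrieve_config` API, and the credentials are placeholders.

```python
# Hypothetical variant of the ragproxyagent above that targets the new PGVector backend.
# Assumption: retrieve_config forwards "db_config" to PGVectorDB (connection_string /
# host / port / dbname); verify against the released RetrieveUserProxyAgent docs.
ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=3,
    retrieve_config={
        "task": "code",
        "docs_path": [
            "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Examples/Integrate%20-%20Spark.md",
            "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Research.md",
        ],
        "chunk_token_size": 2000,
        "model": config_list[0]["model"],
        "vector_db": "pgvector",  # dispatched to PGVectorDB via VectorDBFactory
        "db_config": {
            "connection_string": "postgresql://testuser:testpwd@localhost:5432/vectordb",  # placeholder credentials
        },
        "overwrite": False,
    },
    code_execution_config=False,
)
```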
+ ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example 2\n", + "\n", + "[Back to top](#table-of-contents)\n", + "\n", + "Use RetrieveChat to answer a question that is not related to code generation.\n", + "\n", + "Problem: Who is the author of FLAML?" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Number of requested results 20 is greater than number of elements in index 2, updating n_results = 2\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "VectorDB returns doc_ids: [['7968cf3c', 'bdfbc921']]\n", + "\u001b[32mAdding content of doc 7968cf3c to context.\u001b[0m\n", + "\u001b[32mAdding content of doc bdfbc921 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented coding assistant. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "For code generation, you must obey the following rules:\n", + "Rule 1. You MUST NOT install any packages because all the packages needed are already installed.\n", + "Rule 2. You must follow the formats below to write your code:\n", + "```language\n", + "# your code\n", + "```\n", + "\n", + "User's question is: Who is the author of FLAML?\n", + "\n", + "Context is: # Research\n", + "\n", + "For technical details, please check our research publications.\n", + "\n", + "- [FLAML: A Fast and Lightweight AutoML Library](https://www.microsoft.com/en-us/research/publication/flaml-a-fast-and-lightweight-automl-library/). Chi Wang, Qingyun Wu, Markus Weimer, Erkang Zhu. MLSys 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2021flaml,\n", + " title={FLAML: A Fast and Lightweight AutoML Library},\n", + " author={Chi Wang and Qingyun Wu and Markus Weimer and Erkang Zhu},\n", + " year={2021},\n", + " booktitle={MLSys},\n", + "}\n", + "```\n", + "\n", + "- [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. AAAI 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2021cfo,\n", + " title={Frugal Optimization for Cost-related Hyperparameters},\n", + " author={Qingyun Wu and Chi Wang and Silu Huang},\n", + " year={2021},\n", + " booktitle={AAAI},\n", + "}\n", + "```\n", + "\n", + "- [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2021blendsearch,\n", + " title={Economical Hyperparameter Optimization With Blended Search Strategy},\n", + " author={Chi Wang and Qingyun Wu and Silu Huang and Amin Saied},\n", + " year={2021},\n", + " booktitle={ICLR},\n", + "}\n", + "```\n", + "\n", + "- [An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://aclanthology.org/2021.acl-long.178.pdf). Susan Xueqing Liu, Chi Wang. 
ACL 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{liuwang2021hpolm,\n", + " title={An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models},\n", + " author={Susan Xueqing Liu and Chi Wang},\n", + " year={2021},\n", + " booktitle={ACL},\n", + "}\n", + "```\n", + "\n", + "- [ChaCha for Online AutoML](https://www.microsoft.com/en-us/research/publication/chacha-for-online-automl/). Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. ICML 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2021chacha,\n", + " title={ChaCha for Online AutoML},\n", + " author={Qingyun Wu and Chi Wang and John Langford and Paul Mineiro and Marco Rossi},\n", + " year={2021},\n", + " booktitle={ICML},\n", + "}\n", + "```\n", + "\n", + "- [Fair AutoML](https://arxiv.org/abs/2111.06495). Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2111.06495 (2021).\n", + "\n", + "```bibtex\n", + "@inproceedings{wuwang2021fairautoml,\n", + " title={Fair AutoML},\n", + " author={Qingyun Wu and Chi Wang},\n", + " year={2021},\n", + " booktitle={ArXiv preprint arXiv:2111.06495},\n", + "}\n", + "```\n", + "\n", + "- [Mining Robust Default Configurations for Resource-constrained AutoML](https://arxiv.org/abs/2202.09927). Moe Kayali, Chi Wang. ArXiv preprint arXiv:2202.09927 (2022).\n", + "\n", + "```bibtex\n", + "@inproceedings{kayaliwang2022default,\n", + " title={Mining Robust Default Configurations for Resource-constrained AutoML},\n", + " author={Moe Kayali and Chi Wang},\n", + " year={2022},\n", + " booktitle={ArXiv preprint arXiv:2202.09927},\n", + "}\n", + "```\n", + "\n", + "- [Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives](https://openreview.net/forum?id=0Ij9_q567Ma). Shaokun Zhang, Feiran Jia, Chi Wang, Qingyun Wu. ICLR 2023 (notable-top-5%).\n", + "\n", + "```bibtex\n", + "@inproceedings{zhang2023targeted,\n", + " title={Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives},\n", + " author={Shaokun Zhang and Feiran Jia and Chi Wang and Qingyun Wu},\n", + " booktitle={International Conference on Learning Representations},\n", + " year={2023},\n", + " url={https://openreview.net/forum?id=0Ij9_q567Ma},\n", + "}\n", + "```\n", + "\n", + "- [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673). Chi Wang, Susan Xueqing Liu, Ahmed H. Awadallah. ArXiv preprint arXiv:2303.04673 (2023).\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2023EcoOptiGen,\n", + " title={Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference},\n", + " author={Chi Wang and Susan Xueqing Liu and Ahmed H. Awadallah},\n", + " year={2023},\n", + " booktitle={ArXiv preprint arXiv:2303.04673},\n", + "}\n", + "```\n", + "\n", + "- [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337). Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, Chi Wang. 
ArXiv preprint arXiv:2306.01337 (2023).\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2023empirical,\n", + " title={An Empirical Study on Challenging Math Problem Solving with GPT-4},\n", + " author={Yiran Wu and Feiran Jia and Shaokun Zhang and Hangyu Li and Erkang Zhu and Yue Wang and Yin Tat Lee and Richard Peng and Qingyun Wu and Chi Wang},\n", + " year={2023},\n", + " booktitle={ArXiv preprint arXiv:2306.01337},\n", + "}\n", + "```\n", + "# Integrate - Spark\n", + "\n", + "FLAML has integrated Spark for distributed training. There are two main aspects of integration with Spark:\n", + "\n", + "- Use Spark ML estimators for AutoML.\n", + "- Use Spark to run training in parallel spark jobs.\n", + "\n", + "## Spark ML Estimators\n", + "\n", + "FLAML integrates estimators based on Spark ML models. These models are trained in parallel using Spark, so we called them Spark estimators. To use these models, you first need to organize your data in the required format.\n", + "\n", + "### Data\n", + "\n", + "For Spark estimators, AutoML only consumes Spark data. FLAML provides a convenient function `to_pandas_on_spark` in the `flaml.automl.spark.utils` module to convert your data into a pandas-on-spark (`pyspark.pandas`) dataframe/series, which Spark estimators require.\n", + "\n", + "This utility function takes data in the form of a `pandas.Dataframe` or `pyspark.sql.Dataframe` and converts it into a pandas-on-spark dataframe. It also takes `pandas.Series` or `pyspark.sql.Dataframe` and converts it into a [pandas-on-spark](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/index.html) series. If you pass in a `pyspark.pandas.Dataframe`, it will not make any changes.\n", + "\n", + "This function also accepts optional arguments `index_col` and `default_index_type`.\n", + "\n", + "- `index_col` is the column name to use as the index, default is None.\n", + "- `default_index_type` is the default index type, default is \"distributed-sequence\". More info about default index type could be found on Spark official [documentation](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/options.html#default-index-type)\n", + "\n", + "Here is an example code snippet for Spark Data:\n", + "\n", + "```python\n", + "import pandas as pd\n", + "from flaml.automl.spark.utils import to_pandas_on_spark\n", + "\n", + "# Creating a dictionary\n", + "data = {\n", + " \"Square_Feet\": [800, 1200, 1800, 1500, 850],\n", + " \"Age_Years\": [20, 15, 10, 7, 25],\n", + " \"Price\": [100000, 200000, 300000, 240000, 120000],\n", + "}\n", + "\n", + "# Creating a pandas DataFrame\n", + "dataframe = pd.DataFrame(data)\n", + "label = \"Price\"\n", + "\n", + "# Convert to pandas-on-spark dataframe\n", + "psdf = to_pandas_on_spark(dataframe)\n", + "```\n", + "\n", + "To use Spark ML models you need to format your data appropriately. 
Specifically, use [`VectorAssembler`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.VectorAssembler.html) to merge all feature columns into a single vector column.\n", + "\n", + "Here is an example of how to use it:\n", + "\n", + "```python\n", + "from pyspark.ml.feature import VectorAssembler\n", + "\n", + "columns = psdf.columns\n", + "feature_cols = [col for col in columns if col != label]\n", + "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n", + "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n", + "```\n", + "\n", + "Later in conducting the experiment, use your pandas-on-spark data like non-spark data and pass them using `X_train, y_train` or `dataframe, label`.\n", + "\n", + "### Estimators\n", + "\n", + "#### Model List\n", + "\n", + "- `lgbm_spark`: The class for fine-tuning Spark version LightGBM models, using [SynapseML](https://microsoft.github.io/SynapseML/docs/features/lightgbm/about/) API.\n", + "\n", + "#### Usage\n", + "\n", + "First, prepare your data in the required format as described in the previous section.\n", + "\n", + "By including the models you intend to try in the `estimators_list` argument to `flaml.automl`, FLAML will start trying configurations for these models. If your input is Spark data, FLAML will also use estimators with the `_spark` postfix by default, even if you haven't specified them.\n", + "\n", + "Here is an example code snippet using SparkML models in AutoML:\n", + "\n", + "```python\n", + "import flaml\n", + "\n", + "# prepare your data in pandas-on-spark format as we previously mentioned\n", + "\n", + "automl = flaml.AutoML()\n", + "settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"estimator_list\": [\"lgbm_spark\"], # this setting is optional\n", + " \"task\": \"regression\",\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=psdf,\n", + " label=label,\n", + " **settings,\n", + ")\n", + "```\n", + "\n", + "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb)\n", + "\n", + "## Parallel Spark Jobs\n", + "\n", + "You can activate Spark as the parallel backend during parallel tuning in both [AutoML](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning) and [Hyperparameter Tuning](/docs/Use-Cases/Tune-User-Defined-Function#parallel-tuning), by setting the `use_spark` to `true`. FLAML will dispatch your job to the distributed Spark backend using [`joblib-spark`](https://github.com/joblib/joblib-spark).\n", + "\n", + "Please note that you should not set `use_spark` to `true` when applying AutoML and Tuning for Spark Data. This is because only SparkML models will be used for Spark Data in AutoML and Tuning. As SparkML models run in parallel, there is no need to distribute them with `use_spark` again.\n", + "\n", + "All the Spark-related arguments are stated below. These arguments are available in both Hyperparameter Tuning and AutoML:\n", + "\n", + "- `use_spark`: boolean, default=False | Whether to use spark to run the training in parallel spark jobs. This can be used to accelerate training on large models and large datasets, but will incur more overhead in time and thus slow down training in some cases. GPU training is not supported yet when use_spark is True. For Spark clusters, by default, we will launch one trial per executor. 
However, sometimes we want to launch more trials than the number of executors (e.g., local mode). In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override the detected `num_executors`. The final number of concurrent trials will be the minimum of `n_concurrent_trials` and `num_executors`.\n", + "- `n_concurrent_trials`: int, default=1 | The number of concurrent trials. When n_concurrent_trials > 1, FLAML performes parallel tuning.\n", + "- `force_cancel`: boolean, default=False | Whether to forcely cancel Spark jobs if the search time exceeded the time budget. Spark jobs include parallel tuning jobs and Spark-based model training jobs.\n", + "\n", + "An example code snippet for using parallel Spark jobs:\n", + "\n", + "```python\n", + "import flaml\n", + "\n", + "automl_experiment = flaml.AutoML()\n", + "automl_settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"task\": \"regression\",\n", + " \"n_concurrent_trials\": 2,\n", + " \"use_spark\": True,\n", + " \"force_cancel\": True, # Activating the force_cancel option can immediately halt Spark jobs once they exceed the allocated time_budget.\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=dataframe,\n", + " label=label,\n", + " **automl_settings,\n", + ")\n", + "```\n", + "\n", + "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb)\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "The author of FLAML is Chi Wang, along with several co-authors for various publications related to FLAML.\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + }, + { + "data": { + "text/plain": [ + "ChatResult(chat_id=None, chat_history=[{'content': 'You\\'re a retrieve augmented coding assistant. You answer user\\'s questions based on your own knowledge and the\\ncontext provided by the user.\\nIf you can\\'t answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\\nFor code generation, you must obey the following rules:\\nRule 1. You MUST NOT install any packages because all the packages needed are already installed.\\nRule 2. You must follow the formats below to write your code:\\n```language\\n# your code\\n```\\n\\nUser\\'s question is: Who is the author of FLAML?\\n\\nContext is: # Research\\n\\nFor technical details, please check our research publications.\\n\\n- [FLAML: A Fast and Lightweight AutoML Library](https://www.microsoft.com/en-us/research/publication/flaml-a-fast-and-lightweight-automl-library/). Chi Wang, Qingyun Wu, Markus Weimer, Erkang Zhu. MLSys 2021.\\n\\n```bibtex\\n@inproceedings{wang2021flaml,\\n title={FLAML: A Fast and Lightweight AutoML Library},\\n author={Chi Wang and Qingyun Wu and Markus Weimer and Erkang Zhu},\\n year={2021},\\n booktitle={MLSys},\\n}\\n```\\n\\n- [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. 
AAAI 2021.\\n\\n```bibtex\\n@inproceedings{wu2021cfo,\\n title={Frugal Optimization for Cost-related Hyperparameters},\\n author={Qingyun Wu and Chi Wang and Silu Huang},\\n year={2021},\\n booktitle={AAAI},\\n}\\n```\\n\\n- [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021.\\n\\n```bibtex\\n@inproceedings{wang2021blendsearch,\\n title={Economical Hyperparameter Optimization With Blended Search Strategy},\\n author={Chi Wang and Qingyun Wu and Silu Huang and Amin Saied},\\n year={2021},\\n booktitle={ICLR},\\n}\\n```\\n\\n- [An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://aclanthology.org/2021.acl-long.178.pdf). Susan Xueqing Liu, Chi Wang. ACL 2021.\\n\\n```bibtex\\n@inproceedings{liuwang2021hpolm,\\n title={An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models},\\n author={Susan Xueqing Liu and Chi Wang},\\n year={2021},\\n booktitle={ACL},\\n}\\n```\\n\\n- [ChaCha for Online AutoML](https://www.microsoft.com/en-us/research/publication/chacha-for-online-automl/). Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. ICML 2021.\\n\\n```bibtex\\n@inproceedings{wu2021chacha,\\n title={ChaCha for Online AutoML},\\n author={Qingyun Wu and Chi Wang and John Langford and Paul Mineiro and Marco Rossi},\\n year={2021},\\n booktitle={ICML},\\n}\\n```\\n\\n- [Fair AutoML](https://arxiv.org/abs/2111.06495). Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2111.06495 (2021).\\n\\n```bibtex\\n@inproceedings{wuwang2021fairautoml,\\n title={Fair AutoML},\\n author={Qingyun Wu and Chi Wang},\\n year={2021},\\n booktitle={ArXiv preprint arXiv:2111.06495},\\n}\\n```\\n\\n- [Mining Robust Default Configurations for Resource-constrained AutoML](https://arxiv.org/abs/2202.09927). Moe Kayali, Chi Wang. ArXiv preprint arXiv:2202.09927 (2022).\\n\\n```bibtex\\n@inproceedings{kayaliwang2022default,\\n title={Mining Robust Default Configurations for Resource-constrained AutoML},\\n author={Moe Kayali and Chi Wang},\\n year={2022},\\n booktitle={ArXiv preprint arXiv:2202.09927},\\n}\\n```\\n\\n- [Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives](https://openreview.net/forum?id=0Ij9_q567Ma). Shaokun Zhang, Feiran Jia, Chi Wang, Qingyun Wu. ICLR 2023 (notable-top-5%).\\n\\n```bibtex\\n@inproceedings{zhang2023targeted,\\n title={Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives},\\n author={Shaokun Zhang and Feiran Jia and Chi Wang and Qingyun Wu},\\n booktitle={International Conference on Learning Representations},\\n year={2023},\\n url={https://openreview.net/forum?id=0Ij9_q567Ma},\\n}\\n```\\n\\n- [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673). Chi Wang, Susan Xueqing Liu, Ahmed H. Awadallah. ArXiv preprint arXiv:2303.04673 (2023).\\n\\n```bibtex\\n@inproceedings{wang2023EcoOptiGen,\\n title={Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference},\\n author={Chi Wang and Susan Xueqing Liu and Ahmed H. Awadallah},\\n year={2023},\\n booktitle={ArXiv preprint arXiv:2303.04673},\\n}\\n```\\n\\n- [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337). 
Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2306.01337 (2023).\\n\\n```bibtex\\n@inproceedings{wu2023empirical,\\n title={An Empirical Study on Challenging Math Problem Solving with GPT-4},\\n author={Yiran Wu and Feiran Jia and Shaokun Zhang and Hangyu Li and Erkang Zhu and Yue Wang and Yin Tat Lee and Richard Peng and Qingyun Wu and Chi Wang},\\n year={2023},\\n booktitle={ArXiv preprint arXiv:2306.01337},\\n}\\n```\\n# Integrate - Spark\\n\\nFLAML has integrated Spark for distributed training. There are two main aspects of integration with Spark:\\n\\n- Use Spark ML estimators for AutoML.\\n- Use Spark to run training in parallel spark jobs.\\n\\n## Spark ML Estimators\\n\\nFLAML integrates estimators based on Spark ML models. These models are trained in parallel using Spark, so we called them Spark estimators. To use these models, you first need to organize your data in the required format.\\n\\n### Data\\n\\nFor Spark estimators, AutoML only consumes Spark data. FLAML provides a convenient function `to_pandas_on_spark` in the `flaml.automl.spark.utils` module to convert your data into a pandas-on-spark (`pyspark.pandas`) dataframe/series, which Spark estimators require.\\n\\nThis utility function takes data in the form of a `pandas.Dataframe` or `pyspark.sql.Dataframe` and converts it into a pandas-on-spark dataframe. It also takes `pandas.Series` or `pyspark.sql.Dataframe` and converts it into a [pandas-on-spark](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/index.html) series. If you pass in a `pyspark.pandas.Dataframe`, it will not make any changes.\\n\\nThis function also accepts optional arguments `index_col` and `default_index_type`.\\n\\n- `index_col` is the column name to use as the index, default is None.\\n- `default_index_type` is the default index type, default is \"distributed-sequence\". More info about default index type could be found on Spark official [documentation](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/options.html#default-index-type)\\n\\nHere is an example code snippet for Spark Data:\\n\\n```python\\nimport pandas as pd\\nfrom flaml.automl.spark.utils import to_pandas_on_spark\\n\\n# Creating a dictionary\\ndata = {\\n \"Square_Feet\": [800, 1200, 1800, 1500, 850],\\n \"Age_Years\": [20, 15, 10, 7, 25],\\n \"Price\": [100000, 200000, 300000, 240000, 120000],\\n}\\n\\n# Creating a pandas DataFrame\\ndataframe = pd.DataFrame(data)\\nlabel = \"Price\"\\n\\n# Convert to pandas-on-spark dataframe\\npsdf = to_pandas_on_spark(dataframe)\\n```\\n\\nTo use Spark ML models you need to format your data appropriately. 
Specifically, use [`VectorAssembler`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.VectorAssembler.html) to merge all feature columns into a single vector column.\\n\\nHere is an example of how to use it:\\n\\n```python\\nfrom pyspark.ml.feature import VectorAssembler\\n\\ncolumns = psdf.columns\\nfeature_cols = [col for col in columns if col != label]\\nfeaturizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\\npsdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\\n```\\n\\nLater in conducting the experiment, use your pandas-on-spark data like non-spark data and pass them using `X_train, y_train` or `dataframe, label`.\\n\\n### Estimators\\n\\n#### Model List\\n\\n- `lgbm_spark`: The class for fine-tuning Spark version LightGBM models, using [SynapseML](https://microsoft.github.io/SynapseML/docs/features/lightgbm/about/) API.\\n\\n#### Usage\\n\\nFirst, prepare your data in the required format as described in the previous section.\\n\\nBy including the models you intend to try in the `estimators_list` argument to `flaml.automl`, FLAML will start trying configurations for these models. If your input is Spark data, FLAML will also use estimators with the `_spark` postfix by default, even if you haven\\'t specified them.\\n\\nHere is an example code snippet using SparkML models in AutoML:\\n\\n```python\\nimport flaml\\n\\n# prepare your data in pandas-on-spark format as we previously mentioned\\n\\nautoml = flaml.AutoML()\\nsettings = {\\n \"time_budget\": 30,\\n \"metric\": \"r2\",\\n \"estimator_list\": [\"lgbm_spark\"], # this setting is optional\\n \"task\": \"regression\",\\n}\\n\\nautoml.fit(\\n dataframe=psdf,\\n label=label,\\n **settings,\\n)\\n```\\n\\n[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb)\\n\\n## Parallel Spark Jobs\\n\\nYou can activate Spark as the parallel backend during parallel tuning in both [AutoML](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning) and [Hyperparameter Tuning](/docs/Use-Cases/Tune-User-Defined-Function#parallel-tuning), by setting the `use_spark` to `true`. FLAML will dispatch your job to the distributed Spark backend using [`joblib-spark`](https://github.com/joblib/joblib-spark).\\n\\nPlease note that you should not set `use_spark` to `true` when applying AutoML and Tuning for Spark Data. This is because only SparkML models will be used for Spark Data in AutoML and Tuning. As SparkML models run in parallel, there is no need to distribute them with `use_spark` again.\\n\\nAll the Spark-related arguments are stated below. These arguments are available in both Hyperparameter Tuning and AutoML:\\n\\n- `use_spark`: boolean, default=False | Whether to use spark to run the training in parallel spark jobs. This can be used to accelerate training on large models and large datasets, but will incur more overhead in time and thus slow down training in some cases. GPU training is not supported yet when use_spark is True. For Spark clusters, by default, we will launch one trial per executor. However, sometimes we want to launch more trials than the number of executors (e.g., local mode). In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override the detected `num_executors`. 
The final number of concurrent trials will be the minimum of `n_concurrent_trials` and `num_executors`.\\n- `n_concurrent_trials`: int, default=1 | The number of concurrent trials. When n_concurrent_trials > 1, FLAML performes parallel tuning.\\n- `force_cancel`: boolean, default=False | Whether to forcely cancel Spark jobs if the search time exceeded the time budget. Spark jobs include parallel tuning jobs and Spark-based model training jobs.\\n\\nAn example code snippet for using parallel Spark jobs:\\n\\n```python\\nimport flaml\\n\\nautoml_experiment = flaml.AutoML()\\nautoml_settings = {\\n \"time_budget\": 30,\\n \"metric\": \"r2\",\\n \"task\": \"regression\",\\n \"n_concurrent_trials\": 2,\\n \"use_spark\": True,\\n \"force_cancel\": True, # Activating the force_cancel option can immediately halt Spark jobs once they exceed the allocated time_budget.\\n}\\n\\nautoml.fit(\\n dataframe=dataframe,\\n label=label,\\n **automl_settings,\\n)\\n```\\n\\n[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb)\\n\\n', 'role': 'assistant'}, {'content': 'The author of FLAML is Chi Wang, along with several co-authors for various publications related to FLAML.', 'role': 'user'}], summary='The author of FLAML is Chi Wang, along with several co-authors for various publications related to FLAML.', cost=({'total_cost': 0.004711, 'gpt-35-turbo': {'cost': 0.004711, 'prompt_tokens': 3110, 'completion_tokens': 23, 'total_tokens': 3133}}, {'total_cost': 0}), human_input=[])" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# reset the assistant. Always reset the assistant before starting a new conversation.\n", + "assistant.reset()\n", + "\n", + "qa_problem = \"Who is the author of FLAML?\"\n", + "ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=qa_problem)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example 3\n", + "\n", + "[Back to top](#table-of-contents)\n", + "\n", + "Use RetrieveChat to help generate sample code and ask for human-in-loop feedbacks.\n", + "\n", + "Problem: how to build a time series forecasting model for stock price using FLAML?" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:chromadb.segment.impl.vector.local_persistent_hnsw:Number of requested results 20 is greater than number of elements in index 2, updating n_results = 2\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "doc_ids: [['doc_0', 'doc_1']]\n", + "\u001b[32mAdding doc_id doc_0 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented coding assistant. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "For code generation, you must obey the following rules:\n", + "Rule 1. You MUST NOT install any packages because all the packages needed are already installed.\n", + "Rule 2. 
You must follow the formats below to write your code:\n", + "```language\n", + "# your code\n", + "```\n", + "\n", + "User's question is: how to build a time series forecasting model for stock price using FLAML?\n", + "\n", + "Context is: # Integrate - Spark\n", + "\n", + "FLAML has integrated Spark for distributed training. There are two main aspects of integration with Spark:\n", + "- Use Spark ML estimators for AutoML.\n", + "- Use Spark to run training in parallel spark jobs.\n", + "\n", + "## Spark ML Estimators\n", + "\n", + "FLAML integrates estimators based on Spark ML models. These models are trained in parallel using Spark, so we called them Spark estimators. To use these models, you first need to organize your data in the required format.\n", + "\n", + "### Data\n", + "\n", + "For Spark estimators, AutoML only consumes Spark data. FLAML provides a convenient function `to_pandas_on_spark` in the `flaml.automl.spark.utils` module to convert your data into a pandas-on-spark (`pyspark.pandas`) dataframe/series, which Spark estimators require.\n", + "\n", + "This utility function takes data in the form of a `pandas.Dataframe` or `pyspark.sql.Dataframe` and converts it into a pandas-on-spark dataframe. It also takes `pandas.Series` or `pyspark.sql.Dataframe` and converts it into a [pandas-on-spark](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/index.html) series. If you pass in a `pyspark.pandas.Dataframe`, it will not make any changes.\n", + "\n", + "This function also accepts optional arguments `index_col` and `default_index_type`.\n", + "- `index_col` is the column name to use as the index, default is None.\n", + "- `default_index_type` is the default index type, default is \"distributed-sequence\". More info about default index type could be found on Spark official [documentation](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/options.html#default-index-type)\n", + "\n", + "Here is an example code snippet for Spark Data:\n", + "\n", + "```python\n", + "import pandas as pd\n", + "from flaml.automl.spark.utils import to_pandas_on_spark\n", + "# Creating a dictionary\n", + "data = {\"Square_Feet\": [800, 1200, 1800, 1500, 850],\n", + " \"Age_Years\": [20, 15, 10, 7, 25],\n", + " \"Price\": [100000, 200000, 300000, 240000, 120000]}\n", + "\n", + "# Creating a pandas DataFrame\n", + "dataframe = pd.DataFrame(data)\n", + "label = \"Price\"\n", + "\n", + "# Convert to pandas-on-spark dataframe\n", + "psdf = to_pandas_on_spark(dataframe)\n", + "```\n", + "\n", + "To use Spark ML models you need to format your data appropriately. 
Specifically, use [`VectorAssembler`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.VectorAssembler.html) to merge all feature columns into a single vector column.\n", + "\n", + "Here is an example of how to use it:\n", + "```python\n", + "from pyspark.ml.feature import VectorAssembler\n", + "columns = psdf.columns\n", + "feature_cols = [col for col in columns if col != label]\n", + "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n", + "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n", + "```\n", + "\n", + "Later in conducting the experiment, use your pandas-on-spark data like non-spark data and pass them using `X_train, y_train` or `dataframe, label`.\n", + "\n", + "### Estimators\n", + "#### Model List\n", + "- `lgbm_spark`: The class for fine-tuning Spark version LightGBM models, using [SynapseML](https://microsoft.github.io/SynapseML/docs/features/lightgbm/about/) API.\n", + "\n", + "#### Usage\n", + "First, prepare your data in the required format as described in the previous section.\n", + "\n", + "By including the models you intend to try in the `estimators_list` argument to `flaml.automl`, FLAML will start trying configurations for these models. If your input is Spark data, FLAML will also use estimators with the `_spark` postfix by default, even if you haven't specified them.\n", + "\n", + "Here is an example code snippet using SparkML models in AutoML:\n", + "\n", + "```python\n", + "import flaml\n", + "# prepare your data in pandas-on-spark format as we previously mentioned\n", + "\n", + "automl = flaml.AutoML()\n", + "settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"estimator_list\": [\"lgbm_spark\"], # this setting is optional\n", + " \"task\": \"regression\",\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=psdf,\n", + " label=label,\n", + " **settings,\n", + ")\n", + "```\n", + "\n", + "\n", + "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb)\n", + "\n", + "## Parallel Spark Jobs\n", + "You can activate Spark as the parallel backend during parallel tuning in both [AutoML](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning) and [Hyperparameter Tuning](/docs/Use-Cases/Tune-User-Defined-Function#parallel-tuning), by setting the `use_spark` to `true`. FLAML will dispatch your job to the distributed Spark backend using [`joblib-spark`](https://github.com/joblib/joblib-spark).\n", + "\n", + "Please note that you should not set `use_spark` to `true` when applying AutoML and Tuning for Spark Data. This is because only SparkML models will be used for Spark Data in AutoML and Tuning. As SparkML models run in parallel, there is no need to distribute them with `use_spark` again.\n", + "\n", + "All the Spark-related arguments are stated below. These arguments are available in both Hyperparameter Tuning and AutoML:\n", + "\n", + "\n", + "- `use_spark`: boolean, default=False | Whether to use spark to run the training in parallel spark jobs. This can be used to accelerate training on large models and large datasets, but will incur more overhead in time and thus slow down training in some cases. GPU training is not supported yet when use_spark is True. For Spark clusters, by default, we will launch one trial per executor. 
However, sometimes we want to launch more trials than the number of executors (e.g., local mode). In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override the detected `num_executors`. The final number of concurrent trials will be the minimum of `n_concurrent_trials` and `num_executors`.\n", + "- `n_concurrent_trials`: int, default=1 | The number of concurrent trials. When n_concurrent_trials > 1, FLAML performs parallel tuning.\n", + "- `force_cancel`: boolean, default=False | Whether to forcely cancel Spark jobs if the search time exceeded the time budget. Spark jobs include parallel tuning jobs and Spark-based model training jobs.\n", + "\n", + "An example code snippet for using parallel Spark jobs:\n", + "```python\n", + "import flaml\n", + "automl_experiment = flaml.AutoML()\n", + "automl_settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"task\": \"regression\",\n", + " \"n_concurrent_trials\": 2,\n", + " \"use_spark\": True,\n", + " \"force_cancel\": True, # Activating the force_cancel option can immediately halt Spark jobs once they exceed the allocated time_budget.\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=dataframe,\n", + " label=label,\n", + " **automl_settings,\n", + ")\n", + "```\n", + "\n", + "\n", + "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb)\n", + "\n", + "# Research\n", + "\n", + "For technical details, please check our research publications.\n", + "\n", + "* [FLAML: A Fast and Lightweight AutoML Library](https://www.microsoft.com/en-us/research/publication/flaml-a-fast-and-lightweight-automl-library/). Chi Wang, Qingyun Wu, Markus Weimer, Erkang Zhu. MLSys 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2021flaml,\n", + " title={FLAML: A Fast and Lightweight AutoML Library},\n", + " author={Chi Wang and Qingyun Wu and Markus Weimer and Erkang Zhu},\n", + " year={2021},\n", + " booktitle={MLSys},\n", + "}\n", + "```\n", + "\n", + "* [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. AAAI 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2021cfo,\n", + " title={Frugal Optimization for Cost-related Hyperparameters},\n", + " author={Qingyun Wu and Chi Wang and Silu Huang},\n", + " year={2021},\n", + " booktitle={AAAI},\n", + "}\n", + "```\n", + "\n", + "* [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2021blendsearch,\n", + " title={Economical Hyperparameter Optimization With Blended Search Strategy},\n", + " author={Chi Wang and Qingyun Wu and Silu Huang and Amin Saied},\n", + " year={2021},\n", + " booktitle={ICLR},\n", + "}\n", + "```\n", + "\n", + "* [An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://aclanthology.org/2021.acl-long.178.pdf). Susan Xueqing Liu, Chi Wang. 
ACL 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{liuwang2021hpolm,\n", + " title={An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models},\n", + " author={Susan Xueqing Liu and Chi Wang},\n", + " year={2021},\n", + " booktitle={ACL},\n", + "}\n", + "```\n", + "\n", + "* [ChaCha for Online AutoML](https://www.microsoft.com/en-us/research/publication/chacha-for-online-automl/). Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. ICML 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2021chacha,\n", + " title={ChaCha for Online AutoML},\n", + " author={Qingyun Wu and Chi Wang and John Langford and Paul Mineiro and Marco Rossi},\n", + " year={2021},\n", + " booktitle={ICML},\n", + "}\n", + "```\n", + "\n", + "* [Fair AutoML](https://arxiv.org/abs/2111.06495). Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2111.06495 (2021).\n", + "\n", + "```bibtex\n", + "@inproceedings{wuwang2021fairautoml,\n", + " title={Fair AutoML},\n", + " author={Qingyun Wu and Chi Wang},\n", + " year={2021},\n", + " booktitle={ArXiv preprint arXiv:2111.06495},\n", + "}\n", + "```\n", + "\n", + "* [Mining Robust Default Configurations for Resource-constrained AutoML](https://arxiv.org/abs/2202.09927). Moe Kayali, Chi Wang. ArXiv preprint arXiv:2202.09927 (2022).\n", + "\n", + "```bibtex\n", + "@inproceedings{kayaliwang2022default,\n", + " title={Mining Robust Default Configurations for Resource-constrained AutoML},\n", + " author={Moe Kayali and Chi Wang},\n", + " year={2022},\n", + " booktitle={ArXiv preprint arXiv:2202.09927},\n", + "}\n", + "```\n", + "\n", + "* [Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives](https://openreview.net/forum?id=0Ij9_q567Ma). Shaokun Zhang, Feiran Jia, Chi Wang, Qingyun Wu. ICLR 2023 (notable-top-5%).\n", + "\n", + "```bibtex\n", + "@inproceedings{zhang2023targeted,\n", + " title={Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives},\n", + " author={Shaokun Zhang and Feiran Jia and Chi Wang and Qingyun Wu},\n", + " booktitle={International Conference on Learning Representations},\n", + " year={2023},\n", + " url={https://openreview.net/forum?id=0Ij9_q567Ma},\n", + "}\n", + "```\n", + "\n", + "* [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673). Chi Wang, Susan Xueqing Liu, Ahmed H. Awadallah. ArXiv preprint arXiv:2303.04673 (2023).\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2023EcoOptiGen,\n", + " title={Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference},\n", + " author={Chi Wang and Susan Xueqing Liu and Ahmed H. Awadallah},\n", + " year={2023},\n", + " booktitle={ArXiv preprint arXiv:2303.04673},\n", + "}\n", + "```\n", + "\n", + "* [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337). Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, Chi Wang. 
ArXiv preprint arXiv:2306.01337 (2023).\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2023empirical,\n", + " title={An Empirical Study on Challenging Math Problem Solving with GPT-4},\n", + " author={Yiran Wu and Feiran Jia and Shaokun Zhang and Hangyu Li and Erkang Zhu and Yue Wang and Yin Tat Lee and Richard Peng and Qingyun Wu and Chi Wang},\n", + " year={2023},\n", + " booktitle={ArXiv preprint arXiv:2306.01337},\n", + "}\n", + "```\n", + "\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "To build a time series forecasting model for stock price using FLAML, you can use the `lgbm_spark` estimator and organize your data in the required format. First, use `to_pandas_on_spark` function to convert your data into a pandas-on-spark dataframe/series, which Spark estimators require. Next, you should use `VectorAssembler` to merge all feature columns into a single vector column. Finally, use `flaml.AutoML` to try different configurations for the `lgbm_spark` model. Here is an example code snippet: \n", + "\n", + "```python\n", + "import flaml\n", + "import pandas as pd\n", + "from flaml.automl.spark.utils import to_pandas_on_spark\n", + "from pyspark.ml.feature import VectorAssembler\n", + "\n", + "# load your stock price data into a pandas dataframe\n", + "data = pd.read_csv('stock_price.csv')\n", + "\n", + "# specify label column name\n", + "label = 'price'\n", + "\n", + "# convert pandas dataframe to pandas-on-spark dataframe\n", + "psdf = to_pandas_on_spark(data)\n", + "\n", + "# merge feature columns as a single vector column\n", + "feature_cols = [col for col in psdf.columns if col != label]\n", + "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n", + "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n", + "\n", + "# start an AutoML experiment with lgbm_spark estimator\n", + "automl = flaml.AutoML()\n", + "settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"estimator_list\": [\"lgbm_spark\"],\n", + " \"task\": \"regression\",\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=psdf,\n", + " label=label,\n", + " **settings,\n", + ")\n", + "```\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "I want the time_budget to be 10 mins\n", + "\n", + "--------------------------------------------------------------------------------\n", + "I want the time_budget to be 10 mins\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "You can change the `time_budget` parameter in the `settings` dictionary to 10 minutes (600 seconds) like this:\n", + "\n", + "```python\n", + "import flaml\n", + "import pandas as pd\n", + "from flaml.automl.spark.utils import to_pandas_on_spark\n", + "from pyspark.ml.feature import VectorAssembler\n", + "\n", + "# load your stock price data into a pandas dataframe\n", + "data = pd.read_csv('stock_price.csv')\n", + "\n", + "# specify label column name\n", + "label = 'price'\n", + "\n", + "# convert pandas dataframe to pandas-on-spark dataframe\n", + "psdf = to_pandas_on_spark(data)\n", + "\n", + "# merge feature columns as a single vector column\n", 
+ "feature_cols = [col for col in psdf.columns if col != label]\n", + "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n", + "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n", + "\n", + "# start an AutoML experiment with lgbm_spark estimator and time_budget of 10 mins\n", + "automl = flaml.AutoML()\n", + "settings = {\n", + " \"time_budget\": 600, # time_budget in seconds\n", + " \"metric\": \"r2\",\n", + " \"estimator_list\": [\"lgbm_spark\"],\n", + " \"task\": \"regression\",\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=psdf,\n", + " label=label,\n", + " **settings,\n", + ")\n", + "```\n", + "\n", + "\n", + "In this example, the `time_budget` parameter is set to 600, which represents the number of seconds the FLAML AutoML experiment will run. You can adjust this value to control the total time spent on the experiment.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n", + "\u001b[31m\n", + ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "Is there anything else I can help you with?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[31m\n", + ">>>>>>>> NO HUMAN INPUT RECEIVED.\u001b[0m\n" + ] + } + ], + "source": [ + "# reset the assistant. Always reset the assistant before starting a new conversation.\n", + "assistant.reset()\n", + "\n", + "# set `human_input_mode` to be `ALWAYS`, so the agent will ask for human input at every step.\n", + "ragproxyagent.human_input_mode = \"ALWAYS\"\n", + "code_problem = \"how to build a time series forecasting model for stock price using FLAML?\"\n", + "ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=code_problem)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example 4\n", + "\n", + "[Back to top](#table-of-contents)\n", + "\n", + "Use RetrieveChat to answer a question and ask for human-in-loop feedbacks.\n", + "\n", + "Problem: Is there a function named `tune_automl` in FLAML?" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:chromadb.segment.impl.vector.local_persistent_hnsw:Number of requested results 20 is greater than number of elements in index 2, updating n_results = 2\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "doc_ids: [['doc_0', 'doc_1']]\n", + "\u001b[32mAdding doc_id doc_0 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented coding assistant. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "For code generation, you must obey the following rules:\n", + "Rule 1. You MUST NOT install any packages because all the packages needed are already installed.\n", + "Rule 2. 
You must follow the formats below to write your code:\n", + "```language\n", + "# your code\n", + "```\n", + "\n", + "User's question is: Is there a function named `tune_automl` in FLAML?\n", + "\n", + "Context is: # Integrate - Spark\n", + "\n", + "FLAML has integrated Spark for distributed training. There are two main aspects of integration with Spark:\n", + "- Use Spark ML estimators for AutoML.\n", + "- Use Spark to run training in parallel spark jobs.\n", + "\n", + "## Spark ML Estimators\n", + "\n", + "FLAML integrates estimators based on Spark ML models. These models are trained in parallel using Spark, so we called them Spark estimators. To use these models, you first need to organize your data in the required format.\n", + "\n", + "### Data\n", + "\n", + "For Spark estimators, AutoML only consumes Spark data. FLAML provides a convenient function `to_pandas_on_spark` in the `flaml.automl.spark.utils` module to convert your data into a pandas-on-spark (`pyspark.pandas`) dataframe/series, which Spark estimators require.\n", + "\n", + "This utility function takes data in the form of a `pandas.Dataframe` or `pyspark.sql.Dataframe` and converts it into a pandas-on-spark dataframe. It also takes `pandas.Series` or `pyspark.sql.Dataframe` and converts it into a [pandas-on-spark](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/index.html) series. If you pass in a `pyspark.pandas.Dataframe`, it will not make any changes.\n", + "\n", + "This function also accepts optional arguments `index_col` and `default_index_type`.\n", + "- `index_col` is the column name to use as the index, default is None.\n", + "- `default_index_type` is the default index type, default is \"distributed-sequence\". More info about default index type could be found on Spark official [documentation](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/options.html#default-index-type)\n", + "\n", + "Here is an example code snippet for Spark Data:\n", + "\n", + "```python\n", + "import pandas as pd\n", + "from flaml.automl.spark.utils import to_pandas_on_spark\n", + "# Creating a dictionary\n", + "data = {\"Square_Feet\": [800, 1200, 1800, 1500, 850],\n", + " \"Age_Years\": [20, 15, 10, 7, 25],\n", + " \"Price\": [100000, 200000, 300000, 240000, 120000]}\n", + "\n", + "# Creating a pandas DataFrame\n", + "dataframe = pd.DataFrame(data)\n", + "label = \"Price\"\n", + "\n", + "# Convert to pandas-on-spark dataframe\n", + "psdf = to_pandas_on_spark(dataframe)\n", + "```\n", + "\n", + "To use Spark ML models you need to format your data appropriately. 
Specifically, use [`VectorAssembler`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.VectorAssembler.html) to merge all feature columns into a single vector column.\n", + "\n", + "Here is an example of how to use it:\n", + "```python\n", + "from pyspark.ml.feature import VectorAssembler\n", + "columns = psdf.columns\n", + "feature_cols = [col for col in columns if col != label]\n", + "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n", + "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n", + "```\n", + "\n", + "Later in conducting the experiment, use your pandas-on-spark data like non-spark data and pass them using `X_train, y_train` or `dataframe, label`.\n", + "\n", + "### Estimators\n", + "#### Model List\n", + "- `lgbm_spark`: The class for fine-tuning Spark version LightGBM models, using [SynapseML](https://microsoft.github.io/SynapseML/docs/features/lightgbm/about/) API.\n", + "\n", + "#### Usage\n", + "First, prepare your data in the required format as described in the previous section.\n", + "\n", + "By including the models you intend to try in the `estimators_list` argument to `flaml.automl`, FLAML will start trying configurations for these models. If your input is Spark data, FLAML will also use estimators with the `_spark` postfix by default, even if you haven't specified them.\n", + "\n", + "Here is an example code snippet using SparkML models in AutoML:\n", + "\n", + "```python\n", + "import flaml\n", + "# prepare your data in pandas-on-spark format as we previously mentioned\n", + "\n", + "automl = flaml.AutoML()\n", + "settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"estimator_list\": [\"lgbm_spark\"], # this setting is optional\n", + " \"task\": \"regression\",\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=psdf,\n", + " label=label,\n", + " **settings,\n", + ")\n", + "```\n", + "\n", + "\n", + "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb)\n", + "\n", + "## Parallel Spark Jobs\n", + "You can activate Spark as the parallel backend during parallel tuning in both [AutoML](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning) and [Hyperparameter Tuning](/docs/Use-Cases/Tune-User-Defined-Function#parallel-tuning), by setting the `use_spark` to `true`. FLAML will dispatch your job to the distributed Spark backend using [`joblib-spark`](https://github.com/joblib/joblib-spark).\n", + "\n", + "Please note that you should not set `use_spark` to `true` when applying AutoML and Tuning for Spark Data. This is because only SparkML models will be used for Spark Data in AutoML and Tuning. As SparkML models run in parallel, there is no need to distribute them with `use_spark` again.\n", + "\n", + "All the Spark-related arguments are stated below. These arguments are available in both Hyperparameter Tuning and AutoML:\n", + "\n", + "\n", + "- `use_spark`: boolean, default=False | Whether to use spark to run the training in parallel spark jobs. This can be used to accelerate training on large models and large datasets, but will incur more overhead in time and thus slow down training in some cases. GPU training is not supported yet when use_spark is True. For Spark clusters, by default, we will launch one trial per executor. 
However, sometimes we want to launch more trials than the number of executors (e.g., local mode). In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override the detected `num_executors`. The final number of concurrent trials will be the minimum of `n_concurrent_trials` and `num_executors`.\n", + "- `n_concurrent_trials`: int, default=1 | The number of concurrent trials. When n_concurrent_trials > 1, FLAML performs parallel tuning.\n", + "- `force_cancel`: boolean, default=False | Whether to forcely cancel Spark jobs if the search time exceeded the time budget. Spark jobs include parallel tuning jobs and Spark-based model training jobs.\n", + "\n", + "An example code snippet for using parallel Spark jobs:\n", + "```python\n", + "import flaml\n", + "automl_experiment = flaml.AutoML()\n", + "automl_settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"task\": \"regression\",\n", + " \"n_concurrent_trials\": 2,\n", + " \"use_spark\": True,\n", + " \"force_cancel\": True, # Activating the force_cancel option can immediately halt Spark jobs once they exceed the allocated time_budget.\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=dataframe,\n", + " label=label,\n", + " **automl_settings,\n", + ")\n", + "```\n", + "\n", + "\n", + "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb)\n", + "\n", + "# Research\n", + "\n", + "For technical details, please check our research publications.\n", + "\n", + "* [FLAML: A Fast and Lightweight AutoML Library](https://www.microsoft.com/en-us/research/publication/flaml-a-fast-and-lightweight-automl-library/). Chi Wang, Qingyun Wu, Markus Weimer, Erkang Zhu. MLSys 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2021flaml,\n", + " title={FLAML: A Fast and Lightweight AutoML Library},\n", + " author={Chi Wang and Qingyun Wu and Markus Weimer and Erkang Zhu},\n", + " year={2021},\n", + " booktitle={MLSys},\n", + "}\n", + "```\n", + "\n", + "* [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. AAAI 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2021cfo,\n", + " title={Frugal Optimization for Cost-related Hyperparameters},\n", + " author={Qingyun Wu and Chi Wang and Silu Huang},\n", + " year={2021},\n", + " booktitle={AAAI},\n", + "}\n", + "```\n", + "\n", + "* [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2021blendsearch,\n", + " title={Economical Hyperparameter Optimization With Blended Search Strategy},\n", + " author={Chi Wang and Qingyun Wu and Silu Huang and Amin Saied},\n", + " year={2021},\n", + " booktitle={ICLR},\n", + "}\n", + "```\n", + "\n", + "* [An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://aclanthology.org/2021.acl-long.178.pdf). Susan Xueqing Liu, Chi Wang. 
ACL 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{liuwang2021hpolm,\n", + " title={An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models},\n", + " author={Susan Xueqing Liu and Chi Wang},\n", + " year={2021},\n", + " booktitle={ACL},\n", + "}\n", + "```\n", + "\n", + "* [ChaCha for Online AutoML](https://www.microsoft.com/en-us/research/publication/chacha-for-online-automl/). Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. ICML 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2021chacha,\n", + " title={ChaCha for Online AutoML},\n", + " author={Qingyun Wu and Chi Wang and John Langford and Paul Mineiro and Marco Rossi},\n", + " year={2021},\n", + " booktitle={ICML},\n", + "}\n", + "```\n", + "\n", + "* [Fair AutoML](https://arxiv.org/abs/2111.06495). Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2111.06495 (2021).\n", + "\n", + "```bibtex\n", + "@inproceedings{wuwang2021fairautoml,\n", + " title={Fair AutoML},\n", + " author={Qingyun Wu and Chi Wang},\n", + " year={2021},\n", + " booktitle={ArXiv preprint arXiv:2111.06495},\n", + "}\n", + "```\n", + "\n", + "* [Mining Robust Default Configurations for Resource-constrained AutoML](https://arxiv.org/abs/2202.09927). Moe Kayali, Chi Wang. ArXiv preprint arXiv:2202.09927 (2022).\n", + "\n", + "```bibtex\n", + "@inproceedings{kayaliwang2022default,\n", + " title={Mining Robust Default Configurations for Resource-constrained AutoML},\n", + " author={Moe Kayali and Chi Wang},\n", + " year={2022},\n", + " booktitle={ArXiv preprint arXiv:2202.09927},\n", + "}\n", + "```\n", + "\n", + "* [Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives](https://openreview.net/forum?id=0Ij9_q567Ma). Shaokun Zhang, Feiran Jia, Chi Wang, Qingyun Wu. ICLR 2023 (notable-top-5%).\n", + "\n", + "```bibtex\n", + "@inproceedings{zhang2023targeted,\n", + " title={Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives},\n", + " author={Shaokun Zhang and Feiran Jia and Chi Wang and Qingyun Wu},\n", + " booktitle={International Conference on Learning Representations},\n", + " year={2023},\n", + " url={https://openreview.net/forum?id=0Ij9_q567Ma},\n", + "}\n", + "```\n", + "\n", + "* [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673). Chi Wang, Susan Xueqing Liu, Ahmed H. Awadallah. ArXiv preprint arXiv:2303.04673 (2023).\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2023EcoOptiGen,\n", + " title={Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference},\n", + " author={Chi Wang and Susan Xueqing Liu and Ahmed H. Awadallah},\n", + " year={2023},\n", + " booktitle={ArXiv preprint arXiv:2303.04673},\n", + "}\n", + "```\n", + "\n", + "* [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337). Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, Chi Wang. 
ArXiv preprint arXiv:2306.01337 (2023).\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2023empirical,\n", + " title={An Empirical Study on Challenging Math Problem Solving with GPT-4},\n", + " author={Yiran Wu and Feiran Jia and Shaokun Zhang and Hangyu Li and Erkang Zhu and Yue Wang and Yin Tat Lee and Richard Peng and Qingyun Wu and Chi Wang},\n", + " year={2023},\n", + " booktitle={ArXiv preprint arXiv:2306.01337},\n", + "}\n", + "```\n", + "\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32mAdding doc_id doc_1 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented coding assistant. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "For code generation, you must obey the following rules:\n", + "Rule 1. You MUST NOT install any packages because all the packages needed are already installed.\n", + "Rule 2. You must follow the formats below to write your code:\n", + "```language\n", + "# your code\n", + "```\n", + "\n", + "User's question is: Is there a function named `tune_automl` in FLAML?\n", + "\n", + "Context is: # Integrate - Spark\n", + "\n", + "FLAML has integrated Spark for distributed training. There are two main aspects of integration with Spark:\n", + "- Use Spark ML estimators for AutoML.\n", + "- Use Spark to run training in parallel spark jobs.\n", + "\n", + "## Spark ML Estimators\n", + "\n", + "FLAML integrates estimators based on Spark ML models. These models are trained in parallel using Spark, so we called them Spark estimators. To use these models, you first need to organize your data in the required format.\n", + "\n", + "### Data\n", + "\n", + "For Spark estimators, AutoML only consumes Spark data. FLAML provides a convenient function `to_pandas_on_spark` in the `flaml.automl.spark.utils` module to convert your data into a pandas-on-spark (`pyspark.pandas`) dataframe/series, which Spark estimators require.\n", + "\n", + "This utility function takes data in the form of a `pandas.Dataframe` or `pyspark.sql.Dataframe` and converts it into a pandas-on-spark dataframe. It also takes `pandas.Series` or `pyspark.sql.Dataframe` and converts it into a [pandas-on-spark](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/index.html) series. If you pass in a `pyspark.pandas.Dataframe`, it will not make any changes.\n", + "\n", + "This function also accepts optional arguments `index_col` and `default_index_type`.\n", + "- `index_col` is the column name to use as the index, default is None.\n", + "- `default_index_type` is the default index type, default is \"distributed-sequence\". 
More info about default index type could be found on Spark official [documentation](https://spark.apache.org/docs/latest/api/python/user_guide/pandas_on_spark/options.html#default-index-type)\n", + "\n", + "Here is an example code snippet for Spark Data:\n", + "\n", + "```python\n", + "import pandas as pd\n", + "from flaml.automl.spark.utils import to_pandas_on_spark\n", + "# Creating a dictionary\n", + "data = {\"Square_Feet\": [800, 1200, 1800, 1500, 850],\n", + " \"Age_Years\": [20, 15, 10, 7, 25],\n", + " \"Price\": [100000, 200000, 300000, 240000, 120000]}\n", + "\n", + "# Creating a pandas DataFrame\n", + "dataframe = pd.DataFrame(data)\n", + "label = \"Price\"\n", + "\n", + "# Convert to pandas-on-spark dataframe\n", + "psdf = to_pandas_on_spark(dataframe)\n", + "```\n", + "\n", + "To use Spark ML models you need to format your data appropriately. Specifically, use [`VectorAssembler`](https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.ml.feature.VectorAssembler.html) to merge all feature columns into a single vector column.\n", + "\n", + "Here is an example of how to use it:\n", + "```python\n", + "from pyspark.ml.feature import VectorAssembler\n", + "columns = psdf.columns\n", + "feature_cols = [col for col in columns if col != label]\n", + "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n", + "psdf = featurizer.transform(psdf.to_spark(index_col=\"index\"))[\"index\", \"features\"]\n", + "```\n", + "\n", + "Later in conducting the experiment, use your pandas-on-spark data like non-spark data and pass them using `X_train, y_train` or `dataframe, label`.\n", + "\n", + "### Estimators\n", + "#### Model List\n", + "- `lgbm_spark`: The class for fine-tuning Spark version LightGBM models, using [SynapseML](https://microsoft.github.io/SynapseML/docs/features/lightgbm/about/) API.\n", + "\n", + "#### Usage\n", + "First, prepare your data in the required format as described in the previous section.\n", + "\n", + "By including the models you intend to try in the `estimators_list` argument to `flaml.automl`, FLAML will start trying configurations for these models. If your input is Spark data, FLAML will also use estimators with the `_spark` postfix by default, even if you haven't specified them.\n", + "\n", + "Here is an example code snippet using SparkML models in AutoML:\n", + "\n", + "```python\n", + "import flaml\n", + "# prepare your data in pandas-on-spark format as we previously mentioned\n", + "\n", + "automl = flaml.AutoML()\n", + "settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"estimator_list\": [\"lgbm_spark\"], # this setting is optional\n", + " \"task\": \"regression\",\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=psdf,\n", + " label=label,\n", + " **settings,\n", + ")\n", + "```\n", + "\n", + "\n", + "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/automl_bankrupt_synapseml.ipynb)\n", + "\n", + "## Parallel Spark Jobs\n", + "You can activate Spark as the parallel backend during parallel tuning in both [AutoML](/docs/Use-Cases/Task-Oriented-AutoML#parallel-tuning) and [Hyperparameter Tuning](/docs/Use-Cases/Tune-User-Defined-Function#parallel-tuning), by setting the `use_spark` to `true`. 
FLAML will dispatch your job to the distributed Spark backend using [`joblib-spark`](https://github.com/joblib/joblib-spark).\n", + "\n", + "Please note that you should not set `use_spark` to `true` when applying AutoML and Tuning for Spark Data. This is because only SparkML models will be used for Spark Data in AutoML and Tuning. As SparkML models run in parallel, there is no need to distribute them with `use_spark` again.\n", + "\n", + "All the Spark-related arguments are stated below. These arguments are available in both Hyperparameter Tuning and AutoML:\n", + "\n", + "\n", + "- `use_spark`: boolean, default=False | Whether to use spark to run the training in parallel spark jobs. This can be used to accelerate training on large models and large datasets, but will incur more overhead in time and thus slow down training in some cases. GPU training is not supported yet when use_spark is True. For Spark clusters, by default, we will launch one trial per executor. However, sometimes we want to launch more trials than the number of executors (e.g., local mode). In this case, we can set the environment variable `FLAML_MAX_CONCURRENT` to override the detected `num_executors`. The final number of concurrent trials will be the minimum of `n_concurrent_trials` and `num_executors`.\n", + "- `n_concurrent_trials`: int, default=1 | The number of concurrent trials. When n_concurrent_trials > 1, FLAML performs parallel tuning.\n", + "- `force_cancel`: boolean, default=False | Whether to forcely cancel Spark jobs if the search time exceeded the time budget. Spark jobs include parallel tuning jobs and Spark-based model training jobs.\n", + "\n", + "An example code snippet for using parallel Spark jobs:\n", + "```python\n", + "import flaml\n", + "automl_experiment = flaml.AutoML()\n", + "automl_settings = {\n", + " \"time_budget\": 30,\n", + " \"metric\": \"r2\",\n", + " \"task\": \"regression\",\n", + " \"n_concurrent_trials\": 2,\n", + " \"use_spark\": True,\n", + " \"force_cancel\": True, # Activating the force_cancel option can immediately halt Spark jobs once they exceed the allocated time_budget.\n", + "}\n", + "\n", + "automl.fit(\n", + " dataframe=dataframe,\n", + " label=label,\n", + " **automl_settings,\n", + ")\n", + "```\n", + "\n", + "\n", + "[Link to notebook](https://github.com/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb) | [Open in colab](https://colab.research.google.com/github/microsoft/FLAML/blob/main/notebook/integrate_spark.ipynb)\n", + "\n", + "# Research\n", + "\n", + "For technical details, please check our research publications.\n", + "\n", + "* [FLAML: A Fast and Lightweight AutoML Library](https://www.microsoft.com/en-us/research/publication/flaml-a-fast-and-lightweight-automl-library/). Chi Wang, Qingyun Wu, Markus Weimer, Erkang Zhu. MLSys 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2021flaml,\n", + " title={FLAML: A Fast and Lightweight AutoML Library},\n", + " author={Chi Wang and Qingyun Wu and Markus Weimer and Erkang Zhu},\n", + " year={2021},\n", + " booktitle={MLSys},\n", + "}\n", + "```\n", + "\n", + "* [Frugal Optimization for Cost-related Hyperparameters](https://arxiv.org/abs/2005.01571). Qingyun Wu, Chi Wang, Silu Huang. 
AAAI 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2021cfo,\n", + " title={Frugal Optimization for Cost-related Hyperparameters},\n", + " author={Qingyun Wu and Chi Wang and Silu Huang},\n", + " year={2021},\n", + " booktitle={AAAI},\n", + "}\n", + "```\n", + "\n", + "* [Economical Hyperparameter Optimization With Blended Search Strategy](https://www.microsoft.com/en-us/research/publication/economical-hyperparameter-optimization-with-blended-search-strategy/). Chi Wang, Qingyun Wu, Silu Huang, Amin Saied. ICLR 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2021blendsearch,\n", + " title={Economical Hyperparameter Optimization With Blended Search Strategy},\n", + " author={Chi Wang and Qingyun Wu and Silu Huang and Amin Saied},\n", + " year={2021},\n", + " booktitle={ICLR},\n", + "}\n", + "```\n", + "\n", + "* [An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models](https://aclanthology.org/2021.acl-long.178.pdf). Susan Xueqing Liu, Chi Wang. ACL 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{liuwang2021hpolm,\n", + " title={An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained Language Models},\n", + " author={Susan Xueqing Liu and Chi Wang},\n", + " year={2021},\n", + " booktitle={ACL},\n", + "}\n", + "```\n", + "\n", + "* [ChaCha for Online AutoML](https://www.microsoft.com/en-us/research/publication/chacha-for-online-automl/). Qingyun Wu, Chi Wang, John Langford, Paul Mineiro and Marco Rossi. ICML 2021.\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2021chacha,\n", + " title={ChaCha for Online AutoML},\n", + " author={Qingyun Wu and Chi Wang and John Langford and Paul Mineiro and Marco Rossi},\n", + " year={2021},\n", + " booktitle={ICML},\n", + "}\n", + "```\n", + "\n", + "* [Fair AutoML](https://arxiv.org/abs/2111.06495). Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2111.06495 (2021).\n", + "\n", + "```bibtex\n", + "@inproceedings{wuwang2021fairautoml,\n", + " title={Fair AutoML},\n", + " author={Qingyun Wu and Chi Wang},\n", + " year={2021},\n", + " booktitle={ArXiv preprint arXiv:2111.06495},\n", + "}\n", + "```\n", + "\n", + "* [Mining Robust Default Configurations for Resource-constrained AutoML](https://arxiv.org/abs/2202.09927). Moe Kayali, Chi Wang. ArXiv preprint arXiv:2202.09927 (2022).\n", + "\n", + "```bibtex\n", + "@inproceedings{kayaliwang2022default,\n", + " title={Mining Robust Default Configurations for Resource-constrained AutoML},\n", + " author={Moe Kayali and Chi Wang},\n", + " year={2022},\n", + " booktitle={ArXiv preprint arXiv:2202.09927},\n", + "}\n", + "```\n", + "\n", + "* [Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives](https://openreview.net/forum?id=0Ij9_q567Ma). Shaokun Zhang, Feiran Jia, Chi Wang, Qingyun Wu. ICLR 2023 (notable-top-5%).\n", + "\n", + "```bibtex\n", + "@inproceedings{zhang2023targeted,\n", + " title={Targeted Hyperparameter Optimization with Lexicographic Preferences Over Multiple Objectives},\n", + " author={Shaokun Zhang and Feiran Jia and Chi Wang and Qingyun Wu},\n", + " booktitle={International Conference on Learning Representations},\n", + " year={2023},\n", + " url={https://openreview.net/forum?id=0Ij9_q567Ma},\n", + "}\n", + "```\n", + "\n", + "* [Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference](https://arxiv.org/abs/2303.04673). Chi Wang, Susan Xueqing Liu, Ahmed H. Awadallah. 
ArXiv preprint arXiv:2303.04673 (2023).\n", + "\n", + "```bibtex\n", + "@inproceedings{wang2023EcoOptiGen,\n", + " title={Cost-Effective Hyperparameter Optimization for Large Language Model Generation Inference},\n", + " author={Chi Wang and Susan Xueqing Liu and Ahmed H. Awadallah},\n", + " year={2023},\n", + " booktitle={ArXiv preprint arXiv:2303.04673},\n", + "}\n", + "```\n", + "\n", + "* [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337). Yiran Wu, Feiran Jia, Shaokun Zhang, Hangyu Li, Erkang Zhu, Yue Wang, Yin Tat Lee, Richard Peng, Qingyun Wu, Chi Wang. ArXiv preprint arXiv:2306.01337 (2023).\n", + "\n", + "```bibtex\n", + "@inproceedings{wu2023empirical,\n", + " title={An Empirical Study on Challenging Math Problem Solving with GPT-4},\n", + " author={Yiran Wu and Feiran Jia and Shaokun Zhang and Hangyu Li and Erkang Zhu and Yue Wang and Yin Tat Lee and Richard Peng and Qingyun Wu and Chi Wang},\n", + " year={2023},\n", + " booktitle={ArXiv preprint arXiv:2306.01337},\n", + "}\n", + "```\n", + "\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "There is no function named `tune_automl` in FLAML. However, FLAML has integrated Spark for distributed training. There are two main aspects of integration with Spark: \n", + "- Use Spark ML Estimators for AutoML.\n", + "- Use Spark to run training in parallel Spark jobs.\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "# reset the assistant. Always reset the assistant before starting a new conversation.\n", + "assistant.reset()\n", + "\n", + "# set `human_input_mode` to be `ALWAYS`, so the agent will ask for human input at every step.\n", + "ragproxyagent.human_input_mode = \"ALWAYS\"\n", + "qa_problem = \"Is there a function named `tune_automl` in FLAML?\"\n", + "ragproxyagent.initiate_chat(\n", + " assistant, message=ragproxyagent.message_generator, problem=qa_problem\n", + ") # type \"exit\" to exit the conversation" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example 5\n", + "\n", + "[Back to top](#table-of-contents)\n", + "\n", + "Use RetrieveChat to answer questions for [NaturalQuestion](https://ai.google.com/research/NaturalQuestions) dataset.\n", + "\n", + "First, we will create a new document collection which includes all the contextual corpus. Then, we will choose some questions and utilize RetrieveChat to answer them. For this particular example, we will be using the `gpt-3.5-turbo` model, and we will demonstrate RetrieveChat's feature of automatically updating context in case the documents retrieved do not contain sufficient information." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "config_list[0][\"model\"] = \"gpt-35-turbo\" # change model to gpt-35-turbo" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "corpus_file = \"https://huggingface.co/datasets/thinkall/NaturalQuestionsQA/resolve/main/corpus.txt\"\n", + "\n", + "# Create a new collection for NaturalQuestions dataset\n", + "# `task` indicates the kind of task we're working on. 
In this example, it's a `qa` task.\n", + "ragproxyagent = RetrieveUserProxyAgent(\n", + " name=\"ragproxyagent\",\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=10,\n", + " retrieve_config={\n", + " \"task\": \"qa\",\n", + " \"docs_path\": corpus_file,\n", + " \"chunk_token_size\": 2000,\n", + " \"model\": config_list[0][\"model\"],\n", + " \"client\": chromadb.PersistentClient(path=\"/tmp/chromadb\"),\n", + " \"collection_name\": \"natural-questions\",\n", + " \"chunk_mode\": \"one_line\",\n", + " \"embedding_model\": \"all-MiniLM-L6-v2\",\n", + " },\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['what is non controlling interest on balance sheet', 'how many episodes are in chicago fire season 4', 'what are bulls used for on a farm', 'has been honoured with the wisden leading cricketer in the world award for 2016', 'who carried the usa flag in opening ceremony']\n", + "[[\"the portion of a subsidiary corporation 's stock that is not owned by the parent corporation\"], ['23'], ['breeding', 'as work oxen', 'slaughtered for meat'], ['Virat Kohli'], ['Erin Hamlin']]\n" + ] + } + ], + "source": [ + "# queries_file = \"https://huggingface.co/datasets/thinkall/NaturalQuestionsQA/resolve/main/queries.jsonl\"\n", + "queries = \"\"\"{\"_id\": \"ce2342e1feb4e119cb273c05356b33309d38fa132a1cbeac2368a337e38419b8\", \"text\": \"what is non controlling interest on balance sheet\", \"metadata\": {\"answer\": [\"the portion of a subsidiary corporation 's stock that is not owned by the parent corporation\"]}}\n", + "{\"_id\": \"3a10ff0e520530c0aa33b2c7e8d989d78a8cd5d699201fc4b13d3845010994ee\", \"text\": \"how many episodes are in chicago fire season 4\", \"metadata\": {\"answer\": [\"23\"]}}\n", + "{\"_id\": \"fcdb6b11969d5d3b900806f52e3d435e615c333405a1ff8247183e8db6246040\", \"text\": \"what are bulls used for on a farm\", \"metadata\": {\"answer\": [\"breeding\", \"as work oxen\", \"slaughtered for meat\"]}}\n", + "{\"_id\": \"26c3b53ec44533bbdeeccffa32e094cfea0cc2a78c9f6a6c7a008ada1ad0792e\", \"text\": \"has been honoured with the wisden leading cricketer in the world award for 2016\", \"metadata\": {\"answer\": [\"Virat Kohli\"]}}\n", + "{\"_id\": \"0868d0964c719a52cbcfb116971b0152123dad908ac4e0a01bc138f16a907ab3\", \"text\": \"who carried the usa flag in opening ceremony\", \"metadata\": {\"answer\": [\"Erin Hamlin\"]}}\n", + "\"\"\"\n", + "queries = [json.loads(line) for line in queries.split(\"\\n\") if line]\n", + "questions = [q[\"text\"] for q in queries]\n", + "answers = [q[\"metadata\"][\"answer\"] for q in queries]\n", + "print(questions)\n", + "print(answers)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + ">>>>>>>>>>>> Below are outputs of Case 1 <<<<<<<<<<<<\n", + "\n", + "\n", + "Trying to create collection.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
Film Year Fuck count Minutes Uses / mi ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
Character Ultimate Avengers Ultimate Avengers 2 I ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
Position Country Town / City PM2. 5 PM ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
Rank Country ( or dependent territory ) Population
Rank State Gross collections ( in thousands ) Rev ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t < ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
Date Province Mag . MMI Deaths
City River State
Gangakhed ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
Player Pos . Team Career start Career ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t ABO and Rh blood type distribution by country ( population averages )
Country
Total area Land area Performance in the European Cup and UEFA Champions League by club
  • ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Rank City State Land area ( sq mi ) La ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    # Country Name International goals Cap ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Rank City Image Population Definition ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Rank Team Won Lost Tied Pct ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Territory Rights holder Ref
    Asia
    ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    ( hide ) Rank Nat Name Years Goals
    Total area Land area
    Bids by school Most recent
    Rank Name Nation TP SP
    2014 Rank City 2014 Estimate 2010 Census
    S.No . Year Name
    1961
    Densities of various materials covering a range of values
    Material ρ ( ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Club Season League Nation ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Rank ( 2016 ) Airports ( large hubs ) IATA Code M ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    City Region / State Country Park name ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Year Winner ( nationally ) Votes Percent
    Compound SERT NET DAT 5 - HT
    Rank Name Industry Revenue ( USD millions )
    ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Rank Name Name in Georgian Population 1989
    Country The World Factbook World Res ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Rank Country Area ( km2 ) Notes
    ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Rank Country Area ( km2 ) Notes
    Date State ( s ) Magnitude Fatalities ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t < ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t
    Artist # Gold # Platinum # Multi-Platinum
    Name Number of locations Revenue
    Name Country Region Depth ( meters ) < ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "UPDATE CONTEXT. The current context does not provide information related to the question.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32mUpdating context and resetting conversation.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1122 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2398 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_309 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3891 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2087 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_330 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4844 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "You must give as short an answer as possible.\n", + "\n", + "User's question is: has been honoured with the wisden leading cricketer in the world award for 2016\n", + "\n", + "Context is:
    Rank Player ( 2017 HRs ) HR
    ...\n", + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\t ...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "doc_ids: [['doc_0', 'doc_3334', 'doc_720', 'doc_2732', 'doc_2510', 'doc_5084', 'doc_5068', 'doc_3727', 'doc_1938', 'doc_4689', 'doc_5249', 'doc_1751', 'doc_480', 'doc_3989', 'doc_2115', 'doc_1233', 'doc_2264', 'doc_633', 'doc_2376', 'doc_2293', 'doc_5274', 'doc_5213', 'doc_3991', 'doc_2880', 'doc_2737', 'doc_1257', 'doc_1748', 'doc_2038', 'doc_4073', 'doc_2876']]\n", + "\u001b[32mAdding doc_id doc_0 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3334 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_720 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2732 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2510 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5084 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5068 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3727 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1938 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4689 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5249 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1751 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_480 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3989 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3334 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_720 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2732 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2510 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5084 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5068 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3727 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1938 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4689 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5249 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1751 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_480 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3989 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2115 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1233 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2264 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_633 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2376 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "You must give as short an answer as possible.\n", + "\n", + "User's question is: what is non controlling interest on balance sheet\n", + "\n", + "Context is:

    In accounting , minority interest ( or non-controlling interest ) is the portion of a subsidiary corporation 's stock that is not owned by the parent corporation . The magnitude of the minority interest in the subsidiary company is generally less than 50 % of outstanding shares , or the corporation would generally cease to be a subsidiary of the parent .

    \n", + "

    The balance sheet is the financial statement showing a firm 's assets , liabilities and equity ( capital ) at a set point in time , usually the end of the fiscal year reported on the accompanying income statement . The total assets always equal the total combined liabilities and equity in dollar amount . This statement best demonstrates the basic accounting equation - Assets = Liabilities + Equity . The statement can be used to help show the status of a company .

    \n", + "

    The comptroller ( who is also auditor general and head of the National Audit Office ) controls both the Consolidated Fund and the National Loans Fund . The full official title of the role is Comptroller General of the Receipt and Issue of Her Majesty 's Exchequer .

    \n", + "

    Financing activities include the inflow of cash from investors such as banks and shareholders , as well as the outflow of cash to shareholders as dividends as the company generates income . Other activities which impact the long - term liabilities and equity of the company are also listed in the financing activities section of the cash flow statement .

    \n", + "

    It is frequently claimed that annual accounts have not been certified by the external auditor since 1994 . In its annual report on the implementation of the 2009 EU Budget , the Court of Auditors found that the two biggest areas of the EU budget , agriculture and regional spending , have not been signed off on and remain `` materially affected by error '' .

    \n", + "

    The Ministry of Finance , Government of India announces the rate of interest for PPF account every quarter . The current interest rate effective from 1 January 2018 is 7.6 % Per Annum ' ( compounded annually ) . Interest will be paid on 31 March every year . Interest is calculated on the lowest balance between the close of the fifth day and the last day of every month .

    \n", + "
    No . Athlete Nation Sport Years
    Quarter Interest Rate
    April 2018 - June 2018 7.6 %
    \n", + "

    For a percentage of the settlement amount , Public adjusters work exclusively for the policyholder . This means there should be no inherent conflict of interest when it comes to advocating on the policyholder 's behalf to the insurance company .

    \n", + "

    Accounts receivable is a legally enforceable claim for payment held by a business for goods supplied and / or services rendered that customers / clients have ordered but not paid for . These are generally in the form of invoices raised by a business and delivered to the customer for payment within an agreed time frame . Accounts receivable is shown in a balance sheet as an asset . It is one of a series of accounting transactions dealing with the billing of a customer for goods and services that the customer has ordered . These may be distinguished from notes receivable , which are debts created through formal legal instruments called promissory notes .

    \n", + "

    A common synonym for net profit when discussing financial statements ( which include a balance sheet and an income statement ) is the bottom line . This term results from the traditional appearance of an income statement which shows all allocated revenues and expenses over a specified time period with the resulting summation on the bottom line of the report .

    \n", + " Electronic Fund Transfer Act
    Other short titles
    • Financial Institutions Regulatory and Interest Rate Control Act of 1978
    • Change in Bank Control Act
    • Change in Savings and Loan Control Act
    • Depository Institution Management Interlocks Act
    • Export - Import Bank Act Amendments
    • Federal Financial Institutions Examination Council Act
    • National Credit Union Central Liquidity Facility Act
    • Right to Financial Privacy Act
    Long title An Act to extend the authority for the flexible regulation of interest rates on deposits and accounts in depository institutions .
    Nicknames American Arts Gold Medallion Act
    Enacted by the 95th United States Congress
    Effective November 10 , 1978
    Citations
    Public law 95 - 630
    Statutes at Large 92 Stat. 3641 aka 92 Stat. 3728
    Codification
    Titles amended
    • 12 U.S.C. : Banks and Banking
    • 15 U.S.C. : Commerce and Trade
    U.S.C. sections amended
    • 12 U.S.C. ch. 3 § 226 et seq .
    • 15 U.S.C. ch. 41 § 1601 et seq .
    • 15 U.S.C. ch. 41 § 1693 et seq .
    Legislative history
    • Introduced in the House as H.R. 14279 by Fernand St. Germain ( D - RI ) on October 10 , 1978
    • Committee consideration by House Banking , Finance , and Urban Affairs , Senate Banking , Housing , and Urban Affairs
    • Passed the House on October 11 , 1978 ( passed )
    • Passed the Senate on October 12 , 1978 ( passed ) with amendment
    • House agreed to Senate amendment on October 14 , 1978 ( 341 - 32 , in lieu of H. Res. 1439 ) with further amendment
    • Senate agreed to House amendment on October 14 , 1978 ( agreed )
    • Signed into law by President Jimmy Carter on November 10 , 1978
    Major amendments
    Credit CARD Act of 2009
    \n", + "

    Financial management refers to the efficient and effective management of money ( funds ) in such a manner as to accomplish the objectives of the organization . It is the specialized function directly associated with the top management . The significance of this function is not seen in the ' Line ' but also in the capacity of the ' Staff ' in overall of a company . It has been defined differently by different experts in the field .

    \n", + "

    Form 990 ( officially , the `` Return of Organization Exempt From Income Tax '' ) is a United States Internal Revenue Service form that provides the public with financial information about a nonprofit organization . It is often the only source of such information . It is also used by government agencies to prevent organizations from abusing their tax - exempt status . Certain nonprofits have more comprehensive reporting requirements , such as hospitals and other health care organizations ( Schedule H ) .

    \n", + "

    The Board of Governors of the Federal Reserve System , commonly known as the Federal Reserve Board , is the main governing body of the Federal Reserve System . It is charged with overseeing the Federal Reserve Banks and with helping implement monetary policy of the United States . Governors are appointed by the President of the United States and confirmed by the Senate for staggered 14 - year terms .

    \n", + "

    The International Monetary Fund ( IMF ) is an international organization headquartered in Washington , D.C. , of `` 189 countries working to foster global monetary cooperation , secure financial stability , facilitate international trade , promote high employment and sustainable economic growth , and reduce poverty around the world . '' Formed in 1945 at the Bretton Woods Conference primarily by the ideas of Harry Dexter White and John Maynard Keynes , it came into formal existence in 1945 with 29 member countries and the goal of reconstructing the international payment system . It now plays a central role in the management of balance of payments difficulties and international financial crises . Countries contribute funds to a pool through a quota system from which countries experiencing balance of payments problems can borrow money . As of 2016 , the fund had SDR 477 billion ( about $668 billion ) .

    \n", + "
  • Callability -- Some bonds give the issuer the right to repay the bond before the maturity date on the call dates ; see call option . These bonds are referred to as callable bonds . Most callable bonds allow the issuer to repay the bond at par . With some bonds , the issuer has to pay a premium , the so - called call premium . This is mainly the case for high - yield bonds . These have very strict covenants , restricting the issuer in its operations . To be free from these covenants , the issuer can repay the bonds early , but only at a high cost .
  • \n", + "

    On November 7 , 2016 , debt held by the public was $14.3 trillion or about 76 % of the previous 12 months of GDP . Intragovernmental holdings stood at $5.4 trillion , giving a combined total gross national debt of $19.8 trillion or about 106 % of the previous 12 months of GDP ; $6.2 trillion or approximately 45 % of the debt held by the public was owned by foreign investors , the largest of which were Japan and China at about $1.09 trillion for Japan and $1.06 trillion for China as of December 2016 .

    \n", + "

    A currency transaction report ( CTR ) is a report that U.S. financial institutions are required to file with FinCEN for each deposit , withdrawal , exchange of currency , or other payment or transfer , by , through , or to the financial institution which involves a transaction in currency of more than $10,000 . Used in this context , currency means the coin and / or paper money of any country that is designated as legal tender by the country of issuance . Currency also includes U.S. silver certificates , U.S. notes , Federal Reserve notes , and official foreign bank notes .

    \n", + "

    Checks and balances is the principle that each of the Branches has the power to limit or check the other two and this creates a balance between the three separate powers of the state , this principle induces that the ambitions of one branch prevent that one of the other branches become supreme , and thus be eternally confronting each other and in that process leaving the people free from government abuses . Checks and Balances are designed to maintain the system of separation of powers keeping each branch in its place . This is based on the idea that it is not enough to separate the powers and guarantee their independence but to give the various branches the constitutional means to defend their own legitimate powers from the encroachments of the other branches . They guarantee that the powers of the State have the same weight ( co-equal ) , that is , to be balanced , so that they can limit each other , avoiding the abuse of state power . the origin of checks and balances , like separation of powers itself , is specifically credited to Montesquieu in the Enlightenment ( in The Spirit of the Laws , 1748 ) , under this influence was implemented in 1787 in the Constitution of the United States .

    \n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "Non controlling interest on balance sheet refers to the portion of a subsidiary corporation's stock that is not owned by the parent corporation. It represents ownership of less than 50% of the outstanding shares. It is shown as a separate line item in the equity section of the balance sheet.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\n", + "\n", + ">>>>>>>>>>>> Below are outputs of Case 2 <<<<<<<<<<<<\n", + "\n", + "\n", + "doc_ids: [['doc_1', 'doc_1097', 'doc_4221', 'doc_4972', 'doc_1352', 'doc_96', 'doc_988', 'doc_2370', 'doc_2414', 'doc_5038', 'doc_302', 'doc_1608', 'doc_980', 'doc_2112', 'doc_562', 'doc_4204', 'doc_3298', 'doc_2995', 'doc_3978', 'doc_1258', 'doc_2971', 'doc_2171', 'doc_1065', 'doc_17', 'doc_2683', 'doc_87', 'doc_1767', 'doc_158', 'doc_482', 'doc_3850']]\n", + "\u001b[32mAdding doc_id doc_1 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1097 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4221 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4972 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1352 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_96 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_988 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2370 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2414 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5038 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_302 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1608 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_980 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2112 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_562 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4204 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3298 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2995 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3978 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1258 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2971 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2171 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1065 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_17 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2683 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "You must give as short an answer as possible.\n", + "\n", + "User's question is: how many episodes are in chicago fire season 4\n", + "\n", + "Context is:

    The fourth season of Chicago Fire , an American drama television series with executive producer Dick Wolf , and producers Derek Haas , Michael Brandt , and Matt Olmstead , was ordered on February 5 , 2015 , by NBC , and premiered on October 13 , 2015 and concluded on May 17 , 2016 . The season contained 23 episodes .

    \n", + "

    The fourth season began airing on October 10 , 2017 , and is set to run for 23 episodes on The CW until May 22 , 2018 .

    \n", + "

    The fourth season began airing on October 10 , 2017 , on The CW .

    \n", + "

    The fifth season of Chicago P.D. , an American police drama television series with executive producer Dick Wolf , and producers Derek Haas , Michael Brandt , and Rick Eid , premiered on September 27 , 2017 . This season featured its 100th episode .

    \n", + "

    This was the city of Chicago 's first professional sports championship since the Chicago Fire won MLS Cup ' 98 ( which came four months after the Chicago Bulls ' sixth NBA championship that year ) . The next major Chicago sports championship came in 2010 , when the NHL 's Chicago Blackhawks ended a 49 - year Stanley Cup title drought . With the Chicago Bears ' win in Super Bowl XX and the Chicago Cubs ' own World Series championship in 2016 , all Chicago sports teams have won at least one major championship since 1985 . Meanwhile , the Astros themselves made it back to the World Series in 2017 , but this time as an AL team , where they defeated the Los Angeles Dodgers in seven games , resulting in Houston 's first professional sports championship since the 2006 -- 07 Houston Dynamo won their back - to - back MLS Championships .

    \n", + "

    The season was ordered in May 2017 , and production began the following month . Ben McKenzie stars as Gordon , alongside Donal Logue , David Mazouz , Morena Baccarin , Sean Pertwee , Robin Lord Taylor , Erin Richards , Camren Bicondova , Cory Michael Smith , Jessica Lucas , Chris Chalk , Drew Powell , Crystal Reed and Alexander Siddig . The fourth season premiered on September 21 , 2017 , on Fox , while the second half premiered on March 1 , 2018 .

    \n", + "

    As of May 24 , 2017 , 58 episodes of The 100 have aired , concluding the fourth season . In March 2017 , The CW renewed the series for a fifth season , set to premiere on April 24 , 2018 .

    \n", + "

    The fifth book , River of Fire , is scheduled to be released on April 10 , 2018 .

    \n", + "

    On September 10 , 2013 , AMC officially cancelled the series after 38 episodes and three seasons . However , on November 15 , 2013 , Netflix ordered a fourth and final season of six episodes , that was released on Netflix on August 1 , 2014 .

    \n", + "

    The second season of Fargo , an American anthology black comedy -- crime drama television series created by Noah Hawley , premiered on October 12 , 2015 , on the basic cable network FX . Its principal cast consists of Kirsten Dunst , Patrick Wilson , Jesse Plemons , Jean Smart , and Ted Danson . The season had ten episodes , and its initial airing concluded on December 14 , 2015 . As an anthology , each Fargo season possesses its own self - contained narrative , following a disparate set of characters in various settings .

    \n", + "

    The Great Fire of London was a major conflagration that swept through the central parts of the English city of London from Sunday , 2 September to Wednesday , 5 September 1666 . The fire gutted the medieval City of London inside the old Roman city wall . It threatened but did not reach the aristocratic district of Westminster , Charles II 's Palace of Whitehall , and most of the suburban slums . It consumed 13,200 houses , 87 parish churches , St Paul 's Cathedral , and most of the buildings of the City authorities . It is estimated to have destroyed the homes of 70,000 of the City 's 80,000 inhabitants .

    \n", + "

    The first season consisted of eight one - hour - long episodes which were released worldwide on Netflix on July 15 , 2016 , in Ultra HD 4K . The second season , consisting of nine episodes , was released on October 27 , 2017 in HDR . A teaser for the second season , which also announced the release date , aired during Super Bowl LI .

    \n", + "

    `` Two Days Before the Day After Tomorrow '' is the eighth episode in the ninth season of the American animated television series South Park . The 133rd overall episode overall , it originally aired on Comedy Central in the United States on October 19 , 2005 . In the episode , Stan and Cartman accidentally destroy a dam , causing the town of Beaverton to be destroyed .

    \n", + "

    The fourth season consists of a double order of twenty episodes , split into two parts of ten episodes ; the second half premiered on November 30 , 2016 . The season follows the battles between Ragnar and Rollo in Francia , Bjorn 's raid into the Mediterranean , and the Viking invasion of England . It concluded in its entirety on February 1 , 2017 .

    \n", + "

    This is an episode list for Sabrina the Teenage Witch , an American sitcom that debuted on ABC in 1996 . From Season 5 , the program was aired on The WB . The series ran for seven seasons totaling 163 episodes . It originally premiered on September 27 , 1996 on ABC and ended on April 24 , 2003 on The WB .

    \n", + "

    Hart of Dixie was renewed by The CW for 10 episode season on May 8 , 2014 . The show 's fourth and final season premiered on November 15 , 2014 . The series was later cancelled on May 7 , 2015 .

    \n", + "

    The Burning Maze is the third book in the series . It is scheduled to be released on May 1 , 2018 .

    \n", + "
    My Name Is Earl ( season 4 )
    DVD cover
    Country of origin United States
    No. of episodes 27
    Release
    Original network NBC
    Original release September 25 , 2008 -- May 14 , 2009
    Season chronology
    ← Previous Season 3
    List of My Name Is Earl episodes
    \n", + "

    The eighteenth season of Law & Order : Special Victims Unit debuted on Wednesday , September 21 , 2016 , on NBC and finished on Wednesday , May 24 , 2017 , with a two - hour season finale .

    \n", + "

    The eighth and final season of the fantasy drama television series Game of Thrones was announced by HBO in July 2016 . Unlike the first six seasons that each had ten episodes and the seventh that had seven episodes , the eighth season will have only six episodes . Like the previous season , it will largely consist of original content not found currently in George R.R. Martin 's A Song of Ice and Fire series , and will instead adapt material Martin has revealed to showrunners about the upcoming novels in the series , The Winds of Winter and A Dream of Spring .

    \n", + "

    A total of 49 episodes of The Glades were produced and aired over four seasons .

    \n", + "

    Sneaky Pete is an American crime drama series created by David Shore and Bryan Cranston . The series follows Marius Josipović ( Giovanni Ribisi ) , a released convict who adopts the identity of his cell mate , Pete Murphy , in order to avoid his past life . The series also stars Marin Ireland , Shane McRae , Libe Barer , Michael Drayer , Peter Gerety , and Margo Martindale . The pilot debuted on August 7 , 2015 , and was followed by a full series order that September . Shore left the project in early 2016 and was replaced by Graham Yost , who served as executive producer and showrunner for the remaining nine episodes . The first season premiered in its entirety on January 13 , 2017 , exclusively on Amazon Video . On January 19 , 2017 , Amazon announced that Sneaky Pete had been renewed for a second season , which was released on March 9 , 2018 .

    \n", + "

    The eighth season of Blue Bloods , a police procedural drama series created by Robin Green and Mitchell Burgess , premiered on CBS on September 29 , 2017 . The season is set to contain 22 episodes .

    \n", + "

    The first five seasons of Prison Break have been released on DVD and Blu - ray in Regions 1 , 2 , and 4 . Each DVD boxed set includes all of the broadcast episodes from that season , the associated special episode , commentary from cast and crew , and profiles of various parts of Prison Break , such as Fox River State Penitentiary or the tattoo . Prison Break is also available online , including iTunes , Amazon Video , and Netflix . After the premiere of the second season of Prison Break , Fox began online streaming of the prior week 's episode , though it originally restricted viewing to the United States .

    \n", + "

    In June 2017 , Remini was upped to a series regular starting with Season 2 ; shortly after , it was announced that Erinn Hayes would not be returning for the show 's second season . Sources cited in a Variety article confirmed that Remini would be returning as Detective Vanessa Cellucci , the character she portrayed in the first - season finale , and that Hayes ' dismissal was for creative reasons and `` not a reflection '' of the actress ' performance . In August 2017 , it was reported Hayes ' character will be killed off before season two begins and the season will take place 7 -- 10 months after season one ended , in order to make room for Remini .

    \n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "There are 23 episodes in Chicago Fire season 4.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\n", + "\n", + ">>>>>>>>>>>> Below are outputs of Case 3 <<<<<<<<<<<<\n", + "\n", + "\n", + "doc_ids: [['doc_47', 'doc_45', 'doc_2570', 'doc_2851', 'doc_4033', 'doc_5320', 'doc_3849', 'doc_4172', 'doc_3202', 'doc_2282', 'doc_1896', 'doc_949', 'doc_103', 'doc_1552', 'doc_2791', 'doc_392', 'doc_1175', 'doc_5315', 'doc_832', 'doc_3185', 'doc_2532', 'doc_3409', 'doc_824', 'doc_4075', 'doc_1201', 'doc_4116', 'doc_1448', 'doc_2545', 'doc_2251', 'doc_2485']]\n", + "\u001b[32mAdding doc_id doc_47 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_45 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2570 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2851 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4033 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5320 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3849 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4172 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3202 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2282 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1896 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_949 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_103 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1552 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2791 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_392 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1175 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5315 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_832 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3185 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2532 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "You must give as short an answer as possible.\n", + "\n", + "User's question is: what are bulls used for on a farm\n", + "\n", + "Context is:

    Many cattle ranches and stations run bulls with cows , and most dairy or beef farms traditionally had at least one , if not several , bulls for purposes of herd maintenance . However , the problems associated with handling a bull ( particularly where cows must be removed from its presence to be worked ) has prompted many dairy farmers to restrict themselves to artificial insemination ( AI ) of the cows . Semen is removed from the bulls and stored in canisters of liquid nitrogen , where it is kept until it can be sold , at which time it can be very profitable , in fact , many ranchers keep bulls specifically for this purpose . AI is also used to increase the quality of a herd , or to introduce an outcross of bloodlines . Some ranchers prefer to use AI to allow them to breed to several different bulls in a season or to breed their best stock to a higher quality bull than they could afford to purchase outright . AI may also be used in conjunction with embryo transfer to allow cattle producers to add new breeding to their herds .

    \n", + "

    Other than the few bulls needed for breeding , the vast majority of male cattle are slaughtered for meat before the age of three years , except where they are needed ( castrated ) as work oxen for haulage . Most of these beef animals are castrated as calves to reduce aggressive behavior and prevent unwanted mating , although some are reared as uncastrated bull beef . A bull is typically ready for slaughter one or two months sooner than a castrated male or a female , and produces proportionately more , leaner muscle .

    \n", + "

    Pastoral farming is the major land use but there are increases in land area devoted to horticulture .

    \n", + "

    Animal fibers are natural fibers that consist largely of particular proteins . Instances are silk , hair / fur ( including wool ) and feathers . The animal fibers used most commonly both in the manufacturing world as well as by the hand spinners are wool from domestic sheep and silk . Also very popular are alpaca fiber and mohair from Angora goats . Unusual fibers such as Angora wool from rabbits and Chiengora from dogs also exist , but are rarely used for mass production .

    \n", + "

    In 2012 , there were 3.2 million farmers , ranchers and other agricultural managers and an estimated 757,900 agricultural workers were legally employed in the US . Animal breeders accounted for 11,500 of those workers with the rest categorized as miscellaneous agricultural workers . The median pay was $9.12 per hour or $18,970 per year . In 2009 , about 519,000 people under age 20 worked on farms owned by their family . In addition to the youth who lived on family farms , an additional 230,000 youth were employed in agriculture . In 2004 , women made up approximately 24 % of farmers ; that year , there were 580,000 women employed in agriculture , forestry , and fishing .

    \n", + "

    The recipe can vary widely . The defining ingredients are minced meat ( commonly beef when named cottage pie or lamb when named shepherd 's pie ) , typically cooked in a gravy with onions and sometimes other vegetables , such as peas , celery or carrots , and topped with mashed potato . The pie is sometimes also topped with grated cheese .

    \n", + "

    The history of the domesticated sheep goes back to between 11000 and 9000 BC , and the domestication of the wild mouflon in ancient Mesopotamia . Sheep are among the first animals to have been domesticated by humans , and there is evidence of sheep farming in Iranian statuary dating to that time period . These sheep were primarily raised for meat , milk , and skins . Woolly sheep began to be developed around 6000 BC in Iran , and cultures such as the Persians relied on sheep 's wool for trading . They were then imported to Africa and Europe via trading .

    \n", + "

    Although large - scale use of wheels did not occur in the Americas prior to European contact , numerous small wheeled artifacts , identified as children 's toys , have been found in Mexican archeological sites , some dating to about 1500 BC . It is thought that the primary obstacle to large - scale development of the wheel in the Americas was the absence of domesticated large animals which could be used to pull wheeled carriages . The closest relative of cattle present in Americas in pre-Columbian times , the American Bison , is difficult to domesticate and was never domesticated by Native Americans ; several horse species existed until about 12,000 years ago , but ultimately became extinct . The only large animal that was domesticated in the Western hemisphere , the llama , did not spread far beyond the Andes by the time of the arrival of Columbus .

    \n", + "

    The Call of the Wild is a short adventure novel by Jack London published in 1903 and set in Yukon , Canada during the 1890s Klondike Gold Rush , when strong sled dogs were in high demand . The central character of the novel is a dog named Buck . The story opens at a ranch in Santa Clara Valley , California , when Buck is stolen from his home and sold into service as a sled dog in Alaska . He becomes progressively feral in the harsh environment , where he is forced to fight to survive and dominate other dogs . By the end , he sheds the veneer of civilization , and relies on primordial instinct and learned experience to emerge as a leader in the wild .

    \n", + "

    The Three Little Pigs was included in The Nursery Rhymes of England ( London and New York , c. 1886 ) , by James Halliwell - Phillipps . The story in its arguably best - known form appeared in English Fairy Tales by Joseph Jacobs , first published in 1890 and crediting Halliwell as his source . The story begins with the title characters being sent out into the world by their mother , to `` seek out their fortune '' . The first little pig builds a house of straw , but a wolf blows it down and devours him . The second little pig builds a house of sticks , which the wolf also blows down , and the second little pig is also devoured . Each exchange between wolf and pig features ringing proverbial phrases , namely :

    \n", + "

    `` How now brown cow '' ( / ˈhaʊ ˈnaʊ ˈbraʊn ˈkaʊ / ) is a phrase used in elocution teaching to demonstrate rounded vowel sounds . Each `` ow '' sound in the phrase represents the diphthong / aʊ / . Although orthographies for each of the four words in this utterance is represented by the English spelling `` ow '' , the articulation required to create this same diphthong represented by the International Phonetic Association 's phonetic alphabet as / aʊ / is also represented by the spelling `` ou '' . Some examples of these homophonic / aʊ / 's are the English words `` house '' , `` blouse '' , `` noun '' , and `` cloud '' . The use of the phrase `` how now brown cow '' in teaching elocution can be dated back to at least 1926 . Although not in use today , the phrase `` how now '' is a greeting , short for `` how say you now '' , and can be found in archaic literature , such as the plays of William Shakespeare .

    \n", + "

    Brisket is a cut of meat from the breast or lower chest of beef or veal . The beef brisket is one of the nine beef primal cuts , though the precise definition of the cut differs internationally . The brisket muscles include the superficial and deep pectorals . As cattle do not have collar bones , these muscles support about 60 % of the body weight of standing / moving cattle . This requires a significant amount of connective tissue , so the resulting meat must be cooked correctly to tenderize the connective tissue .

    \n", + "

    The music to `` Man Gave Names to All the Animals '' is reggae - inspired . The lyrics were inspired by the biblical Book of Genesis , verses 2 : 19 -- 20 in which Adam named the animals and birds . The lyrics have an appeal to children , rhyming the name of the animal with one of its characteristics . So after describing an animal 's `` muddy trail '' and `` curly tail , '' Dylan sings that `` he was n't too small and he was n't too big '' and so that animal was named a pig . Similarly , the cow got its name because Adam `` saw milk comin ' out but he did n't know how '' and the bear got its name because it has a `` great big furry back and furry hair . ''

    \n", + "

    As early as 1671 railed roads were in use in Durham to ease the conveyance of coal ; the first of these was the Tanfield Wagonway . Many of these tramroads or wagon ways were built in the 17th and 18th centuries . They used simply straight and parallel rails of timber on which carts with simple flanged iron wheels were drawn by horses , enabling several wagons to be moved simultaneously .

    \n", + "

    Unicorns are not found in Greek mythology , but rather in the accounts of natural history , for Greek writers of natural history were convinced of the reality of unicorns , which they believed lived in India , a distant and fabulous realm for them . The earliest description is from Ctesias , who in his book Indika ( `` On India '' ) described them as wild asses , fleet of foot , having a horn a cubit and a half ( 700 mm , 28 inches ) in length , and colored white , red and black . Aristotle must be following Ctesias when he mentions two one - horned animals , the oryx ( a kind of antelope ) and the so - called `` Indian ass '' . Strabo says that in the Caucasus there were one - horned horses with stag - like heads . Pliny the Elder mentions the oryx and an Indian ox ( perhaps a rhinoceros ) as one - horned beasts , as well as `` a very fierce animal called the monoceros which has the head of the stag , the feet of the elephant , and the tail of the boar , while the rest of the body is like that of the horse ; it makes a deep lowing noise , and has a single black horn , which projects from the middle of its forehead , two cubits ( 900 mm , 35 inches ) in length . '' In On the Nature of Animals ( Περὶ Ζῴων Ἰδιότητος , De natura animalium ) , Aelian , quoting Ctesias , adds that India produces also a one - horned horse ( iii. 41 ; iv. 52 ) , and says ( xvi. 20 ) that the monoceros ( Greek : μονόκερως ) was sometimes called cartazonos ( Greek : καρτάζωνος ) , which may be a form of the Arabic karkadann , meaning `` rhinoceros '' .

    \n", + "

    The First Battle of Bull Run ( the name used by Union forces ) , also known as the First Battle of Manassas ( the name used by Confederate forces ) , was fought on July 21 , 1861 in Prince William County , Virginia , just north of the city of Manassas and about 25 miles west - southwest of Washington , D.C. It was the first major battle of the American Civil War . The Union 's forces were slow in positioning themselves , allowing Confederate reinforcements time to arrive by rail . Each side had about 18,000 poorly trained and poorly led troops in their first battle . It was a Confederate victory , followed by a disorganized retreat of the Union forces .

    \n", + "

    Hops production is concentrated in moist temperate climates , with much of the world 's production occurring near the 48th parallel north . Hop plants prefer the same soils as potatoes and the leading potato - growing states in the United States are also major hops - producing areas ; however , not all potato - growing areas can produce good hops naturally : soils in the Maritime Provinces of Canada , for example , lack the boron that hops prefer . Historically , hops were not grown in Ireland , but were imported from England . In 1752 more than 500 tons of English hops were imported through Dublin alone .

    \n", + "

    Shepherd 's pie or cottage pie is a meat pie with a crust of mashed potato .

    \n", + "

    Castles served a range of purposes , the most important of which were military , administrative , and domestic . As well as defensive structures , castles were also offensive tools which could be used as a base of operations in enemy territory . Castles were established by Norman invaders of England for both defensive purposes and to pacify the country 's inhabitants . As William the Conqueror advanced through England , he fortified key positions to secure the land he had taken . Between 1066 and 1087 , he established 36 castles such as Warwick Castle , which he used to guard against rebellion in the English Midlands .

    \n", + "

    The Rocky and Bullwinkle Show remained in syndicated reruns and was still available for local television stations through The Program Exchange as late as 2016 ; WBBZ - TV , for instance , aired the show in a strip to counterprogram 10 PM newscasts in the Buffalo , New York market during the summer 2013 season . The underlying rights are now owned by Universal Pictures , which holds the library of predecessor companies DreamWorks Animation and Classic Media , and who in turn with copyright holder Ward Productions forms the joint venture Bullwinkle Studios , which manages the Rocky and Bullwinkle properties ; Universal 's purchase of Classic Media coincided with The Program Exchange 's shutdown .

    \n", + "

    When Yellowstone National Park was created in 1872 , gray wolf ( Canis lupus ) populations were already in decline in Montana , Wyoming and Idaho . The creation of the national park did not provide protection for wolves or other predators , and government predator control programs in the first decades of the 1900s essentially helped eliminate the gray wolf from Yellowstone . The last wolves were killed in Yellowstone in 1926 . After that time , sporadic reports of wolves still occurred , but scientists confirmed that sustainable wolf populations had been extirpated and were absent from Yellowstone during the mid-1900s .

    \n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "Bulls are used for breeding and often kept for their semen to sell for AI purposes. Some male cattle are also kept as work oxen for haulage. The vast majority, however, are slaughtered for meat before the age of three years.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\n", + "\n", + ">>>>>>>>>>>> Below are outputs of Case 4 <<<<<<<<<<<<\n", + "\n", + "\n", + "doc_ids: [['doc_3031', 'doc_819', 'doc_4521', 'doc_3980', 'doc_3423', 'doc_5275', 'doc_745', 'doc_753', 'doc_3562', 'doc_4139', 'doc_3678', 'doc_4931', 'doc_2347', 'doc_1115', 'doc_2806', 'doc_5204', 'doc_2707', 'doc_3653', 'doc_1122', 'doc_2398', 'doc_309', 'doc_3891', 'doc_2087', 'doc_330', 'doc_4844', 'doc_2155', 'doc_2674', 'doc_5357', 'doc_1581', 'doc_9']]\n", + "\u001b[32mAdding doc_id doc_3031 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_819 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4521 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3980 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3423 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5275 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_745 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_753 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3562 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "You must give as short an answer as possible.\n", + "\n", + "User's question is: has been honoured with the wisden leading cricketer in the world award for 2016\n", + "\n", + "Context is:

    The first recipient was Uttam Kumar from Bengali cinema , who was honoured at the 15th National Film Awards in 1968 for his performances in Anthony Firingee and Chiriyakhana . As of 2017 , Amitabh Bachchan is the most honoured actor , with four awards . Two actors -- Kamal Haasan and Mammootty -- have been honoured three times , while six actors -- Sanjeev Kumar , Mithun Chakraborty , Om Puri , Naseeruddin Shah , Mohanlal , and Ajay Devgn -- have won the award two times . Two actors have achieved the honour for performing in two languages -- Mithun Chakraborty ( Hindi and Bengali ) and Mammootty ( Malayalam and English ) . The most recent recipient is Riddhi Sen , who was honoured at the 65th National Film Awards for his performance in the Bengali film Nagarkirtan .

    \n", + "

    There was controversy over the National Film Award for Best Actor , which the committee awarded to Akshay Kumar for his performance in Rustom , snubbing Aamir Khan 's performance for Dangal . Committee member Priyadarshan , who has worked with Kumar on several films , gave the following explanation for awarding Kumar instead of Khan :

    \n", + "

    The 2017 ICC Champions Trophy was the eighth ICC Champions Trophy , a cricket tournament for the eight top - ranked One Day International ( ODI ) teams in the world . It was held in England and Wales from 1 June to 18 June 2017 . Pakistan won the competition for the first time with a 180 - run victory over India in the final at The Oval . The margin of victory was the largest by any team in the final of an ICC ODI tournament in terms of runs .

    \n", + " List of One Day International cricket double centuries
    No. | Runs | Batsman | S/R | For | Against | ODI | Venue | Date
    1 | 200* | Sachin Tendulkar | 136.05 | India | South Africa | 2962 | Captain Roop Singh Stadium , Gwalior , India | 24 February 2010
    2 | 219 | Virender Sehwag | 146.98 | India | West Indies | 3223 | Holkar Stadium , Indore , India | 8 December 2011
    3 | 209 | Rohit Sharma | 132.28 | India | Australia | 3428 | M. Chinnaswamy Stadium , Bangalore , India | 2 November 2013
    4 | 264 | Rohit Sharma | 152.60 | India | Sri Lanka | 3544 | Eden Gardens , India | 13 November 2014
    5 | 215 | Chris Gayle | 146.30 | West Indies | Zimbabwe | 3612 | Manuka Oval , Canberra , Australia | 24 February 2015
    6 | 237* | Martin Guptill | 145.40 | New Zealand | West Indies | 3643 | Wellington Regional Stadium , Wellington , New Zealand | 22 March 2015
    7 | 208* | Rohit Sharma | 135.95 | India | Sri Lanka | 3941 | Punjab Cricket Association IS Bindra Stadium , Mohali , India | 13 December 2017
    \n", + "

    G. Sankara Kurup , ( 3 June 1901 , Nayathode , Kingdom of Cochin ( now in Ernakulam district , Kerala , India ) -- 2 February 1978 , Vappalassery , Angamaly , Ernakulam district , Kerala ) , better known as Mahakavi G ( The Great Poet G ) , was the first winner of the Jnanpith Award , India 's highest literary award . He won the prize in 1965 for his collection of poems in Malayalam Odakkuzhal ( The Bamboo Flute , 1950 ) . With part of the prize money he established the literary award Odakkuzhal in 1968 . He was also the recipient of the Soviet Land Nehru Award , in 1967 , and the Padma Bhushan in 1968 . His poetry collection Viswadarshanam won the Kerala Sahitya Akademi Award in 1961 and Kendra Sahitya Akademi Award in 1963 .

    \n", + "

    The 2019 Cricket World Cup ( officially ICC Cricket World Cup 2019 ) is the 12th edition of the Cricket World Cup , scheduled to be hosted by England and Wales , from 30 May to 14 July 2019 .

    \n", + " 2018 Under - 19 Cricket World Cup
    Dates 13 January -- 3 February 2018
    Administrator ( s ) International Cricket Council
    Cricket format 50 overs
    Tournament format ( s ) Round - robin and knockout
    Host ( s ) New Zealand
    Champions India ( 4th title )
    Runners - up Australia
    Participants 16
    Matches played 48
    Player of the series Shubman Gill
    Most runs Alick Athanaze ( 418 )
    Most wickets Anukul Roy ( 14 ) Qais Ahmad ( 14 ) Faisal Jamkhandi ( 14 )
    Official website
    \n", + "

    The 2018 ICC Under - 19 Cricket World Cup was an international limited - overs cricket tournament that was held in New Zealand from 13 January to 3 February 2018 . It was the twelfth edition of the Under - 19 Cricket World Cup , and the third to be held in New Zealand ( after the 2002 and 2010 events ) . New Zealand was the first country to host the event three times . The opening ceremony took place on 7 January 2018 . The West Indies were the defending champions . However , they failed to defend their title , after losing their first two group fixtures .

    \n", + "

    Scoring over 10,000 runs across a playing career in any format of cricket is considered a significant achievement . In the year 2001 , Sachin Tendulkar became the first player to score 10,000 runs in ODIs , while playing a match during the bi-lateral series against Australia at home . In the chase for achieving top scores , West Indies ' Desmond Haynes retired as the most prolific run - scorer in One Day Internationals ( ODIs ) , with a total of 8,648 runs in 1994 . The record stood for four years until it was broken by India 's Mohammed Azharuddin . Azharuddin remained the top - scorer in the format until his compatriot Sachin Tendulkar passed him in October 2000 . As of August 2016 , eleven players -- from six teams that are Full members of the International Cricket Council -- have scored more than 10,000 runs in ODIs . Four of these are from Sri Lanka and three from India . The rest are one player each from Pakistan , Australia , West Indies , and South Africa . Bangladesh , England , New Zealand , and Zimbabwe are yet to have a player reach the 10,000 - run mark in this format .

    \n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "I'm sorry, I couldn't find any information about who has been honoured with the Wisden Leading Cricketer in the World award for 2016. UPDATE CONTEXT.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32mUpdating context and resetting conversation.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4139 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3678 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4931 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2347 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1115 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2806 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_5204 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2707 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3653 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "You must give as short an answer as possible.\n", + "\n", + "User's question is: has been honoured with the wisden leading cricketer in the world award for 2016\n", + "\n", + "Context is: List of the Indian Oscar nominee ( s ) / recipient ( s ) , also showing the year , film , category , and result
    Year Nominee ( s ) / recipient ( s ) Film Category / Honorary Award Result / received Ref .
    1958 ( 30th ) Mehboob Khan Mother India Best Foreign Language Film Nominated
    1961 ( 33rd ) Ismail Merchant The Creation of Woman Best Short Subject ( Live Action ) Nominated
    1979 ( 51st ) Vidhu Vinod Chopra and K.K. Kapil An Encounter with Faces Best Documentary ( Short Subject ) Nominated
    ( 55th ) Bhanu Athaiya Gandhi Best Costume Design Won
    Ravi Shankar Best Original Score Nominated
    ( 59th ) Ismail Merchant A Room with a View Best Picture Nominated
    ( 61st ) Mira Nair Salaam Bombay ! Best Foreign Language Film Nominated
    1992 ( 64th ) Satyajit Ray Pather Pachali Honorary Award Received
    ( 65th ) Ismail Merchant Howards End Best Picture Nominated
    ( 66th ) Ismail Merchant The Remains of the Day Best Picture Nominated
    2002 ( 74th ) Ashutosh Gowarikar Lagaan Best Foreign Language Film Nominated
    2005 ( 77th ) Ashvin Kumar Little Terrorist Best Short Subject ( Live Action ) Nominated
    2007 ( 79th ) Deepa Mehta Water Best Foreign Language Film Nominated
    2009 ( 81st ) Resul Pookutty Slumdog Millionaire Best Sound Mixing Won
    A.R. Rahman Best Original Score Won
    A.R. Rahman and Gulzar Best Original Song Won
    2011 ( 83rd ) A.R. Rahman 127 Hours Best Original Score Nominated
    A.R. Rahman Best Original Song Nominated
    2013 ( 85th ) Bombay Jayashri Life of Pi Best Original Song Nominated
    2016 Rahul Thakkar n / a Sci - Tech Award Received
    2016 Cottalango Leon n / a Sci - Tech Award Received
    2018 Vikas Sathaye n / a Sci - Tech Award Received
    \n", + "

    The 2017 Nobel Peace Prize was awarded to the International Campaign to Abolish Nuclear Weapons ( ICAN ) `` for its work to draw attention to the catastrophic humanitarian consequences of any use of nuclear weapons and for its ground - breaking efforts to achieve a treaty - based prohibition on such weapons , '' according to the Norwegian Nobel Committee announcement on October 6 , 2017 . The award announcement acknowledged the fact that `` the world 's nine nuclear - armed powers and their allies '' neither signed nor supported the treaty - based prohibition known as the Treaty on the Prohibition of Nuclear Weapons or nuclear ban treaty , yet in an interview Committee Chair Berit Reiss - Andersen told reporters that the award was intended to give `` encouragement to all players in the field '' to disarm . The award was hailed by civil society as well as governmental and intergovernmental representatives who support the nuclear ban treaty , but drew criticism from those opposed . At the Nobel Peace Prize award ceremony held in Oslo City Hall on December 10 , 2017 , Setsuko Thurlow , an 85 - year - old survivor of the 1945 atomic bombing of Hiroshima , and ICAN Executive Director Beatrice Fihn jointly received a medal and diploma of the award on behalf of ICAN and delivered the Nobel lecture .

    \n", + "

    Career records for batting average are usually subject to a minimum qualification of 20 innings played or completed , in order to exclude batsmen who have not played enough games for their skill to be reliably assessed . Under this qualification , the highest Test batting average belongs to Australia 's Sir Donald Bradman , with 99.94 . Given that a career batting average over 50 is exceptional , and that only five other players have averages over 60 , this is an outstanding statistic . The fact that Bradman 's average is so far above that of any other cricketer has led several statisticians to argue that , statistically at least , he was the greatest athlete in any sport .

    \n", + "
    Indian cricket team in South Africa in 2017 -- 18
    South Africa India
    Dates 5 January 2018 -- 24 February 2018
    Captains Faf du Plessis ( Tests and ODIs ) JP Duminy ( T20Is ) Virat Kohli
    Test series
    Result South Africa won the 3 - match series 2 -- 1
    Most runs AB de Villiers ( 211 ) Virat Kohli ( 286 )
    Most wickets Vernon Philander ( 15 ) Kagiso Rabada ( 15 ) Mohammed Shami ( 15 )
    Player of the series Vernon Philander ( SA )
    One Day International series
    Results India won the 6 - match series 5 -- 1
    Most runs Hashim Amla ( 154 ) Virat Kohli ( 558 )
    Most wickets Lungi Ngidi ( 8 ) Kuldeep Yadav ( 17 )
    Player of the series Virat Kohli ( Ind )
    Twenty20 International series
    Results India won the 3 - match series 2 -- 1
    Most runs JP Duminy ( 122 ) Shikhar Dhawan ( 143 )
    Most wickets Junior Dala ( 7 ) Bhuvneshwar Kumar ( 7 )
    Player of the series Bhuvneshwar Kumar ( Ind )
    \n", + "

    Brian Lara took the least number of innings ( 195 ) to reach the 10,000 run mark , later equalled by Sachin Tendulkar and Kumar Sangakkara , while Australia 's Steve Waugh took 244 innings to achieve the feat . Alastair Cook is the fastest in terms of time span , taking 10 years and 87 days . The time taken by Shivnarine Chanderpaul ( 18 years and 37 days ) is the slowest among all . As of May 2017 , Tendulkar leads the list with 15,921 runs followed by Ricky Ponting of Australia with 13,378 .

    \n", + "
    50 + Player Matches Innings
    119 Sachin Tendulkar 200 329
    103 Jacques Kallis 166 280
    103 Ricky Ponting 168 287
    99 Rahul Dravid 164 286
    96 Shivnarine Chanderpaul 164 280

    Last updated : 15 June 2016

    \n", + "

    Chandan Shetty emerged as the winner of this season on 28 January 2018 , with Karthik being the runner - up . The other finalists , Niveditha , Diwakar and Shruti , were eliminated .

    \n", + "

    Arthur Chung ( January 10 , 1918 -- June 23 , 2008 ) was the first President of Guyana from 1970 to 1980 . During his time as President of Guyana , the office was that of a ceremonial head of state , with real power in the hands of Prime Minister Forbes Burnham . He was honoured with Guyana 's highest national honour , the Order of Excellence ( O.E. ) .

    \n", + "
    Incumbent Achal Kumar Jyoti since 6 July 2017
    No Name ( birth -- death ) Portrait Elected ( % votes ) Took office Left office Term ( in years ) Notes President ( s ) Candidate of
    Sarvepalli Radhakrishnan ( 1888 -- 1975 ) 1952 ( Unopposed )

    1957 ( Unopposed )

    13 May 1952 12 May 1962 10 Radhakrishnan was a prominent scholar . Besides being awarded the Bharat Ratna he also held the position of vice-chancellor in the Banaras Hindu University and the Andhra college . He served as the Vice-President for two terms . Rajendra Prasad Independent
    Zakir Husain ( 1897 -- 1969 ) -- 1962 ( 97.59 ) 13 May 1962 12 May 1967 5 Sarvepalli Radhakrishnan Independent
    Varahagiri Venkata Giri ( 1894 -- 1980 ) -- 1967 ( 71.45 ) 13 May 1967 3 May 1969 Zakir Husain Independent
    Gopal Swarup Pathak ( 1896 -- 1982 ) -- 1969 -- 31 August 1969 30 August 1974 5 Varahagiri Venkata Giri ( 1969 -- 1974 )

    Fakhruddin Ali Ahmed ( 1974 )

    Independent
    5 Basappa Danappa Jatti ( 1912 -- 2002 ) -- ( 78.70 ) 31 August 1974 30 August 1979 5 Fakhruddin Ali Ahmed ( 1974 -- 1977 ) Neelam Sanjiva Reddy ( 1977 -- 1979 ) Indian National Congress
    6 Mohammad Hidayatullah ( 1905 -- 1992 ) -- 1979 ( Unopposed ) 31 August 1979 30 August 1984 5 Neelam Sanjiva Reddy ( 1979 -- 1982 ) Giani Zail Singh ( 1982 -- 1984 ) Independent
    7 Ramaswamy Venkataraman ( 1910 -- 2009 ) 1984 ( 71.05 ) 31 August 1984 24 July 1987 Giani Zail Singh Indian National Congress
    8 Shankar Dayal Sharma ( 1918 -- 1999 ) ( Unopposed ) 3 September 1987 24 July 1992 5 Ramaswamy Venkataraman Indian National Congress
    9 Kocheril Raman Narayanan ( 1920 -- 2005 ) 1992 ( 99.86 ) 21 August 1992 24 July 1997 5 Shankar Dayal Sharma Indian National Congress
    10 Krishan Kant ( 1927 -- 2002 ) -- 1997 ( 61.76 ) 21 August 1997 27 July 2002 Kocheril Raman Narayanan ( 1997 -- 2002 ) A.P.J. Abdul Kalam ( 2002 ) Janata Dal
    11 Bhairon Singh Shekhawat ( 1923 -- 2010 ) 2002 ( 59.82 ) 19 August 2002 21 July 2007 5 A.P.J. Abdul Kalam Bharatiya Janata Party
    12 Mohammad Hamid Ansari ( 1937 -- ) 2007 ( 60.51 ) 2012 ( 67.31 ) 11 August 2007 11 August 2017 10 Pratibha Patil ( 2007 -- 2012 ) Pranab Mukherjee ( 2012 -- 2017 ) Ram Nath Kovind ( 2017 ) Indian National Congress
    13 Muppavarapu Venkaiah Naidu ( 1949 -- ) 2017 ( 67.89 ) 11 August 2017 Incumbent -- Ram Nath Kovind Bharatiya Janata Party
    \n", + "
    Governor of Maharashtra
    Incumbent Chennamaneni Vidyasagar Rao since 30 August 2014
    Style His Excellency
    Residence Main : Raj Bhavan ( Mumbai ) Additional : Raj Bhavan ( Nagpur ) ; Raj Bhavan ( Pune ) & Raj Bhavan ( Mahabaleshwar )
    Appointer President of India
    Term length Five Years
    Inaugural holder John Colville , PC , GCIE
    Formation 15 August 1947 ; 70 years ago ( 1947 - 08 - 15 )
    \n", + "

    Every player who has won this award and has been eligible for the Naismith Memorial Basketball Hall of Fame has been inducted . Kareem Abdul - Jabbar won the award a record six times . Both Bill Russell and Michael Jordan won the award five times , while Wilt Chamberlain and LeBron James won the award four times . Russell and James are the only players to have won the award four times in five seasons . Moses Malone , Larry Bird and Magic Johnson each won the award three times , while Bob Pettit , Karl Malone , Tim Duncan , Steve Nash and Stephen Curry have each won it twice . Only two rookies have won the award : Wilt Chamberlain in the 1959 -- 60 season and Wes Unseld in the 1968 -- 69 season . Hakeem Olajuwon of Nigeria , Tim Duncan of the U.S. Virgin Islands , Steve Nash of Canada and Dirk Nowitzki of Germany are the only MVP winners considered `` international players '' by the NBA .

    \n", + "

    The Jawaharlal Nehru Centre for Advanced Scientific Research ( JNCASR ) is a multidisciplinary research institute located at Jakkur , Bangalore , India . It was established by the Department of Science and Technology of the Government of India , to mark the birth centenary of Pandit Jawaharlal Nehru .

    \n", + "

    Ajay Tyagi was appointed chairman on 10 January 2017 replacing UK Sinha . And took charge of chairman office on 1 March 2017 . The Board comprises

    \n", + "
    Year | Player | Country
    2003 | Ricky Ponting | Australia
    2004 | Shane Warne | Australia
    2005 | Andrew Flintoff | England
    2006 | Muttiah Muralitharan | Sri Lanka
    2007 | Jacques Kallis | South Africa
    2008 | Virender Sehwag | India
    2009 | Virender Sehwag | India
    2010 | Sachin Tendulkar | India
    2011 | Kumar Sangakkara | Sri Lanka
    2012 | Michael Clarke | Australia
    2013 | Dale Steyn | South Africa
    2014 | Kumar Sangakkara | Sri Lanka
    2015 | Kane Williamson | New Zealand
    2016 | Virat Kohli | India
    2017 | Virat Kohli | India
    \n", + "

    Mankombu Sambasivan Swaminathan ( born 7 August 1925 ) is an Indian geneticist and international administrator , renowned for his leading role in India 's Green Revolution a program under which high - yield varieties of wheat and rice seedlings were planted in the fields of poor farmers . Swaminathan is known as `` Indian Father of Green Revolution '' for his leadership and success in introducing and further developing high - yielding varieties of wheat in India . He is the founder and chairman of the MS Swaminathan Research Foundation . His stated vision is to rid the world of hunger and poverty . Swaminathan is an advocate of moving India to sustainable development , especially using environmentally sustainable agriculture , sustainable food security and the preservation of biodiversity , which he calls an `` evergreen revolution . ''

    \n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "I'm sorry, the provided context doesn't contain information about any cricketer being honored with the Wisden Leading Cricketer in the World award for 2016. UPDATE CONTEXT if you have any other query.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\n", + "\n", + ">>>>>>>>>>>> Below are outputs of Case 5 <<<<<<<<<<<<\n", + "\n", + "\n", + "doc_ids: [['doc_20', 'doc_2943', 'doc_2059', 'doc_3293', 'doc_4056', 'doc_1914', 'doc_2749', 'doc_1796', 'doc_3468', 'doc_1793', 'doc_876', 'doc_2577', 'doc_27', 'doc_366', 'doc_321', 'doc_3103', 'doc_715', 'doc_3534', 'doc_142', 'doc_5337', 'doc_2426', 'doc_5346', 'doc_3021', 'doc_1596', 'doc_316', 'doc_1103', 'doc_1602', 'doc_1677', 'doc_1670', 'doc_2853']]\n", + "\u001b[32mAdding doc_id doc_20 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2943 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2059 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3293 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_4056 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1914 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2749 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1796 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_3468 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_1793 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_876 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_2577 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_27 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "You must give as short an answer as possible.\n", + "\n", + "User's question is: who carried the usa flag in opening ceremony\n", + "\n", + "Context is:

    On January 17 , 1899 , under orders from President William McKinley , Commander Edward D. Taussig of USS Bennington landed on Wake and formally took possession of the island for the United States . After a 21 - gun salute , the flag was raised and a brass plate was affixed to the flagstaff with the following inscription :

    \n", + "
  • 1960 Flag with 50 stars ( Hawaii )
  • \n", + "

    The flag of the United States of America , often referred to as the American flag , is the national flag of the United States . It consists of thirteen equal horizontal stripes of red ( top and bottom ) alternating with white , with a blue rectangle in the canton ( referred to specifically as the `` union '' ) bearing fifty small , white , five - pointed stars arranged in nine offset horizontal rows , where rows of six stars ( top and bottom ) alternate with rows of five stars . The 50 stars on the flag represent the 50 states of the United States of America , and the 13 stripes represent the thirteen British colonies that declared independence from the Kingdom of Great Britain , and became the first states in the U.S. Nicknames for the flag include The Stars and Stripes , Old Glory , and The Star - Spangled Banner .

    \n", + "

    The Pledge of Allegiance of the United States is an expression of allegiance to the Flag of the United States and the republic of the United States of America . It was originally composed by Captain George Thatcher Balch , a Union Army Officer during the Civil War and later a teacher of patriotism in New York City schools . The form of the pledge used today was largely devised by Francis Bellamy in 1892 , and formally adopted by Congress as the pledge in 1942 . The official name of The Pledge of Allegiance was adopted in 1945 . The most recent alteration of its wording came on Flag Day in 1954 , when the words `` under God '' were added .

    \n", + "

    In modern times , the U.S. military plays ( or sounds ) `` Reveille '' in the morning , generally near sunrise , though its exact time varies from base to base . On U.S. Army posts and Air Force bases , `` Reveille '' is played by itself or followed by the bugle call `` To the Colors '' at which time the national flag is raised and all U.S. military personnel outdoors are required to come to attention and present a salute in uniform , either to the flag or in the direction of the music if the flag is not visible . While in formation , soldiers are brought to the position of parade rest while `` Reveille '' plays then called to attention and present arms as the national flag is raised . On board U.S. Navy , Marine Corps , and Coast Guard facilities , the flag is generally raised at 0800 ( 8 am ) while `` The Star Spangled Banner '' or the bugle call `` To the Colors '' is played . On some U.S. military bases , `` Reveille '' is accompanied by a cannon shot .

    \n", + "

    When the National Anthem was first recognized by law in 1932 , there was no prescription as to behavior during its playing . On June 22 , 1942 , the law was revised indicating that those in uniform should salute during its playing , while others should simply stand at attention , men removing their hats . ( The same code also required that women should place their hands over their hearts when the flag is displayed during the playing of the Anthem , but not if the flag was not present . ) On December 23 , 1942 the law was again revised instructing men and women to stand at attention and face in the direction of the music when it was played . That revision also directed men and women to place their hands over their hearts only if the flag was displayed . Those in uniform were required to salute . On July 7 , 1976 , the law was simplified . Men and women were instructed to stand with their hands over their hearts , men removing their hats , irrespective of whether or not the flag was displayed and those in uniform saluting . On August 12 , 1998 , the law was rewritten keeping the same instructions , but differentiating between `` those in uniform '' and `` members of the Armed Forces and veterans '' who were both instructed to salute during the playing whether or not the flag was displayed . Because of the changes in law over the years and confusion between instructions for the Pledge of Allegence versus the National Anthem , throughout most of the 20th century many people simply stood at attention or with their hands folded in front of them during the playing of the Anthem , and when reciting the Pledge they would hold their hand ( or hat ) over their heart . After 9 / 11 , the custom of placing the hand over the heart during the playing of the Anthem became nearly universal .

    \n", + "

    A flag designed by John McConnell in 1969 for the first Earth Day is a dark blue field charged with The Blue Marble , a famous NASA photo of the Earth as seen from outer space . The first edition of McConnell 's flag used screen - printing and used different colors : ocean and land were blue and the clouds were white . McConnell presented his flag to the United Nations as a symbol for consideration .

    \n", + "

    The torch - bearing arm was displayed at the Centennial Exposition in Philadelphia in 1876 , and in Madison Square Park in Manhattan from 1876 to 1882 . Fundraising proved difficult , especially for the Americans , and by 1885 work on the pedestal was threatened by lack of funds . Publisher Joseph Pulitzer , of the New York World , started a drive for donations to finish the project and attracted more than 120,000 contributors , most of whom gave less than a dollar . The statue was built in France , shipped overseas in crates , and assembled on the completed pedestal on what was then called Bedloe 's Island . The statue 's completion was marked by New York 's first ticker - tape parade and a dedication ceremony presided over by President Grover Cleveland .

    \n", + "

    The horizontal stripes on the flag represent the nine original departments of Uruguay , based on the U.S flag , where the stripes represent the original 13 colonies . The first flag designed in 1828 had 9 light blue stripes ; this number was reduced to 4 in 1830 due to visibility problems from distance . The Sun of May represents the May Revolution of 1810 ; according to the historian Diego Abad de Santillán , the Sun of May is a figurative sun that represents Inti , the sun god of the Inca religion . It also appears in the Flag of Argentina and the Coat of Arms of Bolivia .

    \n", + "

    The anthem has been recorded and performed in many different languages , usually as a result of the hosting of either form of the Games in various countries . The IOC does n't require that the anthem be performed in either English or Greek . But in the 2008 Olympic opening and closing ceremonies in Beijing , China , Greek was sung instead of the host country 's official language , Mandarin . Also in the 2016 Olympic opening ceremonies in Rio de Janeiro , Brazil , English was also sung instead of host country 's official language , Portuguese .

    \n", + "

    The United States Oath of Allegiance , officially referred to as the `` Oath of Allegiance , '' 8 C.F.R. Part 337 ( 2008 ) , is an allegiance oath that must be taken by all immigrants who wish to become United States citizens .

    \n", + "

    During the first half of the 19th century , seven stars were added to the flag to represent the seven signatories to the Venezuelan declaration of independence , being the provinces of Caracas , Cumaná , Barcelona , Barinas , Margarita , Mérida , and Trujillo .

    \n", + "

    With the annexation of Hawaii in 1898 and the seizure of Guam and the Philippines during the Spanish -- American War that same year , the United States began to consider unclaimed and uninhabited Wake Island , located approximately halfway between Honolulu and Manila , as a good location for a telegraph cable station and coaling station for refueling warships of the rapidly expanding United States Navy and passing merchant and passenger steamships . On July 4 , 1898 , United States Army Brigadier General Francis V. Greene of the 2nd Brigade , Philippine Expeditionary Force , of the Eighth Army Corps , stopped at Wake Island and raised the American flag while en route to the Philippines on the steamship liner SS China .

    \n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "I don't have the answer with the provided context. UPDATE CONTEXT.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32mUpdating context and resetting conversation.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_366 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the\n", + "context provided by the user.\n", + "If you can't answer the question with or without the current context, you should reply exactly `UPDATE CONTEXT`.\n", + "You must give as short an answer as possible.\n", + "\n", + "User's question is: who carried the usa flag in opening ceremony\n", + "\n", + "Context is: \n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "Erin Hamlin carried the USA flag in the opening ceremony.\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "for i in range(len(questions)):\n", + " print(f\"\\n\\n>>>>>>>>>>>> Below are outputs of Case {i+1} <<<<<<<<<<<<\\n\\n\")\n", + "\n", + " # reset the assistant. Always reset the assistant before starting a new conversation.\n", + " assistant.reset()\n", + "\n", + " qa_problem = questions[i]\n", + " ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=qa_problem, n_results=30)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, questions were directly selected from the dataset. RetrieveChat was able to answer the questions correctly in the first attempt as the retrieved context contained the necessary information in the first two cases. However, in the last three cases, the context with the highest similarity to the question embedding did not contain the required information to answer the question. As a result, the LLM model responded with `UPDATE CONTEXT`. With the unique and innovative ability to update context in RetrieveChat, the agent automatically updated the context and sent it to the LLM model again. After several rounds of this process, the agent was able to generate the correct answer to the questions." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Example 6\n", + "\n", + "[Back to top](#table-of-contents)\n", + "\n", + "Use RetrieveChat to answer multi-hop questions for [2WikiMultihopQA](https://github.com/Alab-NII/2wikimultihop) dataset with customized prompt and few-shot learning.\n", + "\n", + "First, we will create a new document collection which includes all the contextual corpus. Then, we will choose some questions and utilize RetrieveChat to answer them. For this particular example, we will be using the `gpt-3.5-turbo` model, and we will demonstrate RetrieveChat's feature of automatically updating context in case the documents retrieved do not contain sufficient information. Moreover, we'll demonstrate how to use customized prompt and few-shot learning to address tasks that are not pre-defined in RetrieveChat." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "PROMPT_MULTIHOP = \"\"\"You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the context provided by the user. You must think step-by-step.\n", + "First, please learn the following examples of context and question pairs and their corresponding answers.\n", + "\n", + "Context:\n", + "Kurram Garhi: Kurram Garhi is a small village located near the city of Bannu, which is the part of Khyber Pakhtunkhwa province of Pakistan. Its population is approximately 35000.\n", + "Trojkrsti: Trojkrsti is a village in Municipality of Prilep, Republic of Macedonia.\n", + "Q: Are both Kurram Garhi and Trojkrsti located in the same country?\n", + "A: Kurram Garhi is located in the country of Pakistan. Trojkrsti is located in the country of Republic of Macedonia. Thus, they are not in the same country. So the answer is: no.\n", + "\n", + "\n", + "Context:\n", + "Early Side of Later: Early Side of Later is the third studio album by English singer- songwriter Matt Goss. It was released on 21 June 2004 by Concept Music and reached No. 78 on the UK Albums Chart.\n", + "What's Inside: What's Inside is the fourteenth studio album by British singer- songwriter Joan Armatrading.\n", + "Q: Which album was released earlier, What'S Inside or Cassandra'S Dream (Album)?\n", + "A: What's Inside was released in the year 1995. Cassandra's Dream (album) was released in the year 2008. Thus, of the two, the album to release earlier is What's Inside. So the answer is: What's Inside.\n", + "\n", + "\n", + "Context:\n", + "Maria Alexandrovna (Marie of Hesse): Maria Alexandrovna , born Princess Marie of Hesse and by Rhine (8 August 1824 – 3 June 1880) was Empress of Russia as the first wife of Emperor Alexander II.\n", + "Grand Duke Alexei Alexandrovich of Russia: Grand Duke Alexei Alexandrovich of Russia,(Russian: Алексей Александрович; 14 January 1850 (2 January O.S.) in St. Petersburg – 14 November 1908 in Paris) was the fifth child and the fourth son of Alexander II of Russia and his first wife Maria Alexandrovna (Marie of Hesse).\n", + "Q: What is the cause of death of Grand Duke Alexei Alexandrovich Of Russia's mother?\n", + "A: The mother of Grand Duke Alexei Alexandrovich of Russia is Maria Alexandrovna. Maria Alexandrovna died from tuberculosis. So the answer is: tuberculosis.\n", + "\n", + "\n", + "Context:\n", + "Laughter in Hell: Laughter in Hell is a 1933 American Pre-Code drama film directed by Edward L. Cahn and starring Pat O'Brien. The film's title was typical of the sensationalistic titles of many Pre-Code films.\n", + "Edward L. Cahn: Edward L. Cahn (February 12, 1899 – August 25, 1963) was an American film director.\n", + "Q: When did the director of film Laughter In Hell die?\n", + "A: The film Laughter In Hell was directed by Edward L. Cahn. Edward L. Cahn died on August 25, 1963. 
So the answer is: August 25, 1963.\n", + "\n", + "Second, please complete the answer by thinking step-by-step.\n", + "\n", + "Context:\n", + "{input_context}\n", + "Q: {input_question}\n", + "A:\n", + "\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "# create the RetrieveUserProxyAgent instance named \"ragproxyagent\"\n", + "corpus_file = \"https://huggingface.co/datasets/thinkall/2WikiMultihopQA/resolve/main/corpus.txt\"\n", + "\n", + "# Create a new collection for NaturalQuestions dataset\n", + "ragproxyagent = RetrieveUserProxyAgent(\n", + " name=\"ragproxyagent\",\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=3,\n", + " retrieve_config={\n", + " \"task\": \"qa\",\n", + " \"docs_path\": corpus_file,\n", + " \"chunk_token_size\": 2000,\n", + " \"model\": config_list[0][\"model\"],\n", + " \"client\": chromadb.PersistentClient(path=\"/tmp/chromadb\"),\n", + " \"collection_name\": \"2wikimultihopqa\",\n", + " \"chunk_mode\": \"one_line\",\n", + " \"embedding_model\": \"all-MiniLM-L6-v2\",\n", + " \"customized_prompt\": PROMPT_MULTIHOP,\n", + " \"customized_answer_prefix\": \"the answer is\",\n", + " },\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['Which film came out first, Blind Shaft or The Mask Of Fu Manchu?', 'Are North Marion High School (Oregon) and Seoul High School both located in the same country?']\n", + "[['The Mask Of Fu Manchu'], ['no']]\n" + ] + } + ], + "source": [ + "# queries_file = \"https://huggingface.co/datasets/thinkall/2WikiMultihopQA/resolve/main/queries.jsonl\"\n", + "queries = \"\"\"{\"_id\": \"61a46987092f11ebbdaeac1f6bf848b6\", \"text\": \"Which film came out first, Blind Shaft or The Mask Of Fu Manchu?\", \"metadata\": {\"answer\": [\"The Mask Of Fu Manchu\"]}}\n", + "{\"_id\": \"a7b9672009c311ebbdb0ac1f6bf848b6\", \"text\": \"Are North Marion High School (Oregon) and Seoul High School both located in the same country?\", \"metadata\": {\"answer\": [\"no\"]}}\n", + "\"\"\"\n", + "queries = [json.loads(line) for line in queries.split(\"\\n\") if line]\n", + "questions = [q[\"text\"] for q in queries]\n", + "answers = [q[\"metadata\"][\"answer\"] for q in queries]\n", + "print(questions)\n", + "print(answers)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + ">>>>>>>>>>>> Below are outputs of Case 1 <<<<<<<<<<<<\n", + "\n", + "\n", + "Trying to create collection.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "max_tokens is too small to fit a single line of text. Breaking this line:\n", + "\tClyde Thompson: Clyde Thompson( 1910 – July 1, 1979) was an American prisoner turned chaplain. He is ...\n", + "max_tokens is too small to fit a single line of text. 
Breaking this line:\n", + "\tAustralian Historical Monographs: The Australian Historical Monographs are a series of Historical st ...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "doc_ids: [['doc_12', 'doc_11', 'doc_16', 'doc_19', 'doc_13116', 'doc_14', 'doc_13', 'doc_18', 'doc_977', 'doc_10']]\n", + "\u001b[32mAdding doc_id doc_12 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_11 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_16 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_19 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_13116 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_14 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_13 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_18 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_977 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_10 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the context provided by the user. You must think step-by-step.\n", + "First, please learn the following examples of context and question pairs and their corresponding answers.\n", + "\n", + "Context:\n", + "Kurram Garhi: Kurram Garhi is a small village located near the city of Bannu, which is the part of Khyber Pakhtunkhwa province of Pakistan. Its population is approximately 35000.\n", + "Trojkrsti: Trojkrsti is a village in Municipality of Prilep, Republic of Macedonia.\n", + "Q: Are both Kurram Garhi and Trojkrsti located in the same country?\n", + "A: Kurram Garhi is located in the country of Pakistan. Trojkrsti is located in the country of Republic of Macedonia. Thus, they are not in the same country. So the answer is: no.\n", + "\n", + "\n", + "Context:\n", + "Early Side of Later: Early Side of Later is the third studio album by English singer- songwriter Matt Goss. It was released on 21 June 2004 by Concept Music and reached No. 78 on the UK Albums Chart.\n", + "What's Inside: What's Inside is the fourteenth studio album by British singer- songwriter Joan Armatrading.\n", + "Q: Which album was released earlier, What'S Inside or Cassandra'S Dream (Album)?\n", + "A: What's Inside was released in the year 1995. Cassandra's Dream (album) was released in the year 2008. Thus, of the two, the album to release earlier is What's Inside. So the answer is: What's Inside.\n", + "\n", + "\n", + "Context:\n", + "Maria Alexandrovna (Marie of Hesse): Maria Alexandrovna , born Princess Marie of Hesse and by Rhine (8 August 1824 – 3 June 1880) was Empress of Russia as the first wife of Emperor Alexander II.\n", + "Grand Duke Alexei Alexandrovich of Russia: Grand Duke Alexei Alexandrovich of Russia,(Russian: Алексей Александрович; 14 January 1850 (2 January O.S.) in St. Petersburg – 14 November 1908 in Paris) was the fifth child and the fourth son of Alexander II of Russia and his first wife Maria Alexandrovna (Marie of Hesse).\n", + "Q: What is the cause of death of Grand Duke Alexei Alexandrovich Of Russia's mother?\n", + "A: The mother of Grand Duke Alexei Alexandrovich of Russia is Maria Alexandrovna. Maria Alexandrovna died from tuberculosis. So the answer is: tuberculosis.\n", + "\n", + "\n", + "Context:\n", + "Laughter in Hell: Laughter in Hell is a 1933 American Pre-Code drama film directed by Edward L. Cahn and starring Pat O'Brien. 
The film's title was typical of the sensationalistic titles of many Pre-Code films.\n", + "Edward L. Cahn: Edward L. Cahn (February 12, 1899 – August 25, 1963) was an American film director.\n", + "Q: When did the director of film Laughter In Hell die?\n", + "A: The film Laughter In Hell was directed by Edward L. Cahn. Edward L. Cahn died on August 25, 1963. So the answer is: August 25, 1963.\n", + "\n", + "Second, please complete the answer by thinking step-by-step.\n", + "\n", + "Context:\n", + "The Mask of Fu Manchu: The Mask of Fu Manchu is a 1932 pre-Code adventure film directed by Charles Brabin. It was written by Irene Kuhn, Edgar Allan Woolf and John Willard based on the 1932 novel of the same name by Sax Rohmer. Starring Boris Karloff as Fu Manchu, and featuring Myrna Loy as his depraved daughter, the movie revolves around Fu Manchu's quest for the golden sword and mask of Genghis Khan. Lewis Stone plays his nemesis. Dr. Petrie is absent from this film.\n", + "The Mysterious Dr. Fu Manchu: The Mysterious Dr. Fu Manchu is a 1929 American pre-Code drama film directed by Rowland V. Lee and starring Warner Oland as Dr. Fu Manchu. It was the first Fu Manchu film of the talkie era. Since this was during the transition period to sound, a silent version was also released in the United States.\n", + "The Face of Fu Manchu: The Face of Fu Manchu is a 1965 thriller film directed by Don Sharp and based on the characters created by Sax Rohmer. It stars Christopher Lee as the eponymous villain, a Chinese criminal mastermind, and Nigel Green as his pursuing rival Nayland Smith, a Scotland Yard detective. The film was a British- West German co-production, and was the first in a five- part series starring Lee and produced by Harry Alan Towers for Constantin Film, the second of which was\" The Brides of Fu Manchu\" released the next year, with the final entry being\" The Castle of Fu Manchu\" in 1969. It was shot in Technicolor and Techniscope, on- location in County Dublin, Ireland.\n", + "The Return of Dr. Fu Manchu: The Return of Dr. Fu Manchu is a 1930 American pre-Code film directed by Rowland V. Lee. It is the second of three films starring Warner Oland as the fiendish Fu Manchu, who returns from apparent death in the previous film,\" The Mysterious Dr. Fu Manchu\"( 1929), to seek revenge on those he holds responsible for the death of his wife and child.\n", + "The Vengeance of Fu Manchu: The Vengeance of Fu Manchu is a 1967 British film directed by Jeremy Summers and starring Christopher Lee, Horst Frank, Douglas Wilmer and Tsai Chin. It was the third British/ West German Constantin Film co-production of the Dr. Fu Manchu series and the first to be filmed in Hong Kong. It was generally released in the U.K. through Warner- Pathé( as a support feature to the Lindsay Shonteff film\" The Million Eyes of Sumuru\") on 3 December 1967.\n", + "The Brides of Fu Manchu: The Brides of Fu Manchu is a 1966 British/ West German Constantin Film co-production adventure crime film based on the fictional Chinese villain Dr. Fu Manchu, created by Sax Rohmer. It was the second film in a series, and was preceded by\" The Face of Fu ManchuThe Vengeance of Fu Manchu\" followed in 1967,\" The Blood of Fu Manchu\" in 1968, and\" The Castle of Fu Manchu\" in 1969. It was produced by Harry Alan Towers for Hallam Productions. Like the first film, it was directed by Don Sharp, and starred Christopher Lee as Fu Manchu. Nigel Green was replaced by Douglas Wilmer as Scotland Yard detective Nayland Smith. 
The action takes place mainly in London, where much of the location filming took place.\n", + "The Castle of Fu Manchu: The Castle of Fu Manchu( also known as The Torture Chamber of Dr. Fu Manchu and also known by its German title Die Folterkammer des Dr. Fu Man Chu) is a 1969 film and the fifth and final Dr. Fu Manchu film with Christopher Lee portraying the title character.\n", + "The Blood of Fu Manchu: The Blood of Fu Manchu, also known as Fu Manchu and the Kiss of Death, Kiss of Death, Kiss and Kill( U.S. title) and Against All Odds( original U.S. video title), is a 1968 British adventure crime film directed by Jesús Franco, based on the fictional Asian villain Dr. Fu Manchu created by Sax Rohmer. It was the fourth film in a series, and was preceded by\" The Vengeance of Fu Manchu The Castle of Fu Manchu\" followed in 1969. It was produced by Harry Alan Towers for Udastex Films. It starred Christopher Lee as Dr. Fu Manchu, Richard Greene as Scotland Yard detective Nayland Smith, and Howard Marion- Crawford as Dr. Petrie. The movie was filmed in Spain and Brazil. Shirley Eaton appears in a scene that she claimed she was never paid for; apparently, the director Jesús Franco had inserted some stock footage of her from one of her films(\" The Girl from Rio\"( 1968)) into the film without telling her. She only found out years later that she had been in a Fu Manchu film.\n", + "Don Sharp: Donald Herman Sharp( 19 April 192114 December 2011) was an Australian- born British film director. His best known films were made for Hammer in the 1960s, and included\" The Kiss of the Vampire\"( 1962) and\" Rasputin, the Mad Monk\"( 1966). In 1965 he directed\" The Face of Fu Manchu\", based on the character created by Sax Rohmer, and starring Christopher Lee. Sharp also directed the sequel\" The Brides of Fu Manchu\"( 1966). In the 1980s he was also responsible for several hugely popular miniseries adapted from the novels of Barbara Taylor Bradford.\n", + "Blind Shaft: Blind Shaft is a 2003 film about a pair of brutal con artists operating in the illegal coal mines of present- day northern China. The film was written and directed by Li Yang( 李杨), and is based on Chinese writer Liu Qingbang's short novel\" Shen MuSacred Wood\").\n", + "\n", + "Q: Which film came out first, Blind Shaft or The Mask Of Fu Manchu?\n", + "A:\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32mAdding doc_id doc_11 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_16 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_19 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_13116 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_14 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_13 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_18 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_977 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_10 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the context provided by the user. You must think step-by-step.\n", + "First, please learn the following examples of context and question pairs and their corresponding answers.\n", + "\n", + "Context:\n", + "Kurram Garhi: Kurram Garhi is a small village located near the city of Bannu, which is the part of Khyber Pakhtunkhwa province of Pakistan. 
Its population is approximately 35000.\n", + "Trojkrsti: Trojkrsti is a village in Municipality of Prilep, Republic of Macedonia.\n", + "Q: Are both Kurram Garhi and Trojkrsti located in the same country?\n", + "A: Kurram Garhi is located in the country of Pakistan. Trojkrsti is located in the country of Republic of Macedonia. Thus, they are not in the same country. So the answer is: no.\n", + "\n", + "\n", + "Context:\n", + "Early Side of Later: Early Side of Later is the third studio album by English singer- songwriter Matt Goss. It was released on 21 June 2004 by Concept Music and reached No. 78 on the UK Albums Chart.\n", + "What's Inside: What's Inside is the fourteenth studio album by British singer- songwriter Joan Armatrading.\n", + "Q: Which album was released earlier, What'S Inside or Cassandra'S Dream (Album)?\n", + "A: What's Inside was released in the year 1995. Cassandra's Dream (album) was released in the year 2008. Thus, of the two, the album to release earlier is What's Inside. So the answer is: What's Inside.\n", + "\n", + "\n", + "Context:\n", + "Maria Alexandrovna (Marie of Hesse): Maria Alexandrovna , born Princess Marie of Hesse and by Rhine (8 August 1824 – 3 June 1880) was Empress of Russia as the first wife of Emperor Alexander II.\n", + "Grand Duke Alexei Alexandrovich of Russia: Grand Duke Alexei Alexandrovich of Russia,(Russian: Алексей Александрович; 14 January 1850 (2 January O.S.) in St. Petersburg – 14 November 1908 in Paris) was the fifth child and the fourth son of Alexander II of Russia and his first wife Maria Alexandrovna (Marie of Hesse).\n", + "Q: What is the cause of death of Grand Duke Alexei Alexandrovich Of Russia's mother?\n", + "A: The mother of Grand Duke Alexei Alexandrovich of Russia is Maria Alexandrovna. Maria Alexandrovna died from tuberculosis. So the answer is: tuberculosis.\n", + "\n", + "\n", + "Context:\n", + "Laughter in Hell: Laughter in Hell is a 1933 American Pre-Code drama film directed by Edward L. Cahn and starring Pat O'Brien. The film's title was typical of the sensationalistic titles of many Pre-Code films.\n", + "Edward L. Cahn: Edward L. Cahn (February 12, 1899 – August 25, 1963) was an American film director.\n", + "Q: When did the director of film Laughter In Hell die?\n", + "A: The film Laughter In Hell was directed by Edward L. Cahn. Edward L. Cahn died on August 25, 1963. So the answer is: August 25, 1963.\n", + "\n", + "Second, please complete the answer by thinking step-by-step.\n", + "\n", + "Context:\n", + "The Mask of Fu Manchu: The Mask of Fu Manchu is a 1932 pre-Code adventure film directed by Charles Brabin. It was written by Irene Kuhn, Edgar Allan Woolf and John Willard based on the 1932 novel of the same name by Sax Rohmer. Starring Boris Karloff as Fu Manchu, and featuring Myrna Loy as his depraved daughter, the movie revolves around Fu Manchu's quest for the golden sword and mask of Genghis Khan. Lewis Stone plays his nemesis. Dr. Petrie is absent from this film.\n", + "The Mysterious Dr. Fu Manchu: The Mysterious Dr. Fu Manchu is a 1929 American pre-Code drama film directed by Rowland V. Lee and starring Warner Oland as Dr. Fu Manchu. It was the first Fu Manchu film of the talkie era. Since this was during the transition period to sound, a silent version was also released in the United States.\n", + "The Face of Fu Manchu: The Face of Fu Manchu is a 1965 thriller film directed by Don Sharp and based on the characters created by Sax Rohmer. 
It stars Christopher Lee as the eponymous villain, a Chinese criminal mastermind, and Nigel Green as his pursuing rival Nayland Smith, a Scotland Yard detective. The film was a British- West German co-production, and was the first in a five- part series starring Lee and produced by Harry Alan Towers for Constantin Film, the second of which was\" The Brides of Fu Manchu\" released the next year, with the final entry being\" The Castle of Fu Manchu\" in 1969. It was shot in Technicolor and Techniscope, on- location in County Dublin, Ireland.\n", + "The Return of Dr. Fu Manchu: The Return of Dr. Fu Manchu is a 1930 American pre-Code film directed by Rowland V. Lee. It is the second of three films starring Warner Oland as the fiendish Fu Manchu, who returns from apparent death in the previous film,\" The Mysterious Dr. Fu Manchu\"( 1929), to seek revenge on those he holds responsible for the death of his wife and child.\n", + "The Vengeance of Fu Manchu: The Vengeance of Fu Manchu is a 1967 British film directed by Jeremy Summers and starring Christopher Lee, Horst Frank, Douglas Wilmer and Tsai Chin. It was the third British/ West German Constantin Film co-production of the Dr. Fu Manchu series and the first to be filmed in Hong Kong. It was generally released in the U.K. through Warner- Pathé( as a support feature to the Lindsay Shonteff film\" The Million Eyes of Sumuru\") on 3 December 1967.\n", + "The Brides of Fu Manchu: The Brides of Fu Manchu is a 1966 British/ West German Constantin Film co-production adventure crime film based on the fictional Chinese villain Dr. Fu Manchu, created by Sax Rohmer. It was the second film in a series, and was preceded by\" The Face of Fu ManchuThe Vengeance of Fu Manchu\" followed in 1967,\" The Blood of Fu Manchu\" in 1968, and\" The Castle of Fu Manchu\" in 1969. It was produced by Harry Alan Towers for Hallam Productions. Like the first film, it was directed by Don Sharp, and starred Christopher Lee as Fu Manchu. Nigel Green was replaced by Douglas Wilmer as Scotland Yard detective Nayland Smith. The action takes place mainly in London, where much of the location filming took place.\n", + "The Castle of Fu Manchu: The Castle of Fu Manchu( also known as The Torture Chamber of Dr. Fu Manchu and also known by its German title Die Folterkammer des Dr. Fu Man Chu) is a 1969 film and the fifth and final Dr. Fu Manchu film with Christopher Lee portraying the title character.\n", + "The Blood of Fu Manchu: The Blood of Fu Manchu, also known as Fu Manchu and the Kiss of Death, Kiss of Death, Kiss and Kill( U.S. title) and Against All Odds( original U.S. video title), is a 1968 British adventure crime film directed by Jesús Franco, based on the fictional Asian villain Dr. Fu Manchu created by Sax Rohmer. It was the fourth film in a series, and was preceded by\" The Vengeance of Fu Manchu The Castle of Fu Manchu\" followed in 1969. It was produced by Harry Alan Towers for Udastex Films. It starred Christopher Lee as Dr. Fu Manchu, Richard Greene as Scotland Yard detective Nayland Smith, and Howard Marion- Crawford as Dr. Petrie. The movie was filmed in Spain and Brazil. Shirley Eaton appears in a scene that she claimed she was never paid for; apparently, the director Jesús Franco had inserted some stock footage of her from one of her films(\" The Girl from Rio\"( 1968)) into the film without telling her. 
She only found out years later that she had been in a Fu Manchu film.\n", + "Don Sharp: Donald Herman Sharp( 19 April 192114 December 2011) was an Australian- born British film director. His best known films were made for Hammer in the 1960s, and included\" The Kiss of the Vampire\"( 1962) and\" Rasputin, the Mad Monk\"( 1966). In 1965 he directed\" The Face of Fu Manchu\", based on the character created by Sax Rohmer, and starring Christopher Lee. Sharp also directed the sequel\" The Brides of Fu Manchu\"( 1966). In the 1980s he was also responsible for several hugely popular miniseries adapted from the novels of Barbara Taylor Bradford.\n", + "Blind Shaft: Blind Shaft is a 2003 film about a pair of brutal con artists operating in the illegal coal mines of present- day northern China. The film was written and directed by Li Yang( 李杨), and is based on Chinese writer Liu Qingbang's short novel\" Shen MuSacred Wood\").\n", + "\n", + "Q: Which film came out first, Blind Shaft or The Mask Of Fu Manchu?\n", + "A:\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "Blind Shaft is a 2003 film while The Mask of Fu Manchu is a 1932 pre-Code adventure film. Thus, The Mask of Fu Manchu came out earlier than Blind Shaft. So the answer is: The Mask of Fu Manchu.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\n", + "\n", + ">>>>>>>>>>>> Below are outputs of Case 2 <<<<<<<<<<<<\n", + "\n", + "\n", + "doc_ids: [['doc_74', 'doc_76', 'doc_68', 'doc_42890', 'doc_75', 'doc_19596', 'doc_45135', 'doc_995', 'doc_7274', 'doc_23187']]\n", + "\u001b[32mAdding doc_id doc_74 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_76 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_68 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_42890 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_75 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_19596 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_45135 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_995 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_7274 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_23187 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the context provided by the user. You must think step-by-step.\n", + "First, please learn the following examples of context and question pairs and their corresponding answers.\n", + "\n", + "Context:\n", + "Kurram Garhi: Kurram Garhi is a small village located near the city of Bannu, which is the part of Khyber Pakhtunkhwa province of Pakistan. Its population is approximately 35000.\n", + "Trojkrsti: Trojkrsti is a village in Municipality of Prilep, Republic of Macedonia.\n", + "Q: Are both Kurram Garhi and Trojkrsti located in the same country?\n", + "A: Kurram Garhi is located in the country of Pakistan. Trojkrsti is located in the country of Republic of Macedonia. Thus, they are not in the same country. So the answer is: no.\n", + "\n", + "\n", + "Context:\n", + "Early Side of Later: Early Side of Later is the third studio album by English singer- songwriter Matt Goss. It was released on 21 June 2004 by Concept Music and reached No. 
78 on the UK Albums Chart.\n", + "What's Inside: What's Inside is the fourteenth studio album by British singer- songwriter Joan Armatrading.\n", + "Q: Which album was released earlier, What'S Inside or Cassandra'S Dream (Album)?\n", + "A: What's Inside was released in the year 1995. Cassandra's Dream (album) was released in the year 2008. Thus, of the two, the album to release earlier is What's Inside. So the answer is: What's Inside.\n", + "\n", + "\n", + "Context:\n", + "Maria Alexandrovna (Marie of Hesse): Maria Alexandrovna , born Princess Marie of Hesse and by Rhine (8 August 1824 – 3 June 1880) was Empress of Russia as the first wife of Emperor Alexander II.\n", + "Grand Duke Alexei Alexandrovich of Russia: Grand Duke Alexei Alexandrovich of Russia,(Russian: Алексей Александрович; 14 January 1850 (2 January O.S.) in St. Petersburg – 14 November 1908 in Paris) was the fifth child and the fourth son of Alexander II of Russia and his first wife Maria Alexandrovna (Marie of Hesse).\n", + "Q: What is the cause of death of Grand Duke Alexei Alexandrovich Of Russia's mother?\n", + "A: The mother of Grand Duke Alexei Alexandrovich of Russia is Maria Alexandrovna. Maria Alexandrovna died from tuberculosis. So the answer is: tuberculosis.\n", + "\n", + "\n", + "Context:\n", + "Laughter in Hell: Laughter in Hell is a 1933 American Pre-Code drama film directed by Edward L. Cahn and starring Pat O'Brien. The film's title was typical of the sensationalistic titles of many Pre-Code films.\n", + "Edward L. Cahn: Edward L. Cahn (February 12, 1899 – August 25, 1963) was an American film director.\n", + "Q: When did the director of film Laughter In Hell die?\n", + "A: The film Laughter In Hell was directed by Edward L. Cahn. Edward L. Cahn died on August 25, 1963. So the answer is: August 25, 1963.\n", + "\n", + "Second, please complete the answer by thinking step-by-step.\n", + "\n", + "Context:\n", + "Seoul High School: Seoul High School( Hangul: 서울고등학교) is a public high school located in the heart of Seoul, South Korea.\n", + "North Marion High School (Oregon): North Marion High School is a public high school in Aurora, Oregon, United States. The school is part of the North Marion School District with all four schools being located on the same campus. The school draws students from the cities of Aurora, Hubbard, and Donald as well as the communities of Broadacres and Butteville.\n", + "Marion High School (Kansas): Marion High School is a public high school in Marion, Kansas, USA. It is one of three schools operated by Marion USD 408, and is the sole high school in the district.\n", + "Northwest High School: Northwest High School or North West High School may refer to:\n", + "Marion High School (Indiana): Marion High School is a high school in Marion, Indiana with more than 1,000 students.\n", + "Macon County High School: Macon County High School is located in Montezuma, Georgia, United States, which is a part of Macon County. 
Enrollment as of the 2017- 2018 school year is 491.\n", + "Canyon High School (Ogden, Utah): Canyon High School was a high school in Ogden, Utah.\n", + "Northside High School: Northside High School or North Side High School or Northside Christian School or similar can refer to:\n", + "Springs Boys' High School: Springs Boys' High School is a high school in Springs, Gauteng, South Africa.\n", + "International School of Koje: International School of Koje( ISK) is a privately funded international school located in Geoje, South Korea.\n", + "\n", + "Q: Are North Marion High School (Oregon) and Seoul High School both located in the same country?\n", + "A:\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "No, North Marion High School (Oregon) is located in the United States, specifically in the state of Oregon, while Seoul High School is located in South Korea. So they are not in the same country.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32mUpdating context and resetting conversation.\u001b[0m\n", + "doc_ids: [['doc_76', 'doc_68', 'doc_74', 'doc_75', 'doc_19596', 'doc_42890', 'doc_24819', 'doc_69', 'doc_995', 'doc_7274']]\n", + "\u001b[32mAdding doc_id doc_24819 to context.\u001b[0m\n", + "\u001b[32mAdding doc_id doc_69 to context.\u001b[0m\n", + "\u001b[33mragproxyagent\u001b[0m (to assistant):\n", + "\n", + "You're a retrieve augmented chatbot. You answer user's questions based on your own knowledge and the context provided by the user. You must think step-by-step.\n", + "First, please learn the following examples of context and question pairs and their corresponding answers.\n", + "\n", + "Context:\n", + "Kurram Garhi: Kurram Garhi is a small village located near the city of Bannu, which is the part of Khyber Pakhtunkhwa province of Pakistan. Its population is approximately 35000.\n", + "Trojkrsti: Trojkrsti is a village in Municipality of Prilep, Republic of Macedonia.\n", + "Q: Are both Kurram Garhi and Trojkrsti located in the same country?\n", + "A: Kurram Garhi is located in the country of Pakistan. Trojkrsti is located in the country of Republic of Macedonia. Thus, they are not in the same country. So the answer is: no.\n", + "\n", + "\n", + "Context:\n", + "Early Side of Later: Early Side of Later is the third studio album by English singer- songwriter Matt Goss. It was released on 21 June 2004 by Concept Music and reached No. 78 on the UK Albums Chart.\n", + "What's Inside: What's Inside is the fourteenth studio album by British singer- songwriter Joan Armatrading.\n", + "Q: Which album was released earlier, What'S Inside or Cassandra'S Dream (Album)?\n", + "A: What's Inside was released in the year 1995. Cassandra's Dream (album) was released in the year 2008. Thus, of the two, the album to release earlier is What's Inside. So the answer is: What's Inside.\n", + "\n", + "\n", + "Context:\n", + "Maria Alexandrovna (Marie of Hesse): Maria Alexandrovna , born Princess Marie of Hesse and by Rhine (8 August 1824 – 3 June 1880) was Empress of Russia as the first wife of Emperor Alexander II.\n", + "Grand Duke Alexei Alexandrovich of Russia: Grand Duke Alexei Alexandrovich of Russia,(Russian: Алексей Александрович; 14 January 1850 (2 January O.S.) in St. 
Petersburg – 14 November 1908 in Paris) was the fifth child and the fourth son of Alexander II of Russia and his first wife Maria Alexandrovna (Marie of Hesse).\n", + "Q: What is the cause of death of Grand Duke Alexei Alexandrovich Of Russia's mother?\n", + "A: The mother of Grand Duke Alexei Alexandrovich of Russia is Maria Alexandrovna. Maria Alexandrovna died from tuberculosis. So the answer is: tuberculosis.\n", + "\n", + "\n", + "Context:\n", + "Laughter in Hell: Laughter in Hell is a 1933 American Pre-Code drama film directed by Edward L. Cahn and starring Pat O'Brien. The film's title was typical of the sensationalistic titles of many Pre-Code films.\n", + "Edward L. Cahn: Edward L. Cahn (February 12, 1899 – August 25, 1963) was an American film director.\n", + "Q: When did the director of film Laughter In Hell die?\n", + "A: The film Laughter In Hell was directed by Edward L. Cahn. Edward L. Cahn died on August 25, 1963. So the answer is: August 25, 1963.\n", + "\n", + "Second, please complete the answer by thinking step-by-step.\n", + "\n", + "Context:\n", + "Seoul High School: Seoul High School( Hangul: 서울고등학교) is a public high school located in the heart of Seoul, South Korea.\n", + "North Marion High School (Oregon): North Marion High School is a public high school in Aurora, Oregon, United States. The school is part of the North Marion School District with all four schools being located on the same campus. The school draws students from the cities of Aurora, Hubbard, and Donald as well as the communities of Broadacres and Butteville.\n", + "Marion High School (Kansas): Marion High School is a public high school in Marion, Kansas, USA. It is one of three schools operated by Marion USD 408, and is the sole high school in the district.\n", + "Northwest High School: Northwest High School or North West High School may refer to:\n", + "Marion High School (Indiana): Marion High School is a high school in Marion, Indiana with more than 1,000 students.\n", + "Macon County High School: Macon County High School is located in Montezuma, Georgia, United States, which is a part of Macon County. Enrollment as of the 2017- 2018 school year is 491.\n", + "Canyon High School (Ogden, Utah): Canyon High School was a high school in Ogden, Utah.\n", + "Northside High School: Northside High School or North Side High School or Northside Christian School or similar can refer to:\n", + "Springs Boys' High School: Springs Boys' High School is a high school in Springs, Gauteng, South Africa.\n", + "International School of Koje: International School of Koje( ISK) is a privately funded international school located in Geoje, South Korea.\n", + "Anderson High School (Anderson, Indiana): Anderson High School is a public high school located in Anderson, Indiana.\n", + "North Marion High School (West Virginia): North Marion High School is a public Double A (\"AA\") high school in the U.S. state of West Virginia, with a current enrollment of 851 students. North Marion High School is located approximately 4 miles from Farmington, West Virginia on US Route 250 north. While it is closer to the city of Mannington, West Virginia, and is often considered to be located in Rachel, West Virginia, the school mailing address is Farmington. Rachel is a small coal mining community located adjacent to the school, and is an unincorporated municipality. North Marion High School is represented as \"Grantville High School\" in the popular alternative history novel \"1632\" by writer Eric Flint. 
The novel is set in the fictional town of Grantville, which is based on the real town and surroundings of Mannington.\n", + "Q: Are North Marion High School (Oregon) and Seoul High School both located in the same country?\n", + "A:\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33massistant\u001b[0m (to ragproxyagent):\n", + "\n", + "North Marion High School (Oregon) is located in the country of United States. Seoul High School is located in the country of South Korea. Thus, they are not in the same country. So the answer is: no.\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "for i in range(len(questions)):\n", + " print(f\"\\n\\n>>>>>>>>>>>> Below are outputs of Case {i+1} <<<<<<<<<<<<\\n\\n\")\n", + "\n", + " # reset the assistant. Always reset the assistant before starting a new conversation.\n", + " assistant.reset()\n", + "\n", + " qa_problem = questions[i]\n", + " ragproxyagent.initiate_chat(assistant, message=ragproxyagent.message_generator, problem=qa_problem, n_results=10)" + ] + } + ], + "metadata": { + "front_matter": { + "description": "Explore the use of AutoGen's RetrieveChat for tasks like code generation from docstrings, answering complex questions with human feedback, and exploiting features like Update Context, custom prompts, and few-shot learning.", + "tags": [ + "RAG" + ] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + }, + "skip_test": "Requires interactive usage" + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebook/agentchat_qdrant_RetrieveChat.ipynb b/notebook/agentchat_qdrant_RetrieveChat.ipynb index 43272e2793e6..10d6b55e11c6 100644 --- a/notebook/agentchat_qdrant_RetrieveChat.ipynb +++ b/notebook/agentchat_qdrant_RetrieveChat.ipynb @@ -21,7 +21,7 @@ "Some extra dependencies are needed for this notebook, which can be installed via pip:\n", "\n", "```bash\n", - "pip install \"pyautogen[retrievechat]>=0.2.3\" \"flaml[automl]\" \"qdrant_client[fastembed]\"\n", + "pip install \"pyautogen[retrievechat-qdrant]\" \"flaml[automl]\"\n", "```\n", "\n", "For more information, please refer to the [installation guide](/docs/installation/).\n", @@ -196,7 +196,7 @@ } ], "source": [ - "%pip install \"pyautogen[retrievechat]>=0.2.3\" \"flaml[automl]\" \"qdrant_client[fastembed]\"" + "%pip install \"pyautogen[retrievechat-qdrant]\" \"flaml[automl]\"" ] }, { diff --git a/setup.py b/setup.py index f8d0067d959b..cea36b7052d4 100644 --- a/setup.py +++ b/setup.py @@ -60,6 +60,23 @@ "blendsearch": ["flaml[blendsearch]"], "mathchat": ["sympy", "pydantic==1.10.9", "wolframalpha"], "retrievechat": ["chromadb", "sentence_transformers", "pypdf", "ipython", "beautifulsoup4", "markdownify"], + "retrievechat-pgvector": [ + "pgvector>=0.2.5", + "psycopg>=3.1.18", + "sentence_transformers", + "pypdf", + "ipython", + "beautifulsoup4", + "markdownify", + ], + "retrievechat-qdrant": [ + "qdrant_client[fastembed]", + "sentence_transformers", + "pypdf", + "ipython", + "beautifulsoup4", + "markdownify", + ], "autobuild": ["chromadb", "sentence-transformers", "huggingface-hub"], "teachable": ["chromadb"], "lmm": 
["replicate", "pillow"], diff --git a/test/agentchat/contrib/test_pgvector_retrievechat.py b/test/agentchat/contrib/test_pgvector_retrievechat.py new file mode 100644 index 000000000000..f4a1247ac652 --- /dev/null +++ b/test/agentchat/contrib/test_pgvector_retrievechat.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python3 -m pytest + +import os +import sys + +import pytest +from sentence_transformers import SentenceTransformer + +from autogen import config_list_from_json +from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent + +sys.path.append(os.path.join(os.path.dirname(__file__), "../..")) +from conftest import skip_openai # noqa: E402 + +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) +from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST # noqa: E402 + +try: + import pgvector + + from autogen.agentchat.contrib.retrieve_assistant_agent import ( + RetrieveAssistantAgent, + ) + from autogen.agentchat.contrib.retrieve_user_proxy_agent import ( + RetrieveUserProxyAgent, + ) +except ImportError: + skip = True +else: + skip = False or skip_openai + + +test_dir = os.path.join(os.path.dirname(__file__), "../..", "test_files") + + +@pytest.mark.skipif( + skip, + reason="dependency is not installed OR requested to skip", +) +def test_retrievechat(): + conversations = {} + # ChatCompletion.start_logging(conversations) # deprecated in v0.2 + + config_list = config_list_from_json( + OAI_CONFIG_LIST, + file_location=KEY_LOC, + ) + + assistant = RetrieveAssistantAgent( + name="assistant", + system_message="You are a helpful assistant.", + llm_config={ + "timeout": 600, + "seed": 42, + "config_list": config_list, + }, + ) + + sentence_transformer_ef = SentenceTransformer("all-MiniLM-L6-v2") + ragproxyagent = RetrieveUserProxyAgent( + name="ragproxyagent", + human_input_mode="NEVER", + max_consecutive_auto_reply=3, + retrieve_config={ + "task": "code", + "docs_path": [ + "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Examples/Integrate%20-%20Spark.md", + "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Research.md", + "https://raw.githubusercontent.com/Knuckles-Team/geniusbot/main/README.md", + "https://raw.githubusercontent.com/Knuckles-Team/repository-manager/main/README.md", + "https://raw.githubusercontent.com/Knuckles-Team/gitlab-api/main/README.md", + "https://raw.githubusercontent.com/Knuckles-Team/media-downloader/main/README.md", + os.path.join(os.path.abspath(""), "..", "website", "docs"), + ], + "custom_text_types": ["non-existent-type"], + "chunk_token_size": 2000, + "model": config_list[0]["model"], + "vector_db": "pgvector", # PGVector database + "collection_name": "test_collection", + "db_config": { + "connection_string": "postgresql://postgres:postgres@localhost:5432/postgres", + }, + "embedding_function": sentence_transformer_ef, + "get_or_create": True, # set to False if you don't want to reuse an existing collection + "overwrite": False, # set to True if you want to overwrite an existing collection + }, + code_execution_config=False, # set to False if you don't want to execute the code + ) + + assistant.reset() + + code_problem = "How can I use FLAML to perform a classification task, set use_spark=True, train 30 seconds and force cancel jobs if time limit is reached." 
+ ragproxyagent.initiate_chat( + assistant, message=ragproxyagent.message_generator, problem=code_problem, search_string="spark", silent=True + ) + + print(conversations) + + +@pytest.mark.skipif( + skip, + reason="dependency is not installed", +) +def test_retrieve_config(caplog): + # test warning message when no docs_path is provided + ragproxyagent = RetrieveUserProxyAgent( + name="ragproxyagent", + human_input_mode="NEVER", + max_consecutive_auto_reply=2, + retrieve_config={ + "chunk_token_size": 2000, + "get_or_create": True, + }, + ) + + # Capture the printed content + captured_logs = caplog.records[0] + print(captured_logs) + + # Assert on the printed content + assert ( + f"docs_path is not provided in retrieve_config. Will raise ValueError if the collection `{ragproxyagent._collection_name}` doesn't exist." + in captured_logs.message + ) + assert captured_logs.levelname == "WARNING" + + +if __name__ == "__main__": + test_retrievechat() + test_retrieve_config() diff --git a/test/agentchat/contrib/vectordb/test_pgvectordb.py b/test/agentchat/contrib/vectordb/test_pgvectordb.py new file mode 100644 index 000000000000..3eafd2df2d52 --- /dev/null +++ b/test/agentchat/contrib/vectordb/test_pgvectordb.py @@ -0,0 +1,82 @@ +import os +import sys + +import pytest + +sys.path.append(os.path.join(os.path.dirname(__file__), "..")) + +try: + import pgvector + import sentence_transformers + + from autogen.agentchat.contrib.vectordb.pgvector import PGVector +except ImportError: + skip = True +else: + skip = False + + +@pytest.mark.skipif(skip, reason="dependency is not installed OR requested to skip") +def test_pgvector(): + # test create collection + db_config = { + "connection_string": "postgresql://postgres:postgres@localhost:5432/postgres", + } + + db = PGVector(connection_string=db_config["connection_string"]) + collection_name = "test_collection" + collection = db.create_collection(collection_name, overwrite=True, get_or_create=True) + assert collection.name == collection_name + + # test_delete_collection + db.delete_collection(collection_name) + pytest.raises(ValueError, db.get_collection, collection_name) + + # test more create collection + collection = db.create_collection(collection_name, overwrite=False, get_or_create=False) + assert collection.name == collection_name + pytest.raises(ValueError, db.create_collection, collection_name, overwrite=False, get_or_create=False) + collection = db.create_collection(collection_name, overwrite=True, get_or_create=False) + assert collection.name == collection_name + collection = db.create_collection(collection_name, overwrite=False, get_or_create=True) + assert collection.name == collection_name + + # test_get_collection + collection = db.get_collection(collection_name) + assert collection.name == collection_name + + # test_insert_docs + docs = [{"content": "doc1", "id": "1"}, {"content": "doc2", "id": "2"}, {"content": "doc3", "id": "3"}] + db.insert_docs(docs, collection_name, upsert=False) + res = db.get_collection(collection_name).get(["1", "2"]) + assert res["documents"] == ["doc1", "doc2"] + + # test_update_docs + docs = [{"content": "doc11", "id": "1"}, {"content": "doc2", "id": "2"}, {"content": "doc3", "id": "3"}] + db.update_docs(docs, collection_name) + res = db.get_collection(collection_name).get(["1", "2"]) + assert res["documents"] == ["doc11", "doc2"] + + # test_delete_docs + ids = ["1"] + collection_name = "test_collection" + db.delete_docs(ids, collection_name) + res = db.get_collection(collection_name).get(ids) + assert 
res["documents"] == [] + + # test_retrieve_docs + queries = ["doc2", "doc3"] + collection_name = "test_collection" + res = db.retrieve_docs(queries, collection_name) + assert [[r[0]["id"] for r in rr] for rr in res] == [["2", "3"], ["3", "2"]] + res = db.retrieve_docs(queries, collection_name, distance_threshold=0.1) + print(res) + assert [[r[0]["id"] for r in rr] for rr in res] == [["2"], ["3"]] + + # test_get_docs_by_ids + res = db.get_docs_by_ids(["1", "2"], collection_name) + assert [r["id"] for r in res] == ["2"] # "1" has been deleted + + +if __name__ == "__main__": + test_pgvector() diff --git a/website/docs/ecosystem/pgvector.md b/website/docs/ecosystem/pgvector.md new file mode 100644 index 000000000000..c455595eda4e --- /dev/null +++ b/website/docs/ecosystem/pgvector.md @@ -0,0 +1,5 @@ +# PGVector + +[PGVector](https://github.com/pgvector/pgvector) is an open-source vector similarity search for Postgres. + +- [PGVector + AutoGen Code Examples](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_pgvector_RetrieveChat.ipynb) diff --git a/website/docs/installation/Optional-Dependencies.md b/website/docs/installation/Optional-Dependencies.md index 6efdfa2e2ab5..617a90aabaee 100644 --- a/website/docs/installation/Optional-Dependencies.md +++ b/website/docs/installation/Optional-Dependencies.md @@ -44,12 +44,22 @@ Example notebooks: ## retrievechat -`pyautogen` supports retrieval-augmented generation tasks such as question answering and code generation with RAG agents. Please install with the [retrievechat] option to use it. +`pyautogen` supports retrieval-augmented generation tasks such as question answering and code generation with RAG agents. Please install with the [retrievechat] option to use it with ChromaDB. ```bash pip install "pyautogen[retrievechat]" ``` +Alternatively `pyautogen` also supports PGVector and Qdrant which can be installed in place of ChromaDB, or alongside it. + +```bash +pip install "pyautogen[retrievechat-pgvector]" +``` + +```bash +pip install "pyautogen[retrievechat-qdrant]" +``` + RetrieveChat can handle various types of documents. By default, it can process plain text and PDF files, including formats such as 'txt', 'json', 'csv', 'tsv', 'md', 'html', 'htm', 'rtf', 'rst', 'jsonl', 'log', 'xml', 'yaml', 'yml' and 'pdf'. diff --git a/website/docs/topics/retrieval_augmentation.md b/website/docs/topics/retrieval_augmentation.md index 7de22cce84f6..ca7222a404f3 100644 --- a/website/docs/topics/retrieval_augmentation.md +++ b/website/docs/topics/retrieval_augmentation.md @@ -53,12 +53,73 @@ ragproxyagent.initiate_chat( ) # search_string is used as an extra filter for the embeddings search, in this case, we only want to search documents that contain "spark". ``` +## Example Setup: RAG with Retrieval Augmented Agents with PGVector +The following is an example setup demonstrating how to create retrieval augmented agents in AutoGen: + +### Step 1. Create an instance of `RetrieveAssistantAgent` and `RetrieveUserProxyAgent`. + +Here `RetrieveUserProxyAgent` instance acts as a proxy agent that retrieves relevant information based on the user's input. + +Specify the connection_string, or the host, port, database, username, and password in the db_config. 
+ +```python +assistant = RetrieveAssistantAgent( + name="assistant", + system_message="You are a helpful assistant.", + llm_config={ + "timeout": 600, + "cache_seed": 42, + "config_list": config_list, + }, +) +ragproxyagent = RetrieveUserProxyAgent( + name="ragproxyagent", + human_input_mode="NEVER", + max_consecutive_auto_reply=3, + retrieve_config={ + "task": "code", + "docs_path": [ + "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Examples/Integrate%20-%20Spark.md", + "https://raw.githubusercontent.com/microsoft/FLAML/main/website/docs/Research.md", + os.path.join(os.path.abspath(""), "..", "website", "docs"), + ], + "vector_db": "pgvector", + "collection_name": "autogen_docs", + "db_config": { + "connection_string": "postgresql://testuser:testpwd@localhost:5432/vectordb", # Optional - connect to an external vector database + # "host": None, # Optional vector database host + # "port": None, # Optional vector database port + # "database": None, # Optional vector database name + # "username": None, # Optional vector database username + # "password": None, # Optional vector database password + }, + "custom_text_types": ["mdx"], + "chunk_token_size": 2000, + "model": config_list[0]["model"], + "get_or_create": True, + }, + code_execution_config=False, +) +``` + +### Step 2. Initiating Agent Chat with Retrieval Augmentation + +Once the retrieval augmented agents are set up, you can initiate a chat with retrieval augmentation using the following code: + +```python +code_problem = "How can I use FLAML to perform a classification task and use spark to do parallel training. Train 30 seconds and force cancel jobs if time limit is reached." +ragproxyagent.initiate_chat( + assistant, message=ragproxyagent.message_generator, problem=code_problem, search_string="spark" +) # search_string is used as an extra filter for the embeddings search, in this case, we only want to search documents that contain "spark". +``` + ## Online Demo [Retrival-Augmented Chat Demo on Huggingface](https://huggingface.co/spaces/thinkall/autogen-demos) ## More Examples and Notebooks For more detailed examples and notebooks showcasing the usage of retrieval augmented agents in AutoGen, refer to the following: - Automated Code Generation and Question Answering with Retrieval Augmented Agents - [View Notebook](/docs/notebooks/agentchat_RetrieveChat) +- Automated Code Generation and Question Answering with [PGVector](https://github.com/pgvector/pgvector) based Retrieval Augmented Agents - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_pgvector_RetrieveChat.ipynb) - Automated Code Generation and Question Answering with [Qdrant](https://qdrant.tech/) based Retrieval Augmented Agents - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_qdrant_RetrieveChat.ipynb) - Chat with OpenAI Assistant with Retrieval Augmentation - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_oai_assistant_retrieval.ipynb) - **RAG**: Group Chat with Retrieval Augmented Generation (with 5 group member agents and 1 manager agent) - [View Notebook](/docs/notebooks/agentchat_groupchat_RAG)
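
As noted in Step 1 above, `db_config` accepts either a single `connection_string` or the individual connection parameters. The snippet below is a minimal sketch of the parameter-based form and of assembling the equivalent connection string from the same values; the host, port, database name, and credentials are placeholders matching the example above, not values required by AutoGen.

```python
# Minimal sketch with placeholder credentials: the same PGVector connection can be
# described either with individual parameters or with a single connection string.
db_config = {
    "host": "localhost",      # vector database host
    "port": 5432,             # vector database port
    "database": "vectordb",   # vector database name
    "username": "testuser",   # vector database username
    "password": "testpwd",    # vector database password
}

# The equivalent connection string assembled from the same parameters.
connection_string = (
    f"postgresql://{db_config['username']}:{db_config['password']}"
    f"@{db_config['host']}:{db_config['port']}/{db_config['database']}"
)
print(connection_string)  # postgresql://testuser:testpwd@localhost:5432/vectordb
```

Whichever form you choose goes under the `db_config` key of `retrieve_config`, as in the Step 1 example.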
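
Because the PGVector backend relies on the `vector` extension being available in the target PostgreSQL database, it can be worth confirming the connection and the extension before initiating the chat. The following is a sketch only, assuming the placeholder connection string above and a role with permission to create extensions; it is not part of the AutoGen API.

```python
# Sketch: check that PostgreSQL is reachable and the pgvector extension is enabled.
# Assumes the placeholder credentials used elsewhere on this page.
import psycopg

with psycopg.connect("postgresql://testuser:testpwd@localhost:5432/vectordb") as conn:
    conn.execute("CREATE EXTENSION IF NOT EXISTS vector;")
    row = conn.execute(
        "SELECT extversion FROM pg_extension WHERE extname = 'vector';"
    ).fetchone()
    print(f"pgvector version: {row[0] if row else 'not installed'}")
```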
    # Event year Season Ceremony Flag bearer Sex State / Country Sport
    62 2018 Winter Closing Diggins , Jessica Jessica Diggins Minnesota Cross-country skiing
    61 2018 Winter Opening Hamlin , Erin Erin Hamlin New York Luge
    60 2016 Summer Closing Biles , Simone Simone Biles Texas Gymnastics
    59 2016 Summer Opening Phelps , Michael Michael Phelps Maryland Swimming
    58 2014 Winter Closing Chu , Julie Julie Chu Connecticut Hockey
    57 2014 Winter Opening Lodwick , Todd Todd Lodwick Colorado Nordic combined
    56 2012 Summer Closing Nellum , Bryshon Bryshon Nellum California Athletics
    55 2012 Summer Opening Zagunis , Mariel Mariel Zagunis Oregon Fencing
    54 Winter Closing Demong , Bill Bill Demong New York Nordic combined
    53 Winter Opening Grimmette , Mark Mark Grimmette Michigan Luge
    52 2008 Summer Closing Lorig , Khatuna Khatuna Lorig Georgia ( country ) Archery
    51 2008 Summer Opening Lomong , Lopez Lopez Lomong Sudan ( now South Sudan ) Athletics
    50 2006 Winter Closing Cheek , Joey Joey Cheek North Carolina Speed skating
    49 2006 Winter Opening Witty , Chris Chris Witty Wisconsin Speed skating
    48 Summer Closing Hamm , Mia Mia Hamm Texas Women 's soccer
    47 Summer Opening Staley , Dawn Dawn Staley Pennsylvania Basketball
    46 2002 Winter Closing Shimer , Brian Brian Shimer Florida Bobsleigh
    45 2002 Winter Opening Peterson , Amy Amy Peterson Minnesota Short track speed skating
    44 2000 Summer Closing Gardner , Rulon Rulon Gardner Wyoming Wrestling
    43 2000 Summer Opening Meidl , Cliff Cliff Meidl California Canoeing
    42 1998 Winter Closing Granato , Cammi Cammi Granato Illinois Hockey
    41 1998 Winter Opening Flaim , Eric Eric Flaim Massachusetts Speed skating
    40 Summer Closing Matz , Michael Michael Matz Pennsylvania Equestrian
    39 Summer Opening Baumgartner , Bruce Bruce Baumgartner New Jersey Wrestling
    38 1994 Winter Closing Jansen , Dan Dan Jansen Wisconsin Speed skating
    37 1994 Winter Opening Myler , Cammy Cammy Myler New York