
Commit e17a5fc

fix workflow deployment
masci committed Oct 19, 2024
1 parent 5c03629 commit e17a5fc
Showing 3 changed files with 18 additions and 11 deletions.
examples/python_fullstack/docker-compose.yml (7 changes: 4 additions & 3 deletions)
@@ -38,17 +38,18 @@ services:
       retries: 5
     volumes:
       - ./:/opt/app
+    working_dir: /opt/app

   deploy_workflows:
     # Init container, it deploys python_fullstack.yaml and exits
     image: llamaindex/llama-deploy:main
     volumes:
-      - ./:/opt/app
+      - ./python_fullstack.yaml:/opt/python_fullstack.yaml
+    working_dir: /opt/
     depends_on:
       apiserver:
         condition: service_healthy
-    working_dir: /opt/app
-    entrypoint: llamactl -s http://apiserver:4501 -t 20 deploy python_fullstack.yaml
+    entrypoint: llamactl -s http://apiserver:4501 -t 30 deploy python_fullstack.yaml

   frontend:
     # UI for this deployment, running at http://localhost:3000
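
Note on the change above: startup ordering is handled by depends_on with condition: service_healthy, and the llamactl value is raised from -t 20 to -t 30 (read here as a timeout, judging by the bump) so a slow apiserver start no longer breaks the deploy. The Python sketch below mimics what the init container does (push the deployment file once, then exit), purely as an illustration; the retry loop and the 5-second pause are assumptions added here, not part of the actual entrypoint.

import subprocess
import sys
import time

# llamactl invocation copied from the compose entrypoint above
DEPLOY_CMD = [
    "llamactl",
    "-s", "http://apiserver:4501",
    "-t", "30",
    "deploy", "python_fullstack.yaml",
]

exit_code = 1
for attempt in range(5):  # retry loop is illustrative, not in the real entrypoint
    exit_code = subprocess.run(DEPLOY_CMD).returncode
    if exit_code == 0:
        break  # deployment accepted, nothing left to do
    time.sleep(5)  # give the apiserver a bit more time to come up

sys.exit(exit_code)
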
examples/python_fullstack/python_fullstack.yaml (10 changes: 8 additions & 2 deletions)
@@ -10,12 +10,18 @@ services:
     name: Agentic Workflow
     source:
       type: local
-      name: .
+      name: ./workflows
     path: workflows:agentic_w
+    python-dependencies:
+      - llama-index-postprocessor-rankgpt-rerank>=0.2.0
+      - llama-index-vector-stores-qdrant>=0.3.0
+      - llama-index-llms-openai>=0.2.2
+      - llama-index-embeddings-openai>=0.2.4
+      - llama-index-readers-file>=0.2.0

   rag_workflow:
     name: RAG Workflow
     source:
       type: local
-      name: .
+      name: ./workflows
     path: workflows:rag_w
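
For orientation, the path values above (workflows:agentic_w, workflows:rag_w) appear to follow a module:attribute convention: the part before the colon names a Python module (here the workflows package shipped as the ./workflows source), the part after names the workflow object inside it. The resolver below is a minimal, hypothetical sketch of that convention for illustration only; llama_deploy ships its own loader.

import importlib
from typing import Any


def resolve_service_path(path: str) -> Any:
    """Import the module named before the colon, return the attribute named after it."""
    module_name, _, attr_name = path.partition(":")
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)


# e.g. resolve_service_path("workflows:rag_w") would import the workflows
# package and return its rag_w workflow instance.
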
examples/python_fullstack/workflows/rag_workflow.py (12 changes: 6 additions & 6 deletions)
@@ -1,25 +1,24 @@
 import os
 from logging import getLogger

-from qdrant_client import QdrantClient, AsyncQdrantClient
+from pathlib import Path

 from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
 from llama_index.core.node_parser import SemanticSplitterNodeParser, SentenceSplitter
 from llama_index.core.response_synthesizers import CompactAndRefine
+from llama_index.core.schema import NodeWithScore
 from llama_index.core.workflow import (
     Context,
     Event,
-    Workflow,
     StartEvent,
     StopEvent,
+    Workflow,
     step,
 )
-from llama_index.core.schema import NodeWithScore
 from llama_index.embeddings.openai import OpenAIEmbedding
 from llama_index.llms.openai import OpenAI
 from llama_index.postprocessor.rankgpt_rerank import RankGPTRerank
 from llama_index.vector_stores.qdrant import QdrantVectorStore

+from qdrant_client import AsyncQdrantClient, QdrantClient

 logger = getLogger(__name__)

@@ -105,8 +104,9 @@ def build_rag_workflow() -> RAGWorkflow:
     )

     # ensure the index exists
+    here = Path(__file__).parent
     if not client.collection_exists("papers"):
-        documents = SimpleDirectoryReader("data").load_data()
+        documents = SimpleDirectoryReader(here / "data").load_data()

         # Double pass chunking
         first_node_parser = SemanticSplitterNodeParser(
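
The Path change in this file makes document loading independent of the process working directory: the example's data folder is now found next to the workflow module rather than relative to wherever the apiserver process happens to run. A minimal standalone sketch of the same pattern:

from pathlib import Path

# Resolve the data directory relative to this file, not the current working
# directory, so the code behaves the same inside a container and on a laptop.
DATA_DIR = Path(__file__).parent / "data"

if not DATA_DIR.is_dir():
    raise FileNotFoundError(f"expected sample documents in {DATA_DIR}")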
