From cd7c586310f6e53bf9dfa7873e4353b18048a3a8 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 17 Jun 2024 00:26:43 +0800 Subject: [PATCH 01/65] feat: add llm ops tracing --- api/controllers/console/__init__.py | 1 + api/controllers/console/app/error.py | 12 + api/controllers/console/app/ops_trace.py | 79 ++ api/core/agent/cot_agent_runner.py | 24 +- api/core/agent/fc_agent_runner.py | 13 +- api/core/app/app_config/entities.py | 10 +- .../app/apps/advanced_chat/app_generator.py | 26 +- api/core/app/apps/advanced_chat/app_runner.py | 18 +- .../advanced_chat/generate_task_pipeline.py | 74 +- api/core/app/apps/agent_chat/app_generator.py | 21 +- api/core/app/apps/agent_chat/app_runner.py | 17 +- api/core/app/apps/base_app_runner.py | 17 +- api/core/app/apps/chat/app_generator.py | 24 +- api/core/app/apps/chat/app_runner.py | 4 +- api/core/app/apps/completion/app_generator.py | 19 +- api/core/app/apps/completion/app_runner.py | 4 +- .../app/apps/message_based_app_generator.py | 38 +- api/core/app/apps/workflow/app_generator.py | 46 +- .../apps/workflow/generate_task_pipeline.py | 20 +- .../easy_ui_based_generate_task_pipeline.py | 31 +- .../task_pipeline/workflow_cycle_manage.py | 82 +- .../agent_tool_callback_handler.py | 19 + api/core/llm_generator/llm_generator.py | 52 +- api/core/moderation/input_moderation.py | 40 +- api/core/rag/retrieval/dataset_retrieval.py | 141 ++-- api/core/tools/tool/workflow_tool.py | 10 +- api/core/tools/tool_engine.py | 33 +- api/core/workflow/nodes/tool/tool_node.py | 10 +- ...9b_update_appmodelconfig_and_add_table_.py | 49 ++ api/models/model.py | 36 +- api/pyproject.toml | 2 + api/services/app_generate_service.py | 3 +- api/services/conversation_service.py | 2 +- api/services/message_service.py | 27 +- api/services/ops_trace/base_trace_instance.py | 31 + api/services/ops_trace/langfuse_trace.py | 712 ++++++++++++++++++ api/services/ops_trace/langsmith_trace.py | 545 ++++++++++++++ api/services/ops_trace/ops_trace_service.py | 321 ++++++++ api/services/ops_trace/trace_queue_manager.py | 133 ++++ api/services/ops_trace/utils.py | 28 + 40 files changed, 2531 insertions(+), 243 deletions(-) create mode 100644 api/controllers/console/app/ops_trace.py create mode 100644 api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py create mode 100644 api/services/ops_trace/base_trace_instance.py create mode 100644 api/services/ops_trace/langfuse_trace.py create mode 100644 api/services/ops_trace/langsmith_trace.py create mode 100644 api/services/ops_trace/ops_trace_service.py create mode 100644 api/services/ops_trace/trace_queue_manager.py create mode 100644 api/services/ops_trace/utils.py diff --git a/api/controllers/console/__init__.py b/api/controllers/console/__init__.py index 29eac070a08fcb..8c67fef95f5f4c 100644 --- a/api/controllers/console/__init__.py +++ b/api/controllers/console/__init__.py @@ -20,6 +20,7 @@ generator, message, model_config, + ops_trace, site, statistic, workflow, diff --git a/api/controllers/console/app/error.py b/api/controllers/console/app/error.py index fbe42fbd2a7135..cd7919b5e426bd 100644 --- a/api/controllers/console/app/error.py +++ b/api/controllers/console/app/error.py @@ -97,3 +97,15 @@ class DraftWorkflowNotSync(BaseHTTPException): error_code = 'draft_workflow_not_sync' description = "Workflow graph might have been modified, please refresh and resubmit." 
     code = 400
+
+
+class TracingConfigNotExist(BaseHTTPException):
+    error_code = 'trace_config_not_exist'
+    description = "Trace config does not exist."
+    code = 400
+
+
+class TracingConfigIsExist(BaseHTTPException):
+    error_code = 'trace_config_is_exist'
+    description = "Trace config already exists."
+    code = 400
diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py
new file mode 100644
index 00000000000000..c4b3641b14ffec
--- /dev/null
+++ b/api/controllers/console/app/ops_trace.py
@@ -0,0 +1,79 @@
+from flask_restful import Resource, reqparse
+
+from controllers.console import api
+from controllers.console.app.error import TracingConfigIsExist, TracingConfigNotExist
+from controllers.console.setup import setup_required
+from controllers.console.wraps import account_initialization_required
+from libs.login import login_required
+from services.ops_trace.ops_trace_service import OpsTraceService
+
+
+class TraceAppConfigApi(Resource):
+    """
+    Manage trace app configurations
+    """
+
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def get(self, app_id):
+        parser = reqparse.RequestParser()
+        parser.add_argument('tracing_provider', type=str, required=True, location='args')
+        args = parser.parse_args()
+
+        try:
+            trace_config = OpsTraceService.get_tracing_app_config(
+                app_id=app_id, tracing_provider=args['tracing_provider']
+            )
+            if not trace_config:
+                raise TracingConfigNotExist()
+            return trace_config
+        except Exception as e:
+            raise e
+
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def post(self, app_id):
+        """Create a new trace app configuration"""
+        parser = reqparse.RequestParser()
+        parser.add_argument('tracing_provider', type=str, required=True, location='json')
+        parser.add_argument('tracing_config', type=dict, required=True, location='json')
+        args = parser.parse_args()
+
+        try:
+            result = OpsTraceService.create_tracing_app_config(
+                app_id=app_id,
+                tracing_provider=args['tracing_provider'],
+                tracing_config=args['tracing_config']
+            )
+            if not result:
+                raise TracingConfigIsExist()
+            return {"result": "success"}
+        except Exception as e:
+            raise e
+
+    @setup_required
+    @login_required
+    @account_initialization_required
+    def put(self, app_id):
+        """Update an existing trace app configuration"""
+        parser = reqparse.RequestParser()
+        parser.add_argument('tracing_provider', type=str, required=True, location='json')
+        parser.add_argument('tracing_config', type=dict, required=True, location='json')
+        args = parser.parse_args()
+
+        try:
+            result = OpsTraceService.update_tracing_app_config(
+                app_id=app_id,
+                tracing_provider=args['tracing_provider'],
+                tracing_config=args['tracing_config']
+            )
+            if not result:
+                raise TracingConfigNotExist()
+            return {"result": "success"}
+        except Exception as e:
+            raise e
+
+
+api.add_resource(TraceAppConfigApi, '/apps/<uuid:app_id>/trace-config')
diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py
index 982477138b5261..31d1ec0cdb23df 100644
--- a/api/core/agent/cot_agent_runner.py
+++ b/api/core/agent/cot_agent_runner.py
@@ -1,7 +1,7 @@
 import json
 from abc import ABC, abstractmethod
 from collections.abc import Generator
-from typing import Union
+from typing import Optional, Union
 
 from core.agent.base_agent_runner import BaseAgentRunner
 from core.agent.entities import AgentScratchpadUnit
@@ -20,6 +20,7 @@
 from core.tools.tool.tool import Tool
 from core.tools.tool_engine import ToolEngine
 from models.model import Message
+from 
services.ops_trace.base_trace_instance import BaseTraceInstance class CotAgentRunner(BaseAgentRunner, ABC): @@ -32,9 +33,9 @@ class CotAgentRunner(BaseAgentRunner, ABC): _prompt_messages_tools: list[PromptMessage] = None def run(self, message: Message, - query: str, - inputs: dict[str, str], - ) -> Union[Generator, LLMResult]: + query: str, + inputs: dict[str, str], + ) -> Union[Generator, LLMResult]: """ Run Cot agent application """ @@ -183,7 +184,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): messages_ids=[], llm_usage=usage_dict['usage'] ) - + if not scratchpad.is_final(): self.queue_manager.publish(QueueAgentThoughtEvent( agent_thought_id=agent_thought.id @@ -209,7 +210,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): function_call_state = True # action is tool call, invoke tool tool_invoke_response, tool_invoke_meta = self._handle_invoke_action( - action=scratchpad.action, + action=scratchpad.action, tool_instances=tool_instances, message_file_ids=message_file_ids ) @@ -257,12 +258,12 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): # save agent thought self.save_agent_thought( - agent_thought=agent_thought, + agent_thought=agent_thought, tool_name='', tool_input={}, tool_invoke_meta={}, thought=final_answer, - observation={}, + observation={}, answer=final_answer, messages_ids=[] ) @@ -282,7 +283,9 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): def _handle_invoke_action(self, action: AgentScratchpadUnit.Action, tool_instances: dict[str, Tool], - message_file_ids: list[str]) -> tuple[str, ToolInvokeMeta]: + message_file_ids: list[str], + tracing_instance: Optional[BaseTraceInstance] = None + ) -> tuple[str, ToolInvokeMeta]: """ handle invoke action :param action: action @@ -312,7 +315,8 @@ def _handle_invoke_action(self, action: AgentScratchpadUnit.Action, tenant_id=self.tenant_id, message=self.message, invoke_from=self.application_generate_entity.invoke_from, - agent_tool_callback=self.agent_callback + agent_tool_callback=self.agent_callback, + tracing_instance=tracing_instance, ) # publish files diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py index d7b063eb92ec55..e64722d22ca58d 100644 --- a/api/core/agent/fc_agent_runner.py +++ b/api/core/agent/fc_agent_runner.py @@ -20,7 +20,9 @@ from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform from core.tools.entities.tool_entities import ToolInvokeMeta from core.tools.tool_engine import ToolEngine -from models.model import Message +from extensions.ext_database import db +from models.model import AppModelConfig, Message +from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -50,6 +52,14 @@ def run(self, } final_answer = '' + # get tracing instance + app_id = app_config.app_id + app_model_config_id = app_config.app_model_config_id + app_model_config = db.session.query(AppModelConfig).filter_by(id=app_model_config_id).first() + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_id, app_model_config=app_model_config + ) + def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): if not final_llm_usage_dict['usage']: final_llm_usage_dict['usage'] = usage @@ -243,6 +253,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): message=self.message, invoke_from=self.application_generate_entity.invoke_from, 
agent_tool_callback=self.agent_callback, + tracing_instance=tracing_instance ) # publish files for message_file, save_as in message_files: diff --git a/api/core/app/app_config/entities.py b/api/core/app/app_config/entities.py index d6b6d894166d7e..6b58df617d7825 100644 --- a/api/core/app/app_config/entities.py +++ b/api/core/app/app_config/entities.py @@ -183,6 +183,14 @@ class TextToSpeechEntity(BaseModel): language: Optional[str] = None +class TracingConfigEntity(BaseModel): + """ + Tracing Config Entity. + """ + enabled: bool + tracing_provider: str + + class FileExtraConfig(BaseModel): """ File Upload Entity. @@ -199,7 +207,7 @@ class AppAdditionalFeatures(BaseModel): more_like_this: bool = False speech_to_text: bool = False text_to_speech: Optional[TextToSpeechEntity] = None - + trace_config: Optional[TracingConfigEntity] = None class AppConfig(BaseModel): """ diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index 3b1ee3578dea3c..a06ae902fed5db 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -3,7 +3,7 @@ import threading import uuid from collections.abc import Generator -from typing import Union +from typing import Any, Optional, Union from flask import Flask, current_app from pydantic import ValidationError @@ -29,13 +29,15 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator): - def generate(self, app_model: App, - workflow: Workflow, - user: Union[Account, EndUser], - args: dict, - invoke_from: InvokeFrom, - stream: bool = True) \ - -> Union[dict, Generator[dict, None, None]]: + def generate( + self, app_model: App, + workflow: Workflow, + user: Union[Account, EndUser], + args: dict, + invoke_from: InvokeFrom, + stream: bool = True, + tracing_instance: Optional[Any] = None + ) -> Union[dict, Generator[dict, None, None]]: """ Generate App response. 
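# --- Illustrative sketch, not part of the patch: how a caller might wire the
# --- new `tracing_instance` parameter into generate(). The lookup mirrors
# --- OpsTraceService.get_ops_trace_instance as used elsewhere in this patch;
# --- `app_model`, `workflow` and `current_user` are assumed to be resolved by
# --- the caller, and the args payload is a minimal example.
tracing_instance = OpsTraceService.get_ops_trace_instance(
    app_id=app_model.id, workflow=workflow
)
response = AdvancedChatAppGenerator().generate(
    app_model=app_model,
    workflow=workflow,
    user=current_user,
    args={'query': 'Hello', 'inputs': {}},
    invoke_from=InvokeFrom.DEBUGGER,
    stream=True,
    tracing_instance=tracing_instance,
)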
@@ -45,6 +47,7 @@ def generate(self, app_model: App, :param args: request args :param invoke_from: invoke from source :param stream: is stream + :param tracing_instance: tracing instance """ if not args.get('query'): raise ValueError('query is required') @@ -105,7 +108,8 @@ def generate(self, app_model: App, invoke_from=invoke_from, application_generate_entity=application_generate_entity, conversation=conversation, - stream=stream + stream=stream, + tracing_instance=tracing_instance, ) def single_iteration_generate(self, app_model: App, @@ -227,7 +231,7 @@ def _generate(self, app_model: App, conversation=conversation, message=message, user=user, - stream=stream + stream=stream, ) return AdvancedChatAppGenerateResponseConverter.convert( @@ -326,7 +330,7 @@ def _handle_advanced_chat_response(self, application_generate_entity: AdvancedCh ) try: - return generate_task_pipeline.process() + return generate_task_pipeline.process(workflow) except ValueError as e: if e.args[0] == "I/O operation on closed file.": # ignore this error raise GenerateTaskStoppedException() diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index de3632894de2ed..96e9319dda58d2 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -70,7 +70,8 @@ def run(self, application_generate_entity: AdvancedChatAppGenerateEntity, app_record=app_record, app_generate_entity=application_generate_entity, inputs=inputs, - query=query + query=query, + message_id=message.id ): return @@ -156,11 +157,14 @@ def get_workflow(self, app_model: App, workflow_id: str) -> Optional[Workflow]: # return workflow return workflow - def handle_input_moderation(self, queue_manager: AppQueueManager, - app_record: App, - app_generate_entity: AdvancedChatAppGenerateEntity, - inputs: dict, - query: str) -> bool: + def handle_input_moderation( + self, queue_manager: AppQueueManager, + app_record: App, + app_generate_entity: AdvancedChatAppGenerateEntity, + inputs: dict, + query: str, + message_id: str + ) -> bool: """ Handle input moderation :param queue_manager: application queue manager @@ -168,6 +172,7 @@ def handle_input_moderation(self, queue_manager: AppQueueManager, :param app_generate_entity: application generate entity :param inputs: inputs :param query: query + :param message_id: message id :return: """ try: @@ -178,6 +183,7 @@ def handle_input_moderation(self, queue_manager: AppQueueManager, app_generate_entity=app_generate_entity, inputs=inputs, query=query, + message_id=message_id, ) except ModerationException as e: self._stream_output( diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 7c70afc2ae393c..207e962b376efd 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -54,6 +54,7 @@ WorkflowNodeExecution, WorkflowRunStatus, ) +from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -69,13 +70,15 @@ class AdvancedChatAppGenerateTaskPipeline(BasedGenerateTaskPipeline, WorkflowCyc _workflow_system_variables: dict[SystemVariable, Any] _iteration_nested_relations: dict[str, list[str]] - def __init__(self, application_generate_entity: AdvancedChatAppGenerateEntity, - workflow: Workflow, - queue_manager: AppQueueManager, - conversation: Conversation, - message: Message, - user: Union[Account, EndUser], - stream: bool) -> 
None: + def __init__( + self, application_generate_entity: AdvancedChatAppGenerateEntity, + workflow: Workflow, + queue_manager: AppQueueManager, + conversation: Conversation, + message: Message, + user: Union[Account, EndUser], + stream: bool + ) -> None: """ Initialize AdvancedChatAppGenerateTaskPipeline. :param application_generate_entity: application generate entity @@ -111,7 +114,10 @@ def __init__(self, application_generate_entity: AdvancedChatAppGenerateEntity, self._stream_generate_routes = self._get_stream_generate_routes() self._conversation_name_generate_thread = None - def process(self) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]: + def process( + self, + workflow: Optional[Workflow] = None + ) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]: """ Process generate task pipeline. :return: @@ -126,14 +132,14 @@ def process(self) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStrea self._application_generate_entity.query ) - generator = self._process_stream_response() + generator = self._process_stream_response(workflow) if self._stream: return self._to_stream_response(generator) else: return self._to_blocking_response(generator) def _to_blocking_response(self, generator: Generator[StreamResponse, None, None]) \ - -> ChatbotAppBlockingResponse: + -> ChatbotAppBlockingResponse: """ Process blocking response. :return: @@ -164,7 +170,7 @@ def _to_blocking_response(self, generator: Generator[StreamResponse, None, None] raise Exception('Queue listening stopped unexpectedly.') def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) \ - -> Generator[ChatbotAppStreamResponse, None, None]: + -> Generator[ChatbotAppStreamResponse, None, None]: """ To stream response. :return: @@ -177,11 +183,13 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) stream_response=stream_response ) - def _process_stream_response(self) -> Generator[StreamResponse, None, None]: + def _process_stream_response(self, workflow: Optional[Workflow] = None) -> Generator[StreamResponse, None, None]: """ Process stream response. :return: """ + app_id = self._conversation.app_id + tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id, workflow=workflow) for message in self._queue_manager.listen(): event = message.event @@ -249,7 +257,7 @@ def _process_stream_response(self) -> Generator[StreamResponse, None, None]: yield self._handle_iteration_to_stream_response(self._application_generate_entity.task_id, event) self._handle_iteration_operation(event) elif isinstance(event, QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent): - workflow_run = self._handle_workflow_finished(event) + workflow_run = self._handle_workflow_finished(event, tracing_instance) if workflow_run: yield self._workflow_finish_to_stream_response( task_id=self._application_generate_entity.task_id, @@ -292,7 +300,7 @@ def _process_stream_response(self) -> Generator[StreamResponse, None, None]: continue if not self._is_stream_out_support( - event=event + event=event ): continue @@ -361,7 +369,7 @@ def _message_end_to_stream_response(self) -> MessageEndStreamResponse: id=self._message.id, **extras ) - + def _get_stream_generate_routes(self) -> dict[str, ChatflowStreamGenerateRoute]: """ Get stream generate routes. 
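# --- Illustrative sketch, not part of the patch: the deferred-tracing pattern
# --- this pipeline hands off to. _handle_workflow_finished (see
# --- workflow_cycle_manage.py below) guards on the resolved tracing instance
# --- and enqueues a TraceTask so the provider call happens off the response
# --- path rather than blocking stream generation:
if tracing_instance:
    trace_manager = TraceQueueManager()
    trace_manager.add_trace_task(
        TraceTask(
            tracing_instance,
            TraceTaskName.WORKFLOW_TRACE,
            workflow_run=workflow_run,
            conversation_id=conversation_id,
        )
    )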
@@ -391,9 +399,9 @@ def _get_stream_generate_routes(self) -> dict[str, ChatflowStreamGenerateRoute]: ) return stream_generate_routes - + def _get_answer_start_at_node_ids(self, graph: dict, target_node_id: str) \ - -> list[str]: + -> list[str]: """ Get answer start at node id. :param graph: graph @@ -414,14 +422,14 @@ def _get_answer_start_at_node_ids(self, graph: dict, target_node_id: str) \ target_node = next((node for node in nodes if node.get('id') == target_node_id), None) if not target_node: return [] - + node_iteration_id = target_node.get('data', {}).get('iteration_id') # get iteration start node id for node in nodes: if node.get('id') == node_iteration_id: if node.get('data', {}).get('start_node_id') == target_node_id: return [target_node_id] - + return [] start_node_ids = [] @@ -457,7 +465,7 @@ def _get_answer_start_at_node_ids(self, graph: dict, target_node_id: str) \ start_node_ids.extend(sub_start_node_ids) return start_node_ids - + def _get_iteration_nested_relations(self, graph: dict) -> dict[str, list[str]]: """ Get iteration nested relations. @@ -466,18 +474,18 @@ def _get_iteration_nested_relations(self, graph: dict) -> dict[str, list[str]]: """ nodes = graph.get('nodes') - iteration_ids = [node.get('id') for node in nodes + iteration_ids = [node.get('id') for node in nodes if node.get('data', {}).get('type') in [ NodeType.ITERATION.value, NodeType.LOOP.value, - ]] + ]] return { iteration_id: [ node.get('id') for node in nodes if node.get('data', {}).get('iteration_id') == iteration_id ] for iteration_id in iteration_ids } - + def _generate_stream_outputs_when_node_started(self) -> Generator: """ Generate stream outputs. @@ -485,8 +493,8 @@ def _generate_stream_outputs_when_node_started(self) -> Generator: """ if self._task_state.current_stream_generate_state: route_chunks = self._task_state.current_stream_generate_state.generate_route[ - self._task_state.current_stream_generate_state.current_route_position: - ] + self._task_state.current_stream_generate_state.current_route_position: + ] for route_chunk in route_chunks: if route_chunk.type == 'text': @@ -506,7 +514,8 @@ def _generate_stream_outputs_when_node_started(self) -> Generator: # all route chunks are generated if self._task_state.current_stream_generate_state.current_route_position == len( - self._task_state.current_stream_generate_state.generate_route): + self._task_state.current_stream_generate_state.generate_route + ): self._task_state.current_stream_generate_state = None def _generate_stream_outputs_when_node_finished(self) -> Optional[Generator]: @@ -519,7 +528,7 @@ def _generate_stream_outputs_when_node_finished(self) -> Optional[Generator]: route_chunks = self._task_state.current_stream_generate_state.generate_route[ self._task_state.current_stream_generate_state.current_route_position:] - + for route_chunk in route_chunks: if route_chunk.type == 'text': route_chunk = cast(TextGenerateRouteChunk, route_chunk) @@ -551,7 +560,8 @@ def _generate_stream_outputs_when_node_finished(self) -> Optional[Generator]: value = iteration_state.current_index elif value_selector[1] == 'item': value = iterator_selector[iteration_state.current_index] if iteration_state.current_index < len( - iterator_selector) else None + iterator_selector + ) else None else: # check chunk node id is before current node id or equal to current node id if route_chunk_node_id not in self._task_state.ran_node_execution_infos: @@ -562,14 +572,15 @@ def _generate_stream_outputs_when_node_finished(self) -> Optional[Generator]: # get route chunk node 
execution info route_chunk_node_execution_info = self._task_state.ran_node_execution_infos[route_chunk_node_id] if (route_chunk_node_execution_info.node_type == NodeType.LLM - and latest_node_execution_info.node_type == NodeType.LLM): + and latest_node_execution_info.node_type == NodeType.LLM): # only LLM support chunk stream output self._task_state.current_stream_generate_state.current_route_position += 1 continue # get route chunk node execution route_chunk_node_execution = db.session.query(WorkflowNodeExecution).filter( - WorkflowNodeExecution.id == route_chunk_node_execution_info.workflow_node_execution_id).first() + WorkflowNodeExecution.id == route_chunk_node_execution_info.workflow_node_execution_id + ).first() outputs = route_chunk_node_execution.outputs_dict @@ -631,7 +642,8 @@ def _generate_stream_outputs_when_node_finished(self) -> Optional[Generator]: # all route chunks are generated if self._task_state.current_stream_generate_state.current_route_position == len( - self._task_state.current_stream_generate_state.generate_route): + self._task_state.current_stream_generate_state.generate_route + ): self._task_state.current_stream_generate_state = None def _is_stream_out_support(self, event: QueueTextChunkEvent) -> bool: diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index 407fb931ecb9bd..ca8fbe138efa37 100644 --- a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -3,7 +3,7 @@ import threading import uuid from collections.abc import Generator -from typing import Any, Union +from typing import Any, Optional, Union from flask import Flask, current_app from pydantic import ValidationError @@ -22,6 +22,7 @@ from extensions.ext_database import db from models.account import Account from models.model import App, EndUser +from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -70,6 +71,12 @@ def generate(self, app_model: App, conversation=conversation ) + # get tracing instance + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_model.id, + app_model_config=app_model_config, + ) + # validate override model config override_model_config_dict = None if args.get('model_config'): @@ -142,6 +149,7 @@ def generate(self, app_model: App, 'queue_manager': queue_manager, 'conversation_id': conversation.id, 'message_id': message.id, + 'tracing_instance': tracing_instance, }) worker_thread.start() @@ -153,7 +161,8 @@ def generate(self, app_model: App, conversation=conversation, message=message, user=user, - stream=stream + stream=stream, + tracing_instance=tracing_instance, ) return AgentChatAppGenerateResponseConverter.convert( @@ -165,7 +174,9 @@ def _generate_worker(self, flask_app: Flask, application_generate_entity: AgentChatAppGenerateEntity, queue_manager: AppQueueManager, conversation_id: str, - message_id: str) -> None: + message_id: str, + tracing_instance: Optional[Any] = None + ) -> None: """ Generate worker in a new thread. 
:param flask_app: Flask app @@ -173,6 +184,7 @@ def _generate_worker(self, flask_app: Flask, :param queue_manager: queue manager :param conversation_id: conversation ID :param message_id: message ID + :param tracing_instance: tracing instance :return: """ with flask_app.app_context(): @@ -187,7 +199,8 @@ def _generate_worker(self, flask_app: Flask, application_generate_entity=application_generate_entity, queue_manager=queue_manager, conversation=conversation, - message=message + message=message, + tracing_instance=tracing_instance ) except GenerateTaskStoppedException: pass diff --git a/api/core/app/apps/agent_chat/app_runner.py b/api/core/app/apps/agent_chat/app_runner.py index d6367300de26e3..a942522996ca00 100644 --- a/api/core/app/apps/agent_chat/app_runner.py +++ b/api/core/app/apps/agent_chat/app_runner.py @@ -1,5 +1,5 @@ import logging -from typing import cast +from typing import Any, Optional, cast from core.agent.cot_chat_agent_runner import CotChatAgentRunner from core.agent.cot_completion_agent_runner import CotCompletionAgentRunner @@ -28,16 +28,21 @@ class AgentChatAppRunner(AppRunner): """ Agent Application Runner """ - def run(self, application_generate_entity: AgentChatAppGenerateEntity, - queue_manager: AppQueueManager, - conversation: Conversation, - message: Message) -> None: + + def run( + self, application_generate_entity: AgentChatAppGenerateEntity, + queue_manager: AppQueueManager, + conversation: Conversation, + message: Message, + tracing_instance: Optional[Any] = None + ) -> None: """ Run assistant application :param application_generate_entity: application generate entity :param queue_manager: application queue manager :param conversation: conversation :param message: message + :param tracing_instance: tracing instance :return: """ app_config = application_generate_entity.app_config @@ -100,6 +105,7 @@ def run(self, application_generate_entity: AgentChatAppGenerateEntity, app_generate_entity=application_generate_entity, inputs=inputs, query=query, + message_id=message.id ) except ModerationException as e: self.direct_output( @@ -241,6 +247,7 @@ def run(self, application_generate_entity: AgentChatAppGenerateEntity, message=message, query=query, inputs=inputs, + tracing_instance=tracing_instance, ) # handle invoke result diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py index 53f457cb116c02..1ccc9597cee3a9 100644 --- a/api/core/app/apps/base_app_runner.py +++ b/api/core/app/apps/base_app_runner.py @@ -338,11 +338,14 @@ def _handle_invoke_result_stream(self, invoke_result: Generator, ), PublishFrom.APPLICATION_MANAGER ) - def moderation_for_inputs(self, app_id: str, - tenant_id: str, - app_generate_entity: AppGenerateEntity, - inputs: dict, - query: str) -> tuple[bool, dict, str]: + def moderation_for_inputs( + self, app_id: str, + tenant_id: str, + app_generate_entity: AppGenerateEntity, + inputs: dict, + query: str, + message_id: str, + ) -> tuple[bool, dict, str]: """ Process sensitive_word_avoidance. 
:param app_id: app id @@ -350,6 +353,7 @@ def moderation_for_inputs(self, app_id: str, :param app_generate_entity: app generate entity :param inputs: inputs :param query: query + :param message_id: message id :return: """ moderation_feature = InputModeration() @@ -358,7 +362,8 @@ def moderation_for_inputs(self, app_id: str, tenant_id=tenant_id, app_config=app_generate_entity.app_config, inputs=inputs, - query=query if query else '' + query=query if query else '', + message_id=message_id, ) def check_hosting_moderation(self, application_generate_entity: EasyUIBasedAppGenerateEntity, diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index 505ada09db5c90..e333e3458cbbc8 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -22,17 +22,19 @@ from extensions.ext_database import db from models.account import Account from models.model import App, EndUser +from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) class ChatAppGenerator(MessageBasedAppGenerator): - def generate(self, app_model: App, - user: Union[Account, EndUser], - args: Any, - invoke_from: InvokeFrom, - stream: bool = True) \ - -> Union[dict, Generator[dict, None, None]]: + def generate( + self, app_model: App, + user: Union[Account, EndUser], + args: Any, + invoke_from: InvokeFrom, + stream: bool = True, + ) -> Union[dict, Generator[dict, None, None]]: """ Generate App response. @@ -41,6 +43,7 @@ def generate(self, app_model: App, :param args: request args :param invoke_from: invoke from source :param stream: is stream + :param tracing_instance: tracing instance """ if not args.get('query'): raise ValueError('query is required') @@ -121,6 +124,12 @@ def generate(self, app_model: App, message ) = self._init_generate_records(application_generate_entity, conversation) + # get tracing instance + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_model.id, + app_model_config=app_model_config, + ) + # init queue manager queue_manager = MessageBasedAppQueueManager( task_id=application_generate_entity.task_id, @@ -149,7 +158,8 @@ def generate(self, app_model: App, conversation=conversation, message=message, user=user, - stream=stream + stream=stream, + tracing_instance=tracing_instance, ) return ChatAppGenerateResponseConverter.convert( diff --git a/api/core/app/apps/chat/app_runner.py b/api/core/app/apps/chat/app_runner.py index 7d243d0726724e..0a029af86a1ce1 100644 --- a/api/core/app/apps/chat/app_runner.py +++ b/api/core/app/apps/chat/app_runner.py @@ -96,6 +96,7 @@ def run(self, application_generate_entity: ChatAppGenerateEntity, app_generate_entity=application_generate_entity, inputs=inputs, query=query, + message_id=message.id ) except ModerationException as e: self.direct_output( @@ -165,7 +166,8 @@ def run(self, application_generate_entity: ChatAppGenerateEntity, invoke_from=application_generate_entity.invoke_from, show_retrieve_source=app_config.additional_features.show_retrieve_source, hit_callback=hit_callback, - memory=memory + memory=memory, + message_id=message.id, ) # reorganize all inputs and template to prompt messages diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index 52d907b5353143..fcf00e685594dd 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -24,6 +24,7 @@ from models.model import App, EndUser, Message from services.errors.app import 
MoreLikeThisDisabledError from services.errors.message import MessageNotExistsError +from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -114,6 +115,12 @@ def generate(self, app_model: App, message ) = self._init_generate_records(application_generate_entity) + # get tracing instance + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_model.id, + app_model_config=app_model_config, + ) + # init queue manager queue_manager = MessageBasedAppQueueManager( task_id=application_generate_entity.task_id, @@ -141,7 +148,8 @@ def generate(self, app_model: App, conversation=conversation, message=message, user=user, - stream=stream + stream=stream, + tracing_instance=tracing_instance, ) return CompletionAppGenerateResponseConverter.convert( @@ -273,6 +281,12 @@ def generate_more_like_this(self, app_model: App, message ) = self._init_generate_records(application_generate_entity) + # get tracing instance + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_model.id, + app_model_config=app_model_config, + ) + # init queue manager queue_manager = MessageBasedAppQueueManager( task_id=application_generate_entity.task_id, @@ -300,7 +314,8 @@ def generate_more_like_this(self, app_model: App, conversation=conversation, message=message, user=user, - stream=stream + stream=stream, + tracing_instance=tracing_instance, ) return CompletionAppGenerateResponseConverter.convert( diff --git a/api/core/app/apps/completion/app_runner.py b/api/core/app/apps/completion/app_runner.py index a3a9945bc0436b..2e701320148408 100644 --- a/api/core/app/apps/completion/app_runner.py +++ b/api/core/app/apps/completion/app_runner.py @@ -77,6 +77,7 @@ def run(self, application_generate_entity: CompletionAppGenerateEntity, app_generate_entity=application_generate_entity, inputs=inputs, query=query, + message_id=message.id ) except ModerationException as e: self.direct_output( @@ -124,7 +125,8 @@ def run(self, application_generate_entity: CompletionAppGenerateEntity, query=query, invoke_from=application_generate_entity.invoke_from, show_retrieve_source=app_config.additional_features.show_retrieve_source, - hit_callback=hit_callback + hit_callback=hit_callback, + message_id=message.id ) # reorganize all inputs and template to prompt messages diff --git a/api/core/app/apps/message_based_app_generator.py b/api/core/app/apps/message_based_app_generator.py index 6acf5da8df4d2a..11763d1c6cde53 100644 --- a/api/core/app/apps/message_based_app_generator.py +++ b/api/core/app/apps/message_based_app_generator.py @@ -1,7 +1,7 @@ import json import logging from collections.abc import Generator -from typing import Optional, Union +from typing import Any, Optional, Union from sqlalchemy import and_ @@ -35,22 +35,24 @@ class MessageBasedAppGenerator(BaseAppGenerator): - def _handle_response(self, application_generate_entity: Union[ - ChatAppGenerateEntity, - CompletionAppGenerateEntity, - AgentChatAppGenerateEntity, - AdvancedChatAppGenerateEntity - ], - queue_manager: AppQueueManager, - conversation: Conversation, - message: Message, - user: Union[Account, EndUser], - stream: bool = False) \ - -> Union[ - ChatbotAppBlockingResponse, - CompletionAppBlockingResponse, - Generator[Union[ChatbotAppStreamResponse, CompletionAppStreamResponse], None, None] - ]: + def _handle_response( + self, application_generate_entity: Union[ + ChatAppGenerateEntity, + CompletionAppGenerateEntity, + AgentChatAppGenerateEntity, + AdvancedChatAppGenerateEntity + ], + queue_manager: 
AppQueueManager, + conversation: Conversation, + message: Message, + user: Union[Account, EndUser], + stream: bool = False, + tracing_instance: Optional[Any] = None + ) -> Union[ + ChatbotAppBlockingResponse, + CompletionAppBlockingResponse, + Generator[Union[ChatbotAppStreamResponse, CompletionAppStreamResponse], None, None] + ]: """ Handle response. :param application_generate_entity: application generate entity @@ -72,7 +74,7 @@ def _handle_response(self, application_generate_entity: Union[ ) try: - return generate_task_pipeline.process() + return generate_task_pipeline.process(tracing_instance) except ValueError as e: if e.args[0] == "I/O operation on closed file.": # ignore this error raise GenerateTaskStoppedException() diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index c4324978d81fc3..829ccc8cab2b5a 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -3,7 +3,7 @@ import threading import uuid from collections.abc import Generator -from typing import Union +from typing import Any, Optional, Union from flask import Flask, current_app from pydantic import ValidationError @@ -29,14 +29,16 @@ class WorkflowAppGenerator(BaseAppGenerator): - def generate(self, app_model: App, - workflow: Workflow, - user: Union[Account, EndUser], - args: dict, - invoke_from: InvokeFrom, - stream: bool = True, - call_depth: int = 0) \ - -> Union[dict, Generator[dict, None, None]]: + def generate( + self, app_model: App, + workflow: Workflow, + user: Union[Account, EndUser], + args: dict, + invoke_from: InvokeFrom, + stream: bool = True, + call_depth: int = 0, + tracing_instance: Optional[Any] = None + ) -> Union[dict, Generator[dict, None, None]]: """ Generate App response. @@ -46,6 +48,8 @@ def generate(self, app_model: App, :param args: request args :param invoke_from: invoke from source :param stream: is stream + :param call_depth: call depth + :param tracing_instance: ops tracing instance """ inputs = args['inputs'] @@ -87,17 +91,18 @@ def generate(self, app_model: App, application_generate_entity=application_generate_entity, invoke_from=invoke_from, stream=stream, - call_depth=call_depth + call_depth=call_depth, ) - def _generate(self, app_model: App, - workflow: Workflow, - user: Union[Account, EndUser], - application_generate_entity: WorkflowAppGenerateEntity, - invoke_from: InvokeFrom, - stream: bool = True, - call_depth: int = 0) \ - -> Union[dict, Generator[dict, None, None]]: + def _generate( + self, app_model: App, + workflow: Workflow, + user: Union[Account, EndUser], + application_generate_entity: WorkflowAppGenerateEntity, + invoke_from: InvokeFrom, + stream: bool = True, + call_depth: int = 0 + ) -> Union[dict, Generator[dict, None, None]]: """ Generate App response. 
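# --- Illustrative sketch, not part of the patch: the two lookup forms of
# --- OpsTraceService.get_ops_trace_instance used across this patch. It is
# --- assumed to return None when no tracing provider is configured for the
# --- app, hence the `if tracing_instance:` guard at every call site.
tracing_instance = OpsTraceService.get_ops_trace_instance(
    app_id=app_model.id, app_model_config=app_model_config  # chat/agent/completion apps
)
tracing_instance = OpsTraceService.get_ops_trace_instance(
    app_id=app_id, workflow=workflow  # workflow / advanced-chat apps
)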
@@ -131,7 +136,7 @@ def _generate(self, app_model: App, workflow=workflow, queue_manager=queue_manager, user=user, - stream=stream + stream=stream, ) return WorkflowAppGenerateResponseConverter.convert( @@ -271,9 +276,10 @@ def _handle_response(self, application_generate_entity: WorkflowAppGenerateEntit user=user, stream=stream ) + app_id = application_generate_entity.app_config.app_id try: - return generate_task_pipeline.process() + return generate_task_pipeline.process(app_id, workflow) except ValueError as e: if e.args[0] == "I/O operation on closed file.": # ignore this error raise GenerateTaskStoppedException() diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index 8d961e0993b96f..944c3736a140d1 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -1,6 +1,6 @@ import logging from collections.abc import Generator -from typing import Any, Union +from typing import Any, Optional, Union from core.app.apps.base_app_queue_manager import AppQueueManager from core.app.entities.app_invoke_entities import ( @@ -48,6 +48,7 @@ WorkflowNodeExecution, WorkflowRun, ) +from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -95,7 +96,11 @@ def __init__(self, application_generate_entity: WorkflowAppGenerateEntity, self._stream_generate_nodes = self._get_stream_generate_nodes() self._iteration_nested_relations = self._get_iteration_nested_relations(self._workflow.graph_dict) - def process(self) -> Union[WorkflowAppBlockingResponse, Generator[WorkflowAppStreamResponse, None, None]]: + def process( + self, + app_id: Optional[str] = None, + workflow: Optional[Workflow] = None, + ) -> Union[WorkflowAppBlockingResponse, Generator[WorkflowAppStreamResponse, None, None]]: """ Process generate task pipeline. :return: @@ -104,7 +109,7 @@ def process(self) -> Union[WorkflowAppBlockingResponse, Generator[WorkflowAppStr db.session.refresh(self._user) db.session.close() - generator = self._process_stream_response() + generator = self._process_stream_response(app_id, workflow) if self._stream: return self._to_stream_response(generator) else: @@ -158,11 +163,16 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) stream_response=stream_response ) - def _process_stream_response(self) -> Generator[StreamResponse, None, None]: + def _process_stream_response( + self, + app_id: Optional[str] = None, + workflow: Optional[Workflow] = None, + ) -> Generator[StreamResponse, None, None]: """ Process stream response. 
:return: """ + tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id, workflow=workflow) for message in self._queue_manager.listen(): event = message.event @@ -215,7 +225,7 @@ def _process_stream_response(self) -> Generator[StreamResponse, None, None]: yield self._handle_iteration_to_stream_response(self._application_generate_entity.task_id, event) self._handle_iteration_operation(event) elif isinstance(event, QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent): - workflow_run = self._handle_workflow_finished(event) + workflow_run = self._handle_workflow_finished(event, tracing_instance) # save workflow app log self._save_workflow_app_log(workflow_run) diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index ccb684d84b0c8f..da0731df3eff1f 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -2,7 +2,7 @@ import logging import time from collections.abc import Generator -from typing import Optional, Union, cast +from typing import Any, Optional, Union, cast from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom from core.app.entities.app_invoke_entities import ( @@ -50,6 +50,7 @@ from extensions.ext_database import db from models.account import Account from models.model import AppMode, Conversation, EndUser, Message, MessageAgentThought +from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName logger = logging.getLogger(__name__) @@ -100,7 +101,10 @@ def __init__(self, application_generate_entity: Union[ self._conversation_name_generate_thread = None - def process(self) -> Union[ + def process( + self, + tracing_instance: Optional[Any] = None + ) -> Union[ ChatbotAppBlockingResponse, CompletionAppBlockingResponse, Generator[Union[ChatbotAppStreamResponse, CompletionAppStreamResponse], None, None] @@ -120,7 +124,7 @@ def process(self) -> Union[ self._application_generate_entity.query ) - generator = self._process_stream_response() + generator = self._process_stream_response(tracing_instance) if self._stream: return self._to_stream_response(generator) else: @@ -197,7 +201,9 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) stream_response=stream_response ) - def _process_stream_response(self) -> Generator[StreamResponse, None, None]: + def _process_stream_response( + self, tracing_instance: Optional[Any] = None + ) -> Generator[StreamResponse, None, None]: """ Process stream response. :return: @@ -224,7 +230,7 @@ def _process_stream_response(self) -> Generator[StreamResponse, None, None]: yield self._message_replace_to_stream_response(answer=output_moderation_answer) # Save message - self._save_message() + self._save_message(tracing_instance) yield self._message_end_to_stream_response() elif isinstance(event, QueueRetrieverResourcesEvent): @@ -269,7 +275,9 @@ def _process_stream_response(self) -> Generator[StreamResponse, None, None]: if self._conversation_name_generate_thread: self._conversation_name_generate_thread.join() - def _save_message(self) -> None: + def _save_message( + self, tracing_instance: Optional[Any] = None, + ) -> None: """ Save message. 
:return: @@ -300,6 +308,17 @@ def _save_message(self) -> None: db.session.commit() + if tracing_instance: + trace_manager = TraceQueueManager() + trace_manager.add_trace_task( + TraceTask( + tracing_instance, + TraceTaskName.MESSAGE_TRACE, + conversation_id=self._conversation.id, + message_id=self._message.id + ) + ) + message_was_created.send( self._message, application_generate_entity=self._application_generate_entity, diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index 978a318279165f..5c0ffbe07bdd5b 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_cycle_manage.py @@ -1,7 +1,7 @@ import json import time from datetime import datetime, timezone -from typing import Optional, Union, cast +from typing import Any, Optional, Union, cast from core.app.entities.app_invoke_entities import InvokeFrom from core.app.entities.queue_entities import ( @@ -39,6 +39,8 @@ WorkflowRunStatus, WorkflowRunTriggeredFrom, ) +from services.ops_trace.base_trace_instance import BaseTraceInstance +from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName class WorkflowCycleManage(WorkflowIterationCycleManage): @@ -94,11 +96,15 @@ def _init_workflow_run(self, workflow: Workflow, return workflow_run - def _workflow_run_success(self, workflow_run: WorkflowRun, - start_at: float, - total_tokens: int, - total_steps: int, - outputs: Optional[str] = None) -> WorkflowRun: + def _workflow_run_success( + self, workflow_run: WorkflowRun, + start_at: float, + total_tokens: int, + total_steps: int, + outputs: Optional[str] = None, + conversation_id: Optional[str] = None, + tracing_instance: Optional[BaseTraceInstance] = None + ) -> WorkflowRun: """ Workflow run success :param workflow_run: workflow run @@ -106,6 +112,8 @@ def _workflow_run_success(self, workflow_run: WorkflowRun, :param total_tokens: total tokens :param total_steps: total steps :param outputs: outputs + :param conversation_id: conversation id + :param tracing_instance: tracing instance :return: """ workflow_run.status = WorkflowRunStatus.SUCCEEDED.value @@ -119,14 +127,29 @@ def _workflow_run_success(self, workflow_run: WorkflowRun, db.session.refresh(workflow_run) db.session.close() + if tracing_instance: + trace_manager = TraceQueueManager() + trace_manager.add_trace_task( + TraceTask( + tracing_instance, + TraceTaskName.WORKFLOW_TRACE, + workflow_run=workflow_run, + conversation_id=conversation_id, + ) + ) + return workflow_run - def _workflow_run_failed(self, workflow_run: WorkflowRun, - start_at: float, - total_tokens: int, - total_steps: int, - status: WorkflowRunStatus, - error: str) -> WorkflowRun: + def _workflow_run_failed( + self, workflow_run: WorkflowRun, + start_at: float, + total_tokens: int, + total_steps: int, + status: WorkflowRunStatus, + error: str, + conversation_id: Optional[str] = None, + tracing_instance: Optional[Any] = None + ) -> WorkflowRun: """ Workflow run failed :param workflow_run: workflow run @@ -148,6 +171,17 @@ def _workflow_run_failed(self, workflow_run: WorkflowRun, db.session.refresh(workflow_run) db.session.close() + if tracing_instance: + trace_manager = TraceQueueManager() + trace_manager.add_trace_task( + TraceTask( + tracing_instance, + TraceTaskName.WORKFLOW_TRACE, + workflow_run=workflow_run, + conversation_id=conversation_id, + ) + ) + return workflow_run def _init_node_execution_from_workflow_run(self, workflow_run: WorkflowRun, @@ -440,9 +474,9 @@ 
def _handle_node_finished(self, event: QueueNodeSucceededEvent | QueueNodeFailed current_node_execution = self._task_state.ran_node_execution_infos[event.node_id] workflow_node_execution = db.session.query(WorkflowNodeExecution).filter( WorkflowNodeExecution.id == current_node_execution.workflow_node_execution_id).first() - + execution_metadata = event.execution_metadata if isinstance(event, QueueNodeSucceededEvent) else None - + if self._iteration_state and self._iteration_state.current_iterations: if not execution_metadata: execution_metadata = {} @@ -470,7 +504,7 @@ def _handle_node_finished(self, event: QueueNodeSucceededEvent | QueueNodeFailed if execution_metadata and execution_metadata.get(NodeRunMetadataKey.TOTAL_TOKENS): self._task_state.total_tokens += ( int(execution_metadata.get(NodeRunMetadataKey.TOTAL_TOKENS))) - + if self._iteration_state: for iteration_node_id in self._iteration_state.current_iterations: data = self._iteration_state.current_iterations[iteration_node_id] @@ -496,13 +530,16 @@ def _handle_node_finished(self, event: QueueNodeSucceededEvent | QueueNodeFailed return workflow_node_execution - def _handle_workflow_finished(self, event: QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent) \ - -> Optional[WorkflowRun]: + def _handle_workflow_finished( + self, event: QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent, + tracing_instance: Optional[BaseTraceInstance] = None + ) -> Optional[WorkflowRun]: workflow_run = db.session.query(WorkflowRun).filter( WorkflowRun.id == self._task_state.workflow_run_id).first() if not workflow_run: return None + conversation_id = self._application_generate_entity.inputs.get('sys.conversation_id') if isinstance(event, QueueStopEvent): workflow_run = self._workflow_run_failed( workflow_run=workflow_run, @@ -510,7 +547,8 @@ def _handle_workflow_finished(self, event: QueueStopEvent | QueueWorkflowSucceed total_tokens=self._task_state.total_tokens, total_steps=self._task_state.total_steps, status=WorkflowRunStatus.STOPPED, - error='Workflow stopped.' 
+ error='Workflow stopped.', + conversation_id=conversation_id, ) latest_node_execution_info = self._task_state.latest_node_execution_info @@ -531,7 +569,9 @@ def _handle_workflow_finished(self, event: QueueStopEvent | QueueWorkflowSucceed total_tokens=self._task_state.total_tokens, total_steps=self._task_state.total_steps, status=WorkflowRunStatus.FAILED, - error=event.error + error=event.error, + conversation_id=conversation_id, + tracing_instance=tracing_instance, ) else: if self._task_state.latest_node_execution_info: @@ -546,7 +586,9 @@ def _handle_workflow_finished(self, event: QueueStopEvent | QueueWorkflowSucceed start_at=self._task_state.start_at, total_tokens=self._task_state.total_tokens, total_steps=self._task_state.total_steps, - outputs=outputs + outputs=outputs, + conversation_id=conversation_id, + tracing_instance=tracing_instance, ) self._task_state.workflow_run_id = workflow_run.id diff --git a/api/core/callback_handler/agent_tool_callback_handler.py b/api/core/callback_handler/agent_tool_callback_handler.py index ac5076cd012d0d..d70161f64c1ebf 100644 --- a/api/core/callback_handler/agent_tool_callback_handler.py +++ b/api/core/callback_handler/agent_tool_callback_handler.py @@ -3,6 +3,8 @@ from pydantic import BaseModel +from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName + _TEXT_COLOR_MAPPING = { "blue": "36;1", "yellow": "33;1", @@ -51,6 +53,9 @@ def on_tool_end( tool_name: str, tool_inputs: dict[str, Any], tool_outputs: str, + message_id: Optional[str] = None, + timer: Optional[Any] = None, + tracing_instance: Optional[Any] = None, ) -> None: """If not the final action, print out observation.""" print_text("\n[on_tool_end]\n", color=self.color) @@ -59,6 +64,20 @@ def on_tool_end( print_text("Outputs: " + str(tool_outputs)[:1000] + "\n", color=self.color) print_text("\n") + if tracing_instance: + trace_manager = TraceQueueManager() + trace_manager.add_trace_task( + TraceTask( + tracing_instance, + TraceTaskName.TOOL_TRACE, + message_id=message_id, + tool_name=tool_name, + tool_inputs=tool_inputs, + tool_outputs=tool_outputs, + timer=timer, + ) + ) + def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any ) -> None: diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 14de8649c637e7..c705cc754286c7 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -1,5 +1,6 @@ import json import logging +from typing import Optional from core.llm_generator.output_parser.errors import OutputParserException from core.llm_generator.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser @@ -10,11 +11,16 @@ from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError from core.prompt.utils.prompt_template_parser import PromptTemplateParser +from extensions.ext_database import db +from models.model import Conversation +from services.ops_trace.ops_trace_service import OpsTraceService +from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from services.ops_trace.utils import measure_time class LLMGenerator: @classmethod - def generate_conversation_name(cls, tenant_id: str, query): + def generate_conversation_name(cls, tenant_id: str, query, conversation_id: Optional[str] = None): prompt = CONVERSATION_TITLE_PROMPT if len(query) > 2000: @@ -29,18 +35,19 @@ def 
generate_conversation_name(cls, tenant_id: str, query):
             tenant_id=tenant_id,
             model_type=ModelType.LLM,
         )
-
         prompts = [UserPromptMessage(content=prompt)]
-        response = model_instance.invoke_llm(
-            prompt_messages=prompts,
-            model_parameters={
-                "max_tokens": 100,
-                "temperature": 1
-            },
-            stream=False
-        )
-        answer = response.message.content
+
+        with measure_time() as timer:
+            response = model_instance.invoke_llm(
+                prompt_messages=prompts,
+                model_parameters={
+                    "max_tokens": 100,
+                    "temperature": 1
+                },
+                stream=False
+            )
+
+        answer = response.message.content
         result_dict = json.loads(answer)
         answer = result_dict['Your Output']
         name = answer.strip()
@@ -48,6 +55,29 @@ def generate_conversation_name(cls, tenant_id: str, query):
         if len(name) > 75:
             name = name[:75] + '...'
 
+        # get tracing instance
+        conversation_data: Conversation = db.session.query(Conversation).filter(Conversation.id == conversation_id).first()
+        app_id = conversation_data.app_id
+        app_model_config = OpsTraceService.get_app_config_through_message_id(message_id=conversation_data.message_id)
+
+        tracing_instance = OpsTraceService.get_ops_trace_instance(
+            app_id=app_id, app_model_config=app_model_config
+        )
+
+        if tracing_instance:
+            trace_manager = TraceQueueManager()
+            trace_manager.add_trace_task(
+                TraceTask(
+                    tracing_instance,
+                    TraceTaskName.CONVERSATION_TRACE,
+                    conversation_id=conversation_id,
+                    generate_conversation_name=name,
+                    inputs=prompt,
+                    timer=timer,
+                    tenant_id=tenant_id,
+                )
+            )
+
         return name
 
     @classmethod
diff --git a/api/core/moderation/input_moderation.py b/api/core/moderation/input_moderation.py
index 8fbc0c2d5003f6..0d915f74fed64d 100644
--- a/api/core/moderation/input_moderation.py
+++ b/api/core/moderation/input_moderation.py
@@ -3,16 +3,21 @@
 from core.app.app_config.entities import AppConfig
 from core.moderation.base import ModerationAction, ModerationException
 from core.moderation.factory import ModerationFactory
+from services.ops_trace.ops_trace_service import OpsTraceService
+from services.ops_trace.utils import measure_time
 
 logger = logging.getLogger(__name__)
 
 
 class InputModeration:
-    def check(self, app_id: str,
-              tenant_id: str,
-              app_config: AppConfig,
-              inputs: dict,
-              query: str) -> tuple[bool, dict, str]:
+    def check(
+        self, app_id: str,
+        tenant_id: str,
+        app_config: AppConfig,
+        inputs: dict,
+        query: str,
+        message_id: str,
+    ) -> tuple[bool, dict, str]:
         """
         Process sensitive_word_avoidance.
:param app_id: app id @@ -20,6 +25,7 @@ def check(self, app_id: str, :param app_config: app config :param inputs: inputs :param query: query + :param message_id: message id :return: """ if not app_config.sensitive_word_avoidance: @@ -35,8 +41,30 @@ def check(self, app_id: str, config=sensitive_word_avoidance_config.config ) - moderation_result = moderation_factory.moderation_for_inputs(inputs, query) + with measure_time() as timer: + moderation_result = moderation_factory.moderation_for_inputs(inputs, query) + from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName + + # get tracing instance + app_model_config = OpsTraceService.get_app_config_through_message_id(message_id) + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_id, app_model_config=app_model_config + ) + + if tracing_instance: + trace_manager = TraceQueueManager() + trace_manager.add_trace_task( + TraceTask( + tracing_instance, + TraceTaskName.MODERATION_TRACE, + message_id=message_id, + moderation_result=moderation_result, + inputs=inputs, + timer=timer + ) + ) + if not moderation_result.flagged: return False, inputs, query diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index 3f50427141c93e..08676e7f9113e1 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -24,6 +24,9 @@ from extensions.ext_database import db from models.dataset import Dataset, DatasetQuery, DocumentSegment from models.dataset import Document as DatasetDocument +from services.ops_trace.ops_trace_service import OpsTraceService +from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from services.ops_trace.utils import measure_time default_retrieval_model = { 'search_method': RetrievalMethod.SEMANTIC_SEARCH, @@ -38,14 +41,17 @@ class DatasetRetrieval: - def retrieve(self, app_id: str, user_id: str, tenant_id: str, - model_config: ModelConfigWithCredentialsEntity, - config: DatasetEntity, - query: str, - invoke_from: InvokeFrom, - show_retrieve_source: bool, - hit_callback: DatasetIndexToolCallbackHandler, - memory: Optional[TokenBufferMemory] = None) -> Optional[str]: + def retrieve( + self, app_id: str, user_id: str, tenant_id: str, + model_config: ModelConfigWithCredentialsEntity, + config: DatasetEntity, + query: str, + invoke_from: InvokeFrom, + show_retrieve_source: bool, + hit_callback: DatasetIndexToolCallbackHandler, + message_id: str, + memory: Optional[TokenBufferMemory] = None, + ) -> Optional[str]: """ Retrieve dataset. 
:param app_id: app_id @@ -57,6 +63,7 @@ def retrieve(self, app_id: str, user_id: str, tenant_id: str, :param invoke_from: invoke from :param show_retrieve_source: show retrieve source :param hit_callback: hit callback + :param message_id: message id :param memory: memory :return: """ @@ -113,15 +120,20 @@ def retrieve(self, app_id: str, user_id: str, tenant_id: str, all_documents = [] user_from = 'account' if invoke_from in [InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER] else 'end_user' if retrieve_config.retrieve_strategy == DatasetRetrieveConfigEntity.RetrieveStrategy.SINGLE: - all_documents = self.single_retrieve(app_id, tenant_id, user_id, user_from, available_datasets, query, - model_instance, - model_config, planning_strategy) + all_documents = self.single_retrieve( + app_id, tenant_id, user_id, user_from, available_datasets, query, + model_instance, + model_config, planning_strategy, message_id + ) elif retrieve_config.retrieve_strategy == DatasetRetrieveConfigEntity.RetrieveStrategy.MULTIPLE: - all_documents = self.multiple_retrieve(app_id, tenant_id, user_id, user_from, - available_datasets, query, retrieve_config.top_k, - retrieve_config.score_threshold, - retrieve_config.reranking_model.get('reranking_provider_name'), - retrieve_config.reranking_model.get('reranking_model_name')) + all_documents = self.multiple_retrieve( + app_id, tenant_id, user_id, user_from, + available_datasets, query, retrieve_config.top_k, + retrieve_config.score_threshold, + retrieve_config.reranking_model.get('reranking_provider_name'), + retrieve_config.reranking_model.get('reranking_model_name'), + message_id, + ) document_score_list = {} for item in all_documents: @@ -189,16 +201,18 @@ def retrieve(self, app_id: str, user_id: str, tenant_id: str, return str("\n".join(document_context_list)) return '' - def single_retrieve(self, app_id: str, - tenant_id: str, - user_id: str, - user_from: str, - available_datasets: list, - query: str, - model_instance: ModelInstance, - model_config: ModelConfigWithCredentialsEntity, - planning_strategy: PlanningStrategy, - ): + def single_retrieve( + self, app_id: str, + tenant_id: str, + user_id: str, + user_from: str, + available_datasets: list, + query: str, + model_instance: ModelInstance, + model_config: ModelConfigWithCredentialsEntity, + planning_strategy: PlanningStrategy, + message_id: Optional[str] = None, + ): tools = [] for dataset in available_datasets: description = dataset.description @@ -251,27 +265,35 @@ def single_retrieve(self, app_id: str, if score_threshold_enabled: score_threshold = retrieval_model_config.get("score_threshold") - results = RetrievalService.retrieve(retrival_method=retrival_method, dataset_id=dataset.id, - query=query, - top_k=top_k, score_threshold=score_threshold, - reranking_model=reranking_model) + with measure_time() as timer: + results = RetrievalService.retrieve( + retrival_method=retrival_method, dataset_id=dataset.id, + query=query, + top_k=top_k, score_threshold=score_threshold, + reranking_model=reranking_model + ) self._on_query(query, [dataset_id], app_id, user_from, user_id) + if results: - self._on_retrival_end(results) + self._on_retrival_end(results, message_id, timer) + return results return [] - def multiple_retrieve(self, - app_id: str, - tenant_id: str, - user_id: str, - user_from: str, - available_datasets: list, - query: str, - top_k: int, - score_threshold: float, - reranking_provider_name: str, - reranking_model_name: str): + def multiple_retrieve( + self, + app_id: str, + tenant_id: str, + user_id: str, + 
user_from: str,
+        available_datasets: list,
+        query: str,
+        top_k: int,
+        score_threshold: float,
+        reranking_provider_name: str,
+        reranking_model_name: str,
+        message_id: Optional[str] = None,
+    ):
         threads = []
         all_documents = []
         dataset_ids = [dataset.id for dataset in available_datasets]
@@ -297,15 +319,23 @@ def multiple_retrieve(self,
             )
 
             rerank_runner = RerankRunner(rerank_model_instance)
-            all_documents = rerank_runner.run(query, all_documents,
-                                              score_threshold,
-                                              top_k)
+
+            with measure_time() as timer:
+                all_documents = rerank_runner.run(
+                    query, all_documents,
+                    score_threshold,
+                    top_k
+                )
 
         self._on_query(query, dataset_ids, app_id, user_from, user_id)
+
         if all_documents:
-            self._on_retrival_end(all_documents)
+            self._on_retrival_end(all_documents, message_id, timer)
+
         return all_documents
 
-    def _on_retrival_end(self, documents: list[Document]) -> None:
+    def _on_retrival_end(
+        self, documents: list[Document], message_id: Optional[str] = None, timer: Optional[dict] = None
+    ) -> None:
         """Handle retrival end."""
         for document in documents:
             query = db.session.query(DocumentSegment).filter(
@@ -324,6 +354,23 @@ def _on_retrival_end(self, documents: list[Document]) -> None:
 
         db.session.commit()
 
+        # get tracing instance; skip when no message context was passed through
+        if message_id:
+            app_model_config = OpsTraceService.get_app_config_through_message_id(message_id)
+            tracing_instance = OpsTraceService.get_ops_trace_instance(
+                app_id=app_model_config.app_id, app_model_config=app_model_config
+            )
+            if tracing_instance:
+                trace_manager = TraceQueueManager()
+                trace_manager.add_trace_task(
+                    TraceTask(
+                        tracing_instance,
+                        TraceTaskName.DATASET_RETRIEVAL_TRACE,
+                        message_id=message_id,
+                        documents=documents,
+                        timer=timer
+                    )
+                )
+
     def _on_query(self, query: str, dataset_ids: list[str], app_id: str, user_from: str, user_id: str) -> None:
         """
         Handle query.
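A note on the `timer` objects threaded through these hunks: `measure_time`
comes from the new api/services/ops_trace/utils.py, whose body is not shown in
the hunks above. The call sites only rely on `timer.get("start")` and
`timer.get("end")`, so a minimal sketch consistent with that usage (the actual
utils.py implementation may differ) would be:

    from contextlib import contextmanager
    from datetime import datetime

    @contextmanager
    def measure_time():
        # Yield a mutable dict so callers can read start/end after the block;
        # "end" is filled in once the measured block finishes.
        timer = {"start": datetime.now(), "end": None}
        try:
            yield timer
        finally:
            timer["end"] = datetime.now()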
diff --git a/api/core/tools/tool/workflow_tool.py b/api/core/tools/tool/workflow_tool.py index 122b663f943be3..2c362d80c47831 100644 --- a/api/core/tools/tool/workflow_tool.py +++ b/api/core/tools/tool/workflow_tool.py @@ -1,7 +1,7 @@ import json import logging from copy import deepcopy -from typing import Any, Union +from typing import Any, Optional, Union from core.file.file_obj import FileTransferMethod, FileVar from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter, ToolProviderType @@ -31,9 +31,10 @@ def tool_provider_type(self) -> ToolProviderType: :return: the tool provider type """ return ToolProviderType.WORKFLOW - - def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) \ - -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: + + def _invoke( + self, user_id: str, tool_parameters: dict[str, Any], tracing_instance: Optional[Any] = None + ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]: """ invoke the tool """ @@ -56,6 +57,7 @@ def _invoke(self, user_id: str, tool_parameters: dict[str, Any]) \ invoke_from=self.runtime.invoke_from, stream=False, call_depth=self.workflow_call_depth + 1, + tracing_instance=tracing_instance, ) data = result.get('data', {}) diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py index 16fe9051e3b34a..bf96461cc1447e 100644 --- a/api/core/tools/tool_engine.py +++ b/api/core/tools/tool_engine.py @@ -1,7 +1,7 @@ from copy import deepcopy from datetime import datetime, timezone from mimetypes import guess_type -from typing import Union +from typing import Optional, Union from yarl import URL @@ -24,6 +24,7 @@ from core.tools.utils.message_transformer import ToolFileMessageTransformer from extensions.ext_database import db from models.model import Message, MessageFile +from services.ops_trace.base_trace_instance import BaseTraceInstance class ToolEngine: @@ -31,10 +32,12 @@ class ToolEngine: Tool runtime engine take care of the tool executions. """ @staticmethod - def agent_invoke(tool: Tool, tool_parameters: Union[str, dict], - user_id: str, tenant_id: str, message: Message, invoke_from: InvokeFrom, - agent_tool_callback: DifyAgentCallbackHandler) \ - -> tuple[str, list[tuple[MessageFile, bool]], ToolInvokeMeta]: + def agent_invoke( + tool: Tool, tool_parameters: Union[str, dict], + user_id: str, tenant_id: str, message: Message, invoke_from: InvokeFrom, + agent_tool_callback: DifyAgentCallbackHandler, + tracing_instance: Optional[BaseTraceInstance] = None + ) -> tuple[str, list[tuple[MessageFile, bool]], ToolInvokeMeta]: """ Agent invokes the tool with the given arguments. """ @@ -82,9 +85,11 @@ def agent_invoke(tool: Tool, tool_parameters: Union[str, dict], # hit the callback handler agent_tool_callback.on_tool_end( - tool_name=tool.identity.name, - tool_inputs=tool_parameters, - tool_outputs=plain_text + tool_name=tool.identity.name, + tool_inputs=tool_parameters, + tool_outputs=plain_text, + message_id=message.id, + tracing_instance=tracing_instance, ) # transform tool invoke message to get LLM friendly message @@ -120,8 +125,9 @@ def agent_invoke(tool: Tool, tool_parameters: Union[str, dict], def workflow_invoke(tool: Tool, tool_parameters: dict, user_id: str, workflow_id: str, workflow_tool_callback: DifyWorkflowCallbackHandler, - workflow_call_depth: int) \ - -> list[ToolInvokeMessage]: + workflow_call_depth: int, + tracing_instance: Optional[BaseTraceInstance] = None + ) -> list[ToolInvokeMessage]: """ Workflow invokes the tool with the given arguments. 
""" @@ -139,9 +145,10 @@ def workflow_invoke(tool: Tool, tool_parameters: dict, # hit the callback handler workflow_tool_callback.on_tool_end( - tool_name=tool.identity.name, - tool_inputs=tool_parameters, - tool_outputs=response + tool_name=tool.identity.name, + tool_inputs=tool_parameters, + tool_outputs=response, + tracing_instance=tracing_instance, ) return response diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index 2a472fc8d2cbf4..42d8648594ad5f 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -13,7 +13,9 @@ from core.workflow.nodes.base_node import BaseNode from core.workflow.nodes.tool.entities import ToolNodeData from core.workflow.utils.variable_template_parser import VariableTemplateParser -from models.workflow import WorkflowNodeExecutionStatus +from extensions.ext_database import db +from models.workflow import Workflow, WorkflowNodeExecutionStatus +from services.ops_trace.ops_trace_service import OpsTraceService class ToolNode(BaseNode): @@ -54,6 +56,11 @@ def _run(self, variable_pool: VariablePool) -> NodeRunResult: # get parameters parameters = self._generate_parameters(variable_pool, node_data, tool_runtime) + # get tracing instance + workflow: Workflow = db.session.query(Workflow).filter(Workflow.id == self.workflow_id).first() + app_id = workflow.app_id + tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id, workflow=workflow) + try: messages = ToolEngine.workflow_invoke( tool=tool_runtime, @@ -62,6 +69,7 @@ def _run(self, variable_pool: VariablePool) -> NodeRunResult: workflow_id=self.workflow_id, workflow_tool_callback=DifyWorkflowCallbackHandler(), workflow_call_depth=self.workflow_call_depth, + tracing_instance=tracing_instance ) except Exception as e: return NodeRunResult( diff --git a/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py b/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py new file mode 100644 index 00000000000000..cffd36a0cd3ee8 --- /dev/null +++ b/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py @@ -0,0 +1,49 @@ +"""update AppModelConfig and add table TracingAppConfig + +Revision ID: 04c602f5dc9b +Revises: 4e99a8df00ff +Create Date: 2024-06-12 07:49:07.666510 + +""" +import sqlalchemy as sa +from alembic import op + +import models as models + +# revision identifiers, used by Alembic. +revision = '04c602f5dc9b' +down_revision = '4e99a8df00ff' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table('tracing_app_configs', + sa.Column('id', models.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('app_id', models.StringUUID(), nullable=False), + sa.Column('tracing_provider', sa.String(length=255), nullable=True), + sa.Column('tracing_config', sa.JSON(), nullable=True), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), + sa.PrimaryKeyConstraint('id', name='tracing_app_config_pkey') + ) + with op.batch_alter_table('tracing_app_configs', schema=None) as batch_op: + batch_op.create_index('tracing_app_config_app_id_idx', ['app_id'], unique=False) + + with op.batch_alter_table('app_model_configs', schema=None) as batch_op: + batch_op.add_column(sa.Column('trace_config', sa.Text(), nullable=True)) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('app_model_configs', schema=None) as batch_op: + batch_op.drop_column('trace_config') + + with op.batch_alter_table('tracing_app_configs', schema=None) as batch_op: + batch_op.drop_index('tracing_app_config_app_id_idx') + + op.drop_table('tracing_app_configs') + # ### end Alembic commands ### diff --git a/api/models/model.py b/api/models/model.py index 3024be0b4c86b6..cfa7203a6cf200 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -6,7 +6,7 @@ from flask import current_app, request from flask_login import UserMixin -from sqlalchemy import Float, text +from sqlalchemy import Float, func, text from core.file.tool_file_parser import ToolFileParser from core.file.upload_file_parser import UploadFileParser @@ -233,6 +233,7 @@ class AppModelConfig(db.Model): dataset_configs = db.Column(db.Text) external_data_tools = db.Column(db.Text) file_upload = db.Column(db.Text) + trace_config = db.Column(db.Text) @property def app(self): @@ -1328,3 +1329,36 @@ class TagBinding(db.Model): target_id = db.Column(StringUUID, nullable=True) created_by = db.Column(StringUUID, nullable=False) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) + + +class TracingAppConfig(db.Model): + __tablename__ = 'tracing_app_configs' + __table_args__ = ( + db.PrimaryKeyConstraint('id', name='tracing_app_config_pkey'), + db.Index('tracing_app_config_app_id_idx', 'app_id'), + ) + + id = db.Column(StringUUID, server_default=db.text('uuid_generate_v4()')) + app_id = db.Column(StringUUID, nullable=False) + tracing_provider = db.Column(db.String(255), nullable=True) + tracing_config = db.Column(db.JSON, nullable=True) + created_at = db.Column(db.DateTime, nullable=False, server_default=func.now()) + updated_at = db.Column(db.DateTime, nullable=False, server_default=func.now(), onupdate=func.now()) + + @property + def tracing_config_dict(self): + return self.tracing_config if self.tracing_config else {} + + @property + def tracing_config_str(self): + return json.dumps(self.tracing_config_dict) + + def to_dict(self): + return { + 'id': self.id, + 'app_id': self.app_id, + 'tracing_provider': self.tracing_provider, + 'tracing_config': self.tracing_config_dict, + "created_at": self.created_at.__str__() if self.created_at else None, + 'updated_at': self.updated_at.__str__() if self.updated_at else None, + } diff --git a/api/pyproject.toml b/api/pyproject.toml index f69ef26beee9f1..8b3f32596367dd 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml 
@@ -185,6 +185,8 @@ tcvectordb = "1.3.2" chromadb = "~0.5.1" tenacity = "~8.3.0" cos-python-sdk-v5 = "1.9.30" +langfuse = "^2.36.1" +langsmith = "^0.1.77" novita-client = "^0.5.6" opensearch-py = "2.4.0" diff --git a/api/services/app_generate_service.py b/api/services/app_generate_service.py index f73a6dcbb686b1..f73a88fdd11451 100644 --- a/api/services/app_generate_service.py +++ b/api/services/app_generate_service.py @@ -18,7 +18,8 @@ def generate(cls, app_model: App, user: Union[Account, EndUser], args: Any, invoke_from: InvokeFrom, - streaming: bool = True) -> Union[dict, Generator[dict, None, None]]: + streaming: bool = True, + ) -> Union[dict, Generator[dict, None, None]]: """ App Content Generate :param app_model: app model diff --git a/api/services/conversation_service.py b/api/services/conversation_service.py index 5c2fb83b7249e5..44a264087cbd69 100644 --- a/api/services/conversation_service.py +++ b/api/services/conversation_service.py @@ -96,7 +96,7 @@ def auto_generate_name(cls, app_model: App, conversation: Conversation): # generate conversation name try: - name = LLMGenerator.generate_conversation_name(app_model.tenant_id, message.query) + name = LLMGenerator.generate_conversation_name(app_model.tenant_id, message.query, conversation.id) conversation.name = name except: pass diff --git a/api/services/message_service.py b/api/services/message_service.py index e826dcc6bf1455..49555e55588a77 100644 --- a/api/services/message_service.py +++ b/api/services/message_service.py @@ -19,6 +19,9 @@ MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError, ) +from services.ops_trace.ops_trace_service import OpsTraceService +from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from services.ops_trace.utils import measure_time from services.workflow_service import WorkflowService @@ -262,9 +265,27 @@ def get_suggested_questions_after_answer(cls, app_model: App, user: Optional[Uni message_limit=3, ) - questions = LLMGenerator.generate_suggested_questions_after_answer( - tenant_id=app_model.tenant_id, - histories=histories + with measure_time() as timer: + questions = LLMGenerator.generate_suggested_questions_after_answer( + tenant_id=app_model.tenant_id, + histories=histories + ) + + # get tracing instance + app_model_config = OpsTraceService.get_app_config_through_message_id(message_id) + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_model_config.app_id, app_model_config=app_model_config ) + if tracing_instance: + trace_manager = TraceQueueManager() + trace_manager.add_trace_task( + TraceTask( + tracing_instance, + TraceTaskName.SUGGESTED_QUESTION_TRACE, + message_id=message_id, + suggested_question=questions, + timer=timer + ) + ) return questions diff --git a/api/services/ops_trace/base_trace_instance.py b/api/services/ops_trace/base_trace_instance.py new file mode 100644 index 00000000000000..d785c09b75cd79 --- /dev/null +++ b/api/services/ops_trace/base_trace_instance.py @@ -0,0 +1,31 @@ +from abc import ABC, abstractmethod + + +class BaseTraceInstance(ABC): + @abstractmethod + def __init__(self): + ... 
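+
+    # Concrete backends (the Langfuse and LangSmith implementations added in
+    # this patch) subclass this interface and override each *_trace hook;
+    # call sites enqueue TraceTask objects that carry the instance plus
+    # keyword payloads such as message_id, documents and timer.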
+ + @abstractmethod + def message_trace(self, **kwargs): + return kwargs + + @abstractmethod + def moderation_trace(self, **kwargs): + return kwargs + + @abstractmethod + def suggested_question_trace(self, **kwargs): + return kwargs + + @abstractmethod + def dataset_retrieval_trace(self, **kwargs): + return kwargs + + @abstractmethod + def tool_trace(self, **kwargs): + return kwargs + + @abstractmethod + def generate_name_trace(self, **kwargs): + return kwargs diff --git a/api/services/ops_trace/langfuse_trace.py b/api/services/ops_trace/langfuse_trace.py new file mode 100644 index 00000000000000..af6badbd96e626 --- /dev/null +++ b/api/services/ops_trace/langfuse_trace.py @@ -0,0 +1,712 @@ +import json +import os +from datetime import datetime, timedelta +from enum import Enum +from typing import Any, Optional, Union + +from langfuse import Langfuse +from pydantic import BaseModel, Field, field_validator +from pydantic_core.core_schema import ValidationInfo + +from core.moderation.base import ModerationInputsResult +from extensions.ext_database import db +from models.dataset import Document +from models.model import Message, MessageAgentThought, MessageFile +from models.workflow import WorkflowNodeExecution, WorkflowRun +from services.ops_trace.base_trace_instance import BaseTraceInstance +from services.ops_trace.utils import filter_none_values + + +def validate_input_output(v, field_name): + """ + Validate input output + :param v: + :param field_name: + :return: + """ + if v == {} or v is None: + return v + if isinstance(v, str): + return {field_name: v} + elif isinstance(v, list): + if len(v) > 0 and isinstance(v[0], dict): + return {"message": v} + else: + return {field_name: v} + return v + + +class LevelEnum(str, Enum): + DEBUG = "DEBUG" + WARNING = "WARNING" + ERROR = "ERROR" + DEFAULT = "DEFAULT" + + +class LangfuseTrace(BaseModel): + """ + Langfuse trace model + """ + id: Optional[str] = Field( + default=None, + description="The id of the trace can be set, defaults to a random id. Used to link traces to external systems " + "or when creating a distributed trace. Traces are upserted on id.", + ) + name: Optional[str] = Field( + default=None, + description="Identifier of the trace. Useful for sorting/filtering in the UI.", + ) + input: Optional[Union[str, dict[str, Any], list, None]] = Field( + default=None, description="The input of the trace. Can be any JSON object." + ) + output: Optional[Union[str, dict[str, Any], list, None]] = Field( + default=None, description="The output of the trace. Can be any JSON object." + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, + description="Additional metadata of the trace. Can be any JSON object. Metadata is merged when being updated " + "via the API.", + ) + user_id: Optional[str] = Field( + default=None, + description="The id of the user that triggered the execution. Used to provide user-level analytics.", + ) + session_id: Optional[str] = Field( + default=None, + description="Used to group multiple traces into a session in Langfuse. Use your own session/thread identifier.", + ) + version: Optional[str] = Field( + default=None, + description="The version of the trace type. Used to understand how changes to the trace type affect metrics. " + "Useful in debugging.", + ) + release: Optional[str] = Field( + default=None, + description="The release identifier of the current deployment. Used to understand how changes of different " + "deployments affect metrics. 
Useful in debugging.", + ) + tags: Optional[list[str]] = Field( + default=None, + description="Tags are used to categorize or label traces. Traces can be filtered by tags in the UI and GET " + "API. Tags can also be changed in the UI. Tags are merged and never deleted via the API.", + ) + public: Optional[bool] = Field( + default=None, + description="You can make a trace public to share it via a public link. This allows others to view the trace " + "without needing to log in or be members of your Langfuse project.", + ) + + @field_validator("input", "output") + def ensure_dict(cls, v, info: ValidationInfo): + field_name = info.field_name + return validate_input_output(v, field_name) + + +class LangfuseSpan(BaseModel): + """ + Langfuse span model + """ + id: Optional[str] = Field( + default=None, + description="The id of the span can be set, otherwise a random id is generated. Spans are upserted on id.", + ) + session_id: Optional[str] = Field( + default=None, + description="Used to group multiple spans into a session in Langfuse. Use your own session/thread identifier.", + ) + trace_id: Optional[str] = Field( + default=None, + description="The id of the trace the span belongs to. Used to link spans to traces.", + ) + user_id: Optional[str] = Field( + default=None, + description="The id of the user that triggered the execution. Used to provide user-level analytics.", + ) + start_time: Optional[datetime | str] = Field( + default_factory=datetime.now, + description="The time at which the span started, defaults to the current time.", + ) + end_time: Optional[datetime | str] = Field( + default=None, + description="The time at which the span ended. Automatically set by span.end().", + ) + name: Optional[str] = Field( + default=None, + description="Identifier of the span. Useful for sorting/filtering in the UI.", + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, + description="Additional metadata of the span. Can be any JSON object. Metadata is merged when being updated " + "via the API.", + ) + level: Optional[str] = Field( + default=None, + description="The level of the span. Can be DEBUG, DEFAULT, WARNING or ERROR. Used for sorting/filtering of " + "traces with elevated error levels and for highlighting in the UI.", + ) + status_message: Optional[str] = Field( + default=None, + description="The status message of the span. Additional field for context of the event. E.g. the error " + "message of an error event.", + ) + input: Optional[Union[str, dict[str, Any], list, None]] = Field( + default=None, description="The input of the span. Can be any JSON object." + ) + output: Optional[Union[str, dict[str, Any], list, None]] = Field( + default=None, description="The output of the span. Can be any JSON object." + ) + version: Optional[str] = Field( + default=None, + description="The version of the span type. Used to understand how changes to the span type affect metrics. 
" + "Useful in debugging.", + ) + + @field_validator("input", "output") + def ensure_dict(cls, v, info: ValidationInfo): + field_name = info.field_name + return validate_input_output(v, field_name) + + +class UnitEnum(str, Enum): + CHARACTERS = "CHARACTERS" + TOKENS = "TOKENS" + SECONDS = "SECONDS" + MILLISECONDS = "MILLISECONDS" + IMAGES = "IMAGES" + + +class GenerationUsage(BaseModel): + promptTokens: Optional[int] = None + completionTokens: Optional[int] = None + totalTokens: Optional[int] = None + input: Optional[int] = None + output: Optional[int] = None + total: Optional[int] = None + unit: Optional[UnitEnum] = None + inputCost: Optional[float] = None + outputCost: Optional[float] = None + totalCost: Optional[float] = None + + +class LangfuseGeneration(BaseModel): + id: Optional[str] = Field( + default=None, + description="The id of the generation can be set, defaults to random id.", + ) + trace_id: Optional[str] = Field( + default=None, + description="The id of the trace the generation belongs to. Used to link generations to traces.", + ) + parent_observation_id: Optional[str] = Field( + default=None, + description="The id of the observation the generation belongs to. Used to link generations to observations.", + ) + name: Optional[str] = Field( + default=None, + description="Identifier of the generation. Useful for sorting/filtering in the UI.", + ) + start_time: Optional[datetime | str] = Field( + default_factory=datetime.now, + description="The time at which the generation started, defaults to the current time.", + ) + completion_start_time: Optional[datetime | str] = Field( + default=None, + description="The time at which the completion started (streaming). Set it to get latency analytics broken " + "down into time until completion started and completion duration.", + ) + end_time: Optional[datetime | str] = Field( + default=None, + description="The time at which the generation ended. Automatically set by generation.end().", + ) + model: Optional[str] = Field( + default=None, description="The name of the model used for the generation." + ) + model_parameters: Optional[dict[str, Any]] = Field( + default=None, + description="The parameters of the model used for the generation; can be any key-value pairs.", + ) + input: Optional[Any] = Field( + default=None, + description="The prompt used for the generation. Can be any string or JSON object.", + ) + output: Optional[Any] = Field( + default=None, + description="The completion generated by the model. Can be any string or JSON object.", + ) + usage: Optional[GenerationUsage] = Field( + default=None, + description="The usage object supports the OpenAi structure with tokens and a more generic version with " + "detailed costs and units.", + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, + description="Additional metadata of the generation. Can be any JSON object. Metadata is merged when being " + "updated via the API.", + ) + level: Optional[LevelEnum] = Field( + default=None, + description="The level of the generation. Can be DEBUG, DEFAULT, WARNING or ERROR. Used for sorting/filtering " + "of traces with elevated error levels and for highlighting in the UI.", + ) + status_message: Optional[str] = Field( + default=None, + description="The status message of the generation. Additional field for context of the event. E.g. the error " + "message of an error event.", + ) + version: Optional[str] = Field( + default=None, + description="The version of the generation type. 
Used to understand how changes to the span type affect " + "metrics. Useful in debugging.", + ) + + @field_validator("input", "output") + def ensure_dict(cls, v, info: ValidationInfo): + field_name = info.field_name + return validate_input_output(v, field_name) + + +class LangFuseDataTrace(BaseTraceInstance): + def __init__( + self, + langfuse_client_public_key: str = None, + langfuse_client_secret_key: str = None, + langfuse_client_host: str = "https://cloud.langfuse.com", + ): + super().__init__() + self.langfuse_client = Langfuse( + public_key=langfuse_client_public_key, + secret_key=langfuse_client_secret_key, + host=langfuse_client_host, + ) + self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") + + def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): + conversion_id = kwargs.get("conversation_id") + workflow_id = workflow_run.workflow_id + tenant_id = workflow_run.tenant_id + workflow_run_id = workflow_run.id + workflow_run_elapsed_time = workflow_run.elapsed_time + workflow_run_status = workflow_run.status + workflow_run_inputs = ( + json.loads(workflow_run.inputs) if workflow_run.inputs else {} + ) + workflow_run_outputs = ( + json.loads(workflow_run.outputs) if workflow_run.outputs else {} + ) + workflow_run_version = workflow_run.version + error = workflow_run.error if workflow_run.error else "" + + total_tokens = workflow_run.total_tokens + + file_list = workflow_run_inputs.get("sys.file") if workflow_run_inputs.get("sys.file") else [] + query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or "" + + metadata = { + "workflow_id": workflow_id, + "conversation_id": conversion_id, + "workflow_run_id": workflow_run_id, + "tenant_id": tenant_id, + "elapsed_time": workflow_run_elapsed_time, + "status": workflow_run_status, + "version": workflow_run_version, + "total_tokens": total_tokens, + "file_list": file_list, + } + + trace_data = LangfuseTrace( + id=workflow_run_id, + name=f"workflow_{workflow_run_id}", + user_id=tenant_id, + input=query, + output=workflow_run_outputs, + metadata=metadata, + session_id=conversion_id, + tags=["workflow"], + ) + + self.add_trace(langfuse_trace_data=trace_data) + + # through workflow_run_id get all_nodes_execution + workflow_nodes_executions = ( + db.session.query(WorkflowNodeExecution) + .filter(WorkflowNodeExecution.workflow_run_id == workflow_run_id) + .order_by(WorkflowNodeExecution.created_at) + .all() + ) + + for node_execution in workflow_nodes_executions: + node_execution_id = node_execution.id + tenant_id = node_execution.tenant_id + app_id = node_execution.app_id + node_name = node_execution.title + node_type = node_execution.node_type + status = node_execution.status + inputs = json.loads(node_execution.inputs) if node_execution.inputs else {} + outputs = ( + json.loads(node_execution.outputs) if node_execution.outputs else {} + ) + created_at = node_execution.created_at if node_execution.created_at else datetime.now() + finished_at = node_execution.finished_at if node_execution.finished_at else datetime.now() + metadata = json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {} + metadata.update( + { + "workflow_run_id": workflow_run_id, + "node_execution_id": node_execution_id, + "tenant_id": tenant_id, + "app_id": app_id, + "node_name": node_name, + "node_type": node_type, + "status": status, + } + ) + + process_data = json.loads(node_execution.process_data) if node_execution.process_data else {} + if process_data and process_data.get("model_mode") 
== "chat": + # add generation + node_total_tokens = json.loads(node_execution.execution_metadata).get("total_tokens") + generation_usage = GenerationUsage( + totalTokens=node_total_tokens, + ) + + langfuse_generation_data = LangfuseGeneration( + name=f"{node_name}_{node_execution_id}", + trace_id=workflow_run_id, + start_time=created_at, + end_time=finished_at, + input=inputs, + output=outputs, + metadata=metadata, + level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR, + status_message=error if error else "", + usage=generation_usage, + ) + + self.add_generation(langfuse_generation_data) + + # add span + span_data = LangfuseSpan( + name=f"{node_name}_{node_execution_id}", + input=inputs, + output=outputs, + trace_id=workflow_run_id, + start_time=created_at, + end_time=finished_at, + metadata=metadata, + level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR, + status_message=error if error else "", + ) + + self.add_span(langfuse_span_data=span_data) + + def message_trace(self, message_id: str, conversation_id: str, **kwargs): + message_data = kwargs.get("message_data") + conversation_mode = kwargs.get("conversation_mode") + message_tokens = message_data.message_tokens + answer_tokens = message_data.answer_tokens + total_tokens = message_tokens + answer_tokens + error = message_data.error if message_data.error else "" + input = message_data.message + file_list = input[0].get("files", []) + provider_response_latency = message_data.provider_response_latency + created_at = message_data.created_at + end_time = created_at + timedelta(seconds=provider_response_latency) + + # get message file data + message_file_data: MessageFile = kwargs.get("message_file_data") + file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else "" + file_list.append(file_url) + + metadata = { + "conversation_id": conversation_id, + "ls_provider": message_data.model_provider, + "ls_model_name": message_data.model_id, + "status": message_data.status, + "from_end_user_id": message_data.from_account_id, + "from_account_id": message_data.from_account_id, + "agent_based": message_data.agent_based, + "workflow_run_id": message_data.workflow_run_id, + "from_source": message_data.from_source, + } + + trace_data = LangfuseTrace( + id=message_id, + user_id=message_data.from_end_user_id if message_data.from_end_user_id else message_data.from_account_id, + name=f"message_{message_id}", + input={ + "message": input, + "files": file_list, + "message_tokens": message_tokens, + "answer_tokens": answer_tokens, + "total_tokens": total_tokens, + "error": error, + "provider_response_latency": provider_response_latency, + "created_at": created_at, + }, + output=message_data.answer, + metadata=metadata, + session_id=conversation_id, + tags=["message", str(conversation_mode)], + ) + self.add_trace(langfuse_trace_data=trace_data) + + # start add span + generation_usage = GenerationUsage( + totalTokens=total_tokens, + input=message_tokens, + output=answer_tokens, + total=total_tokens, + unit=UnitEnum.TOKENS, + ) + + langfuse_generation_data = LangfuseGeneration( + name=f"generation_{message_id}", + trace_id=message_id, + start_time=created_at, + end_time=end_time, + model=message_data.model_id, + input=input, + output=message_data.answer, + metadata=metadata, + level=LevelEnum.DEFAULT if message_data.status != 'error' else LevelEnum.ERROR, + status_message=message_data.error if message_data.error else "", + usage=generation_usage, + ) + + self.add_generation(langfuse_generation_data) + 
+ def moderation_trace(self, message_id: str, moderation_result: ModerationInputsResult, **kwargs): + inputs = kwargs.get("inputs") + message_data = kwargs.get("message_data") + flagged = moderation_result.flagged + action = moderation_result.action + preset_response = moderation_result.preset_response + query = moderation_result.query + timer = kwargs.get("timer") + start_time = timer.get("start") + end_time = timer.get("end") + + metadata = { + "message_id": message_id, + "action": action, + "preset_response": preset_response, + "query": query, + } + + span_data = LangfuseSpan( + name="moderation", + input=inputs, + output={ + "action": action, + "flagged": flagged, + "preset_response": preset_response, + "inputs": inputs, + }, + trace_id=message_id, + start_time=start_time or message_data.created_at, + end_time=end_time or message_data.created_at, + metadata=metadata, + ) + + self.add_span(langfuse_span_data=span_data) + + def suggested_question_trace(self, message_id: str, suggested_question: str, **kwargs): + message_data = kwargs.get("message_data") + timer = kwargs.get("timer") + start_time = timer.get("start") + end_time = timer.get("end") + inputs = message_data.query + + metadata = { + "message_id": message_id, + "ls_provider": message_data.model_provider, + "ls_model_name": message_data.model_id, + "status": message_data.status, + "from_end_user_id": message_data.from_account_id, + "from_account_id": message_data.from_account_id, + "agent_based": message_data.agent_based, + "workflow_run_id": message_data.workflow_run_id, + "from_source": message_data.from_source, + } + + span_data = LangfuseSpan( + name="suggested_question", + input=inputs, + output=suggested_question, + trace_id=message_id, + start_time=start_time, + end_time=end_time, + metadata=metadata, + level=LevelEnum.DEFAULT if message_data.status != 'error' else LevelEnum.ERROR, + status_message=message_data.error if message_data.error else "", + ) + + self.add_span(langfuse_span_data=span_data) + + def dataset_retrieval_trace(self, message_id: str, documents: list[Document], **kwargs): + message_data = kwargs.get("message_data") + inputs = message_data.query if message_data.query else message_data.inputs + metadata = { + "message_id": message_id, + "documents": documents + } + timer = kwargs.get("timer") + start_time = timer.get("start") + end_time = timer.get("end") + + dataset_retrieval_span_data = LangfuseSpan( + name="dataset_retrieval", + input=inputs, + output={"documents": documents}, + trace_id=message_id, + start_time=start_time, + end_time=end_time, + metadata=metadata, + ) + + self.add_span(langfuse_span_data=dataset_retrieval_span_data) + + def tool_trace(self, message_id: str, tool_name: str, tool_inputs: dict[str, Any], tool_outputs: str, **kwargs): + message_data: Message = kwargs.get("message_data") + created_time = message_data.created_at + end_time = message_data.updated_at + tool_config = {} + time_cost = 0 + error = None + tool_parameters = {} + + agent_thoughts: list[MessageAgentThought] = message_data.agent_thoughts + for agent_thought in agent_thoughts: + if tool_name in agent_thought.tools: + created_time = agent_thought.created_at + tool_meta_data = agent_thought.tool_meta.get(tool_name, {}) + tool_config = tool_meta_data.get('tool_config', {}) + time_cost = tool_meta_data.get('time_cost', 0) + end_time = created_time + timedelta(seconds=time_cost) + error = tool_meta_data.get('error', "") + tool_parameters = tool_meta_data.get('tool_parameters', {}) + + metadata = { + "message_id": 
message_id,
+            "tool_name": tool_name,
+            "tool_inputs": tool_inputs,
+            "tool_outputs": tool_outputs,
+            "tool_config": tool_config,
+            "time_cost": time_cost,
+            "error": error,
+            "tool_parameters": tool_parameters,
+        }
+
+        # get message file data
+        message_file_data: MessageFile = kwargs.get("message_file_data")
+        if message_file_data:
+            message_file_id = message_file_data.id if message_file_data else None
+            type = message_file_data.type
+            created_by_role = message_file_data.created_by_role
+            created_user_id = message_file_data.created_by
+
+            metadata.update(
+                {
+                    "message_file_id": message_file_id,
+                    "created_by_role": created_by_role,
+                    "created_user_id": created_user_id,
+                    "type": type,
+                }
+            )
+
+        tool_span_data = LangfuseSpan(
+            name=tool_name,
+            input=tool_inputs,
+            output=tool_outputs,
+            trace_id=message_id,
+            start_time=created_time,
+            end_time=end_time,
+            metadata=metadata,
+            level=LevelEnum.DEFAULT if not error else LevelEnum.ERROR,
+            status_message=error,
+        )
+
+        self.add_span(langfuse_span_data=tool_span_data)
+
+    def generate_name_trace(self, conversation_id: str, inputs: str, generate_conversation_name: str, **kwargs):
+        timer = kwargs.get("timer")
+        tenant_id = kwargs.get("tenant_id")
+        start_time = timer.get("start")
+        end_time = timer.get("end")
+
+        metadata = {
+            "conversation_id": conversation_id,
+        }
+
+        name_generation_trace_data = LangfuseTrace(
+            name="generate_name",
+            input=inputs,
+            output=generate_conversation_name,
+            user_id=tenant_id,
+            metadata=metadata,
+            session_id=conversation_id,
+        )
+
+        self.add_trace(langfuse_trace_data=name_generation_trace_data)
+
+        name_generation_span_data = LangfuseSpan(
+            name="generate_name",
+            input=inputs,
+            output=generate_conversation_name,
+            trace_id=conversation_id,
+            start_time=start_time,
+            end_time=end_time,
+            metadata=metadata,
+        )
+        self.add_span(langfuse_span_data=name_generation_span_data)
+
+    def add_trace(self, langfuse_trace_data: Optional[LangfuseTrace] = None):
+        format_trace_data = (
+            filter_none_values(langfuse_trace_data.model_dump()) if langfuse_trace_data else {}
+        )
+        try:
+            self.langfuse_client.trace(**format_trace_data)
+            print("LangFuse Trace created successfully")
+        except Exception as e:
+            raise ValueError(f"LangFuse Failed to create trace: {str(e)}") from e
+
+    def add_span(self, langfuse_span_data: Optional[LangfuseSpan] = None):
+        format_span_data = (
+            filter_none_values(langfuse_span_data.model_dump()) if langfuse_span_data else {}
+        )
+        try:
+            self.langfuse_client.span(**format_span_data)
+            print("LangFuse Span created successfully")
+        except Exception as e:
+            raise ValueError(f"LangFuse Failed to create span: {str(e)}") from e
+
+    def update_span(self, span, langfuse_span_data: Optional[LangfuseSpan] = None):
+        format_span_data = (
+            filter_none_values(langfuse_span_data.model_dump()) if langfuse_span_data else {}
+        )
+
+        span.end(**format_span_data)
+
+    def add_generation(
+        self, langfuse_generation_data: Optional[LangfuseGeneration] = None
+    ):
+        format_generation_data = (
+            filter_none_values(langfuse_generation_data.model_dump())
+            if langfuse_generation_data
+            else {}
+        )
+        try:
+            self.langfuse_client.generation(**format_generation_data)
+            print("LangFuse Generation created successfully")
+        except Exception as e:
+            raise ValueError(f"LangFuse Failed to create generation: {str(e)}") from e
+
+    def update_generation(
+        self, generation, langfuse_generation_data: Optional[LangfuseGeneration] = None
+    ):
+        format_generation_data = (
+            filter_none_values(langfuse_generation_data.model_dump())
+            if langfuse_generation_data
+            else {}
+        )
+
+        
generation.end(**format_generation_data) diff --git a/api/services/ops_trace/langsmith_trace.py b/api/services/ops_trace/langsmith_trace.py new file mode 100644 index 00000000000000..ef0afdfeb91681 --- /dev/null +++ b/api/services/ops_trace/langsmith_trace.py @@ -0,0 +1,545 @@ +import json +import os +from datetime import datetime, timedelta +from enum import Enum +from typing import Any, Optional, Union + +from langsmith import Client +from pydantic import BaseModel, Field, field_validator +from pydantic_core.core_schema import ValidationInfo + +from core.moderation.base import ModerationInputsResult +from extensions.ext_database import db +from models.dataset import Document +from models.model import Message, MessageAgentThought, MessageFile +from models.workflow import WorkflowNodeExecution, WorkflowRun +from services.ops_trace.base_trace_instance import BaseTraceInstance +from services.ops_trace.utils import filter_none_values + + +class LangSmithRunType(str, Enum): + tool = "tool" + chain = "chain" + llm = "llm" + retriever = "retriever" + embedding = "embedding" + prompt = "prompt" + parser = "parser" + + +class LangSmithTokenUsage(BaseModel): + input_tokens: Optional[int] = None + output_tokens: Optional[int] = None + total_tokens: Optional[int] = None + + +class LangSmithMultiModel(BaseModel): + file_list: Optional[list[str]] = Field(None, description="List of files") + + +class LangSmithRunModel(LangSmithTokenUsage, LangSmithMultiModel): + name: Optional[str] = Field(..., description="Name of the run") + inputs: Optional[Union[str, dict[str, Any], list, None]] = Field(None, description="Inputs of the run") + outputs: Optional[Union[str, dict[str, Any], list, None]] = Field(None, description="Outputs of the run") + run_type: LangSmithRunType = Field(..., description="Type of the run") + start_time: Optional[datetime | str] = Field(None, description="Start time of the run") + end_time: Optional[datetime | str] = Field(None, description="End time of the run") + extra: Optional[dict[str, Any]] = Field( + None, description="Extra information of the run" + ) + error: Optional[str] = Field(None, description="Error message of the run") + serialized: Optional[dict[str, Any]] = Field( + None, description="Serialized data of the run" + ) + parent_run_id: Optional[str] = Field(None, description="Parent run ID") + events: Optional[list[dict[str, Any]]] = Field( + None, description="Events associated with the run" + ) + tags: Optional[list[str]] = Field(None, description="Tags associated with the run") + trace_id: Optional[str] = Field( + None, description="Trace ID associated with the run" + ) + dotted_order: Optional[str] = Field(None, description="Dotted order of the run") + id: Optional[str] = Field(None, description="ID of the run") + session_id: Optional[str] = Field( + None, description="Session ID associated with the run" + ) + session_name: Optional[str] = Field( + None, description="Session name associated with the run" + ) + reference_example_id: Optional[str] = Field( + None, description="Reference example ID associated with the run" + ) + input_attachments: Optional[dict[str, Any]] = Field( + None, description="Input attachments of the run" + ) + output_attachments: Optional[dict[str, Any]] = Field( + None, description="Output attachments of the run" + ) + + @field_validator("inputs", "outputs") + def ensure_dict(cls, v, info: ValidationInfo): + field_name = info.field_name + values = info.data + if v == {} or v is None: + return v + usage_metadata = { + "input_tokens": 
values.get('input_tokens', 0), + "output_tokens": values.get('output_tokens', 0), + "total_tokens": values.get('total_tokens', 0), + } + file_list = values.get("file_list", []) + if isinstance(v, str): + return { + field_name: v, + "file_list": file_list, + "usage_metadata": usage_metadata, + } + elif isinstance(v, list): + if len(v) > 0 and isinstance(v[0], dict): + data = { + "message": v, + "usage_metadata": usage_metadata, + "file_list": file_list, + } + return data + else: + return {field_name: v} + if isinstance(v, dict): + v["usage_metadata"] = usage_metadata + v["file_list"] = file_list + return v + return v + + @field_validator("start_time", "end_time") + def format_time(cls, v, info: ValidationInfo): + if not isinstance(v, datetime): + raise ValueError(f"{info.field_name} must be a datetime object") + else: + return v.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + + +class LangSmithRunUpdateModel(BaseModel): + run_id: str = Field(..., description="ID of the run") + trace_id: Optional[str] = Field( + None, description="Trace ID associated with the run" + ) + dotted_order: Optional[str] = Field(None, description="Dotted order of the run") + parent_run_id: Optional[str] = Field(None, description="Parent run ID") + end_time: Optional[datetime | str] = Field(None, description="End time of the run") + error: Optional[str] = Field(None, description="Error message of the run") + inputs: Optional[dict[str, Any]] = Field(None, description="Inputs of the run") + outputs: Optional[dict[str, Any]] = Field(None, description="Outputs of the run") + events: Optional[list[dict[str, Any]]] = Field( + None, description="Events associated with the run" + ) + tags: Optional[list[str]] = Field(None, description="Tags associated with the run") + extra: Optional[dict[str, Any]] = Field( + None, description="Extra information of the run" + ) + input_attachments: Optional[dict[str, Any]] = Field( + None, description="Input attachments of the run" + ) + output_attachments: Optional[dict[str, Any]] = Field( + None, description="Output attachments of the run" + ) + + +class LangSmithDataTrace(BaseTraceInstance): + def __init__( + self, + langsmith_key: str = None, + project_name: str = None, + endpoint: str = "https://api.smith.langchain.com" + ): + super().__init__() + self.langsmith_key = langsmith_key + self.project_name = project_name + self.project_id = None + self.langsmith_client = Client( + api_key=langsmith_key, api_url=endpoint + ) + self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") + + def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): + conversion_id = kwargs.get("conversation_id") + workflow_id = workflow_run.workflow_id + tenant_id = workflow_run.tenant_id + workflow_run_id = workflow_run.id + workflow_run_created_at = workflow_run.created_at + workflow_run_finished_at = workflow_run.finished_at + workflow_run_elapsed_time = workflow_run.elapsed_time + workflow_run_status = workflow_run.status + workflow_run_inputs = ( + json.loads(workflow_run.inputs) if workflow_run.inputs else {} + ) + workflow_run_outputs = ( + json.loads(workflow_run.outputs) if workflow_run.outputs else {} + ) + workflow_run_version = workflow_run.version + error = workflow_run.error if workflow_run.error else "" + + total_tokens = workflow_run.total_tokens + + file_list = workflow_run_inputs.get("sys.file") if workflow_run_inputs.get("sys.file") else [] + query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or "" + + metadata = { + "workflow_id": workflow_id, + 
"conversation_id": conversion_id, + "workflow_run_id": workflow_run_id, + "tenant_id": tenant_id, + "elapsed_time": workflow_run_elapsed_time, + "status": workflow_run_status, + "version": workflow_run_version, + "total_tokens": total_tokens, + } + + langsmith_run = LangSmithRunModel( + file_list=file_list, + total_tokens=total_tokens, + id=workflow_run_id, + name=f"workflow_run_{workflow_run_id}", + inputs=query, + run_type=LangSmithRunType.tool, + start_time=workflow_run_created_at, + end_time=workflow_run_finished_at, + outputs=workflow_run_outputs, + extra={ + "metadata": metadata, + }, + error=error, + tags=["workflow"], + ) + + self.add_run(langsmith_run) + + # through workflow_run_id get all_nodes_execution + workflow_nodes_executions = ( + db.session.query(WorkflowNodeExecution) + .filter(WorkflowNodeExecution.workflow_run_id == workflow_run_id) + .order_by(WorkflowNodeExecution.created_at) + .all() + ) + + for node_execution in workflow_nodes_executions: + node_execution_id = node_execution.id + tenant_id = node_execution.tenant_id + app_id = node_execution.app_id + node_name = node_execution.title + node_type = node_execution.node_type + status = node_execution.status + inputs = json.loads(node_execution.inputs) if node_execution.inputs else {} + outputs = ( + json.loads(node_execution.outputs) if node_execution.outputs else {} + ) + created_at = node_execution.created_at if node_execution.created_at else datetime.now() + finished_at = node_execution.finished_at if node_execution.finished_at else datetime.now() + execution_metadata = ( + json.loads(node_execution.execution_metadata) + if node_execution.execution_metadata + else {} + ) + node_total_tokens = execution_metadata.get("total_tokens", 0) + + metadata = json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {} + metadata.update( + { + "node_execution_id": node_execution_id, + "tenant_id": tenant_id, + "app_id": app_id, + "app_name": node_name, + "node_type": node_type, + "status": status, + } + ) + + process_data = json.loads(node_execution.process_data) if node_execution.process_data else {} + if process_data and process_data.get("model_mode") == "chat": + run_type = LangSmithRunType.llm + elif node_type == "knowledge-retrieval": + run_type = LangSmithRunType.retriever + else: + run_type = LangSmithRunType.tool + + langsmith_run = LangSmithRunModel( + total_tokens=node_total_tokens, + name=f"{node_name}_{node_execution_id}", + inputs=inputs, + run_type=run_type, + start_time=created_at, + end_time=finished_at, + outputs=outputs, + file_list=file_list, + extra={ + "metadata": metadata, + }, + parent_run_id=workflow_run_id, + tags=["node_execution"], + ) + + self.add_run(langsmith_run) + + def message_trace(self, message_id: str, conversation_id: str, **kwargs): + message_data = kwargs.get("message_data") + conversation_mode = kwargs.get("conversation_mode") + message_tokens = message_data.message_tokens + answer_tokens = message_data.answer_tokens + total_tokens = message_tokens + answer_tokens + error = message_data.error if message_data.error else "" + inputs = message_data.message + file_list = inputs[0].get("files", []) + provider_response_latency = message_data.provider_response_latency + created_at = message_data.created_at + end_time = created_at + timedelta(seconds=provider_response_latency) + + # get message file data + message_file_data: MessageFile = kwargs.get("message_file_data") + file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else "" + 
file_list.append(file_url) + + metadata = { + "conversation_id": conversation_id, + "ls_provider": message_data.model_provider, + "ls_model_name": message_data.model_id, + "status": message_data.status, + "from_end_user_id": message_data.from_account_id, + "from_account_id": message_data.from_account_id, + "agent_based": message_data.agent_based, + "workflow_run_id": message_data.workflow_run_id, + "from_source": message_data.from_source, + } + message_run = LangSmithRunModel( + input_tokens=message_tokens, + output_tokens=answer_tokens, + total_tokens=total_tokens, + id=message_id, + name=f"message_{message_id}", + inputs=inputs, + run_type=LangSmithRunType.llm, + start_time=created_at, + end_time=end_time, + outputs=message_data.answer, + extra={ + "metadata": metadata, + }, + tags=["message", str(conversation_mode)], + error=error, + file_list=file_list, + ) + self.add_run(message_run) + + def moderation_trace(self, message_id: str, moderation_result: ModerationInputsResult, **kwargs): + inputs = kwargs.get("inputs") + message_data = kwargs.get("message_data") + flagged = moderation_result.flagged + action = moderation_result.action + preset_response = moderation_result.preset_response + query = moderation_result.query + timer = kwargs.get("timer") + start_time = timer.get("start") + end_time = timer.get("end") + + metadata = { + "message_id": message_id, + "action": action, + "preset_response": preset_response, + "query": query, + } + + langsmith_run = LangSmithRunModel( + name="moderation", + inputs=inputs, + outputs={ + "action": action, + "flagged": flagged, + "preset_response": preset_response, + "inputs": inputs, + }, + run_type=LangSmithRunType.tool, + extra={ + "metadata": metadata, + }, + tags=["moderation"], + parent_run_id=message_id, + start_time=start_time or message_data.created_at, + end_time=end_time or message_data.updated_at, + ) + + self.add_run(langsmith_run) + + def suggested_question_trace(self, message_id: str, suggested_question: str, **kwargs): + message_data = kwargs.get("message_data") + timer = kwargs.get("timer") + start_time = timer.get("start") + end_time = timer.get("end") + inputs = message_data.query + + metadata = { + "message_id": message_id, + "ls_provider": message_data.model_provider, + "ls_model_name": message_data.model_id, + "status": message_data.status, + "from_end_user_id": message_data.from_account_id, + "from_account_id": message_data.from_account_id, + "agent_based": message_data.agent_based, + "workflow_run_id": message_data.workflow_run_id, + "from_source": message_data.from_source, + } + + suggested_question_run = LangSmithRunModel( + name="suggested_question", + inputs=inputs, + outputs=suggested_question, + run_type=LangSmithRunType.tool, + extra={ + "metadata": metadata, + }, + tags=["suggested_question"], + parent_run_id=message_id, + start_time=start_time or message_data.created_at, + end_time=end_time or message_data.updated_at, + ) + + self.add_run(suggested_question_run) + + def dataset_retrieval_trace(self, message_id: str, documents: list[Document], **kwargs): + message_data = kwargs.get("message_data") + inputs = message_data.query if message_data.query else message_data.inputs + metadata = { + "message_id": message_id, + "documents": documents + } + timer = kwargs.get("timer") + start_time = timer.get("start") + end_time = timer.get("end") + + dataset_retrieval_run = LangSmithRunModel( + name="dataset_retrieval", + inputs=inputs, + outputs={"documents": documents}, + run_type=LangSmithRunType.retriever, + extra={ + 
"metadata": metadata, + }, + tags=["dataset_retrieval"], + parent_run_id=message_id, + start_time=start_time or message_data.created_at, + end_time=end_time or message_data.updated_at, + ) + + self.add_run(dataset_retrieval_run) + + def tool_trace(self, message_id: str, tool_name: str, tool_inputs: dict[str, Any], tool_outputs: str, **kwargs): + message_data: Message = kwargs.get("message_data") + created_time = message_data.created_at + end_time = message_data.updated_at + tool_config = {} + time_cost = 0 + error = "" + tool_parameters = {} + file_url = "" + + agent_thoughts: list[MessageAgentThought] = message_data.agent_thoughts + for agent_thought in agent_thoughts: + if tool_name in agent_thought.tools: + created_time = agent_thought.created_at + tool_meta_data = agent_thought.tool_meta.get(tool_name, {}) + tool_config = tool_meta_data.get('tool_config', {}) + time_cost = tool_meta_data.get('time_cost', 0) + end_time = created_time + timedelta(seconds=time_cost) + error = tool_meta_data.get('error', "") + tool_parameters = tool_meta_data.get('tool_parameters', {}) + + metadata = { + "message_id": message_id, + "tool_name": tool_name, + "tool_inputs": tool_inputs, + "tool_outputs": tool_outputs, + "tool_config": tool_config, + "time_cost": time_cost, + "error": error, + "tool_parameters": tool_parameters, + } + + # get message file data + message_file_data: MessageFile = kwargs.get("message_file_data") + if message_file_data: + message_file_id = message_file_data.id if message_file_data else None + type = message_file_data.type + created_by_role = message_file_data.created_by_role + created_user_id = message_file_data.created_by + file_url = f"{self.file_base_url}/{message_file_data.url}" + + metadata.update( + { + "message_file_id": message_file_id, + "created_by_role": created_by_role, + "created_user_id": created_user_id, + "type": type, + } + ) + + tool_run = LangSmithRunModel( + name=tool_name, + inputs=tool_inputs, + outputs=tool_outputs, + run_type=LangSmithRunType.tool, + extra={ + "metadata": metadata, + }, + tags=["tool", tool_name], + parent_run_id=message_id, + start_time=created_time, + end_time=end_time, + file_list=[file_url], + ) + + self.add_run(tool_run) + + def generate_name_trace(self, conversation_id: str, inputs: str, generate_conversation_name: str, **kwargs): + timer = kwargs.get("timer") + start_time = timer.get("start") + end_time = timer.get("end") + + metadata = { + "conversation_id": conversation_id, + } + + name_run = LangSmithRunModel( + name="generate_name", + inputs=inputs, + outputs=generate_conversation_name, + run_type=LangSmithRunType.tool, + extra={ + "metadata": metadata, + }, + tags=["generate_name"], + start_time=start_time or datetime.now(), + end_time=end_time or datetime.now(), + ) + + self.add_run(name_run) + + def add_run(self, run_data: LangSmithRunModel): + data = run_data.model_dump() + if self.project_id: + data["session_id"] = self.project_id + elif self.project_name: + data["session_name"] = self.project_name + + data = filter_none_values(data) + try: + self.langsmith_client.create_run(**data) + print("LangSmith Run created successfully.") + except Exception as e: + raise f"LangSmith Failed to create run: {str(e)}" + + def update_run(self, update_run_data: LangSmithRunUpdateModel): + data = update_run_data.model_dump() + data = filter_none_values(data) + try: + self.langsmith_client.update_run(**data) + print("LangSmith Run updated successfully.") + except Exception as e: + raise f"LangSmith Failed to update run: {str(e)}" diff --git 
a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py new file mode 100644 index 00000000000000..8072ca3948a830 --- /dev/null +++ b/api/services/ops_trace/ops_trace_service.py @@ -0,0 +1,321 @@ +import json +from enum import Enum +from typing import Optional + +from pydantic import BaseModel + +from core.app.app_config.entities import AppAdditionalFeatures +from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token +from extensions.ext_database import db +from models.model import App, AppModelConfig, Conversation, Message, TracingAppConfig +from models.workflow import Workflow +from services.ops_trace.langfuse_trace import LangFuseDataTrace +from services.ops_trace.langsmith_trace import LangSmithDataTrace + + +class TracingProviderEnum(Enum): + LANGFUSE = 'langfuse' + LANGSMITH = 'langSmith' + + +class LangfuseConfig(BaseModel): + """ + Model class for Langfuse tracing config. + """ + public_key: str + secret_key: str + host: str + + +class LangsmithConfig(BaseModel): + """ + Model class for Langsmith tracing config. + """ + api_key: str + project: str + endpoint: str + + +class OpsTraceService: + @classmethod + def get_tracing_app_config(cls, app_id: str, tracing_provider: str): + """ + Get tracing app config + :param app_id: app id + :param tracing_provider: tracing provider + :return: + """ + trace_config_data: TracingAppConfig = db.session.query(TracingAppConfig).filter( + TracingAppConfig.app_id == app_id, TracingAppConfig.tracing_provider == tracing_provider + ).first() + + if not trace_config_data: + return None + + # decrypt_token and obfuscated_token + tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id + decrypt_tracing_config = cls.decrypt_tracing_config(tenant_id, tracing_provider, trace_config_data.tracing_config) + decrypt_tracing_config = cls.obfuscated_decrypt_token(tracing_provider, decrypt_tracing_config) + + trace_config_data.tracing_config = decrypt_tracing_config + + return trace_config_data.to_dict() + + @classmethod + def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_config: dict): + """ + Create tracing app config + :param app_id: app id + :param tracing_provider: tracing provider + :param tracing_config: tracing config + :return: + """ + # check if trace config already exists + trace_config_data: TracingAppConfig = db.session.query(TracingAppConfig).filter( + TracingAppConfig.app_id == app_id, TracingAppConfig.tracing_provider == tracing_provider + ).first() + + if trace_config_data: + return None + + # get tenant id + tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id + tracing_config = cls.encrypt_tracing_config(tenant_id, tracing_provider, tracing_config) + trace_config_data = TracingAppConfig( + app_id=app_id, + tracing_provider=tracing_provider, + tracing_config=tracing_config, + ) + db.session.add(trace_config_data) + db.session.commit() + + return trace_config_data.to_dict() + + @classmethod + def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_config: dict): + """ + Update tracing app config + :param app_id: app id + :param tracing_provider: tracing provider + :param tracing_config: tracing config + :return: + """ + # check if trace config already exists + trace_config = db.session.query(TracingAppConfig).filter( + TracingAppConfig.app_id == app_id, TracingAppConfig.tracing_provider == tracing_provider + ).first() + + if not trace_config: + return None + + # get tenant id + tenant_id = 
db.session.query(App).filter(App.id == app_id).first().tenant_id + tracing_config = cls.encrypt_tracing_config(tenant_id, tracing_provider, tracing_config) + + trace_config.tracing_config = tracing_config + db.session.commit() + + return trace_config.to_dict() + + @classmethod + def encrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_config: dict): + """ + Encrypt tracing config + :param tenant_id: tenant id + :param tracing_provider: tracing provider + :param tracing_config: tracing config + :return: + """ + if tracing_provider == TracingProviderEnum.LANGFUSE.value: + tracing_config = LangfuseConfig(**tracing_config) + encrypt_public_key = encrypt_token(tenant_id, tracing_config.public_key) + encrypt_secret_key = encrypt_token(tenant_id, tracing_config.secret_key) + tracing_config = LangfuseConfig( + public_key=encrypt_public_key, + secret_key=encrypt_secret_key, + host=tracing_config.host + ) + elif tracing_provider == TracingProviderEnum.LANGSMITH.value: + tracing_config = LangsmithConfig(**tracing_config) + encrypt_api_key = encrypt_token(tenant_id, tracing_config.api_key) + tracing_config = LangsmithConfig( + api_key=encrypt_api_key, + project=tracing_config.project, + endpoint=tracing_config.endpoint + ) + + if isinstance(tracing_config, BaseModel): + return tracing_config.dict() + return tracing_config + + @classmethod + def decrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_config: dict): + """ + Decrypt tracing config + :param tenant_id: tenant id + :param tracing_provider: tracing provider + :param tracing_config: tracing config + :return: + """ + if tracing_provider == TracingProviderEnum.LANGFUSE.value: + tracing_config = LangfuseConfig(**tracing_config) + decrypt_public_key = decrypt_token(tenant_id, tracing_config.public_key) + decrypt_secret_key = decrypt_token(tenant_id, tracing_config.secret_key) + tracing_config = LangfuseConfig( + public_key=decrypt_public_key, + secret_key=decrypt_secret_key, + host=tracing_config.host + ) + elif tracing_provider == TracingProviderEnum.LANGSMITH.value: + tracing_config = LangsmithConfig(**tracing_config) + decrypt_api_key = decrypt_token(tenant_id, tracing_config.api_key) + tracing_config = LangsmithConfig( + api_key=decrypt_api_key, + project=tracing_config.project, + endpoint=tracing_config.endpoint + ) + + if isinstance(tracing_config, BaseModel): + return tracing_config.dict() + return tracing_config + + @classmethod + def obfuscated_decrypt_token(cls, tracing_provider: str, decrypt_tracing_config: dict): + """ + Obfuscate the decrypted tracing config + :param tracing_provider: tracing provider + :param decrypt_tracing_config: tracing config + :return: + """ + if tracing_provider == TracingProviderEnum.LANGFUSE.value: + decrypt_tracing_config = LangfuseConfig(**decrypt_tracing_config) + decrypt_public_key = decrypt_tracing_config.public_key + decrypt_secret_key = decrypt_tracing_config.secret_key + obfuscated_public_key = obfuscated_token(decrypt_public_key) + obfuscated_secret_key = obfuscated_token(decrypt_secret_key) + decrypt_tracing_config = LangfuseConfig( + public_key=obfuscated_public_key, + secret_key=obfuscated_secret_key, + host=decrypt_tracing_config.host + ) + elif tracing_provider == TracingProviderEnum.LANGSMITH.value: + decrypt_tracing_config = LangsmithConfig(**decrypt_tracing_config) + decrypt_api_key = decrypt_tracing_config.api_key + obfuscated_api_key = obfuscated_token(decrypt_api_key) + decrypt_tracing_config = LangsmithConfig( + api_key=obfuscated_api_key, 
project=decrypt_tracing_config.project, + endpoint=decrypt_tracing_config.endpoint + ) + + return decrypt_tracing_config.dict() + + @classmethod + def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): + """ + Get decrypted tracing config + :param app_id: app id + :param tracing_provider: tracing provider + :return: + """ + trace_config_data: TracingAppConfig = db.session.query(TracingAppConfig).filter( + TracingAppConfig.app_id == app_id, TracingAppConfig.tracing_provider == tracing_provider + ).first() + + if not trace_config_data: + return None + + # decrypt_token + tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id + decrypt_tracing_config = cls.decrypt_tracing_config( + tenant_id, tracing_provider, trace_config_data.tracing_config + ) + + return decrypt_tracing_config + + @classmethod + def get_ops_trace_instance( + cls, + app_id: str, + workflow: Optional[Workflow] = None, + app_model_config: Optional[AppModelConfig | AppAdditionalFeatures] = None + ): + """ + Get ops trace through model config + :param app_id: app_id + :param workflow: workflow + :param app_model_config: app_model_config + :return: + """ + tracing_instance = None + app_ops_trace_config = None + + # get trace configuration from available sources + if app_model_config is not None: + if isinstance(app_model_config, AppAdditionalFeatures): + app_ops_trace_config = app_model_config.trace_config + elif isinstance(app_model_config, AppModelConfig): + app_ops_trace_config = json.loads( + app_model_config.trace_config + ) if app_model_config.trace_config else None + elif workflow: + features_data = json.loads(workflow.features) + app_ops_trace_config = features_data.get('trace_config') if features_data else None + else: + # As a last resort, fetch from the database + trace_config_data = db.session.query(AppModelConfig.trace_config).filter( + AppModelConfig.app_id == app_id + ).order_by(AppModelConfig.updated_at.desc()).first() + if trace_config_data: + app_ops_trace_config = json.loads(trace_config_data.trace_config) + else: + raise ValueError('Trace config not found') + + if app_ops_trace_config is not None: + tracing_provider = app_ops_trace_config.get('tracing_provider') + else: + return None + + # decrypt_token + decrypt_trace_config = cls.get_decrypted_tracing_config(app_id, tracing_provider) + if app_ops_trace_config.get('enabled'): + tracing_provider = tracing_provider + if tracing_provider == TracingProviderEnum.LANGFUSE.value: + langfuse_client_public_key = decrypt_trace_config.get('public_key') + langfuse_client_secret_key = decrypt_trace_config.get('secret_key') + langfuse_host = decrypt_trace_config.get('host') + tracing_instance = LangFuseDataTrace( + langfuse_client_public_key, + langfuse_client_secret_key, + langfuse_host, + ) + elif tracing_provider == TracingProviderEnum.LANGSMITH.value: + langsmith_api_key = decrypt_trace_config.get('api_key') + langsmith_project = decrypt_trace_config.get('project') + langsmith_endpoint = decrypt_trace_config.get('endpoint') + tracing_instance = LangSmithDataTrace( + langsmith_api_key, + langsmith_project, + langsmith_endpoint, + ) + + return tracing_instance + + return None + + @classmethod + def get_app_config_through_message_id(cls, message_id: str): + app_model_config = None + message_data = db.session.query(Message).filter(Message.id == message_id).first() + conversation_id = message_data.conversation_id + conversation_data = 
db.session.query(Conversation).filter(Conversation.id == conversation_id).first() + + if conversation_data.app_model_config_id: + app_model_config = db.session.query(AppModelConfig).filter( + AppModelConfig.id == conversation_data.app_model_config_id + ).first() + elif conversation_data.app_model_config_id is None and conversation_data.override_model_configs: + app_model_config = conversation_data.override_model_configs + + return app_model_config diff --git a/api/services/ops_trace/trace_queue_manager.py b/api/services/ops_trace/trace_queue_manager.py new file mode 100644 index 00000000000000..bb65d7e1a2cde0 --- /dev/null +++ b/api/services/ops_trace/trace_queue_manager.py @@ -0,0 +1,133 @@ +import queue +import threading +from enum import Enum + +from extensions.ext_database import db +from models.model import Conversation, MessageFile +from services.ops_trace.utils import get_message_data + + +class TraceTaskName(str, Enum): + CONVERSATION_TRACE = 'conversation_trace' + WORKFLOW_TRACE = 'workflow_trace' + MESSAGE_TRACE = 'message_trace' + MODERATION_TRACE = 'moderation_trace' + SUGGESTED_QUESTION_TRACE = 'suggested_question_trace' + DATASET_RETRIEVAL_TRACE = 'dataset_retrieval_trace' + TOOL_TRACE = 'tool_trace' + GENERATE_NAME_TRACE = 'generate_name_trace' + + +class TraceTask: + def __init__(self, trace_instance, trace_type, **kwargs): + self.trace_instance = trace_instance + self.trace_type = trace_type + self.kwargs = kwargs + + def execute(self): + method_name, processed_kwargs = self.preprocess() + method = getattr(self.trace_instance, method_name) + method(**processed_kwargs) + + def preprocess(self): + if self.trace_type == TraceTaskName.CONVERSATION_TRACE: + return TraceTaskName.CONVERSATION_TRACE, self.process_conversation_trace(**self.kwargs) + if self.trace_type == TraceTaskName.WORKFLOW_TRACE: + return TraceTaskName.WORKFLOW_TRACE, self.process_workflow_trace(**self.kwargs) + elif self.trace_type == TraceTaskName.MESSAGE_TRACE: + return TraceTaskName.MESSAGE_TRACE, self.process_message_trace(**self.kwargs) + elif self.trace_type == TraceTaskName.MODERATION_TRACE: + return TraceTaskName.MODERATION_TRACE, self.process_moderation_trace(**self.kwargs) + elif self.trace_type == TraceTaskName.SUGGESTED_QUESTION_TRACE: + return TraceTaskName.SUGGESTED_QUESTION_TRACE, self.process_suggested_question_trace(**self.kwargs) + elif self.trace_type == TraceTaskName.DATASET_RETRIEVAL_TRACE: + return TraceTaskName.DATASET_RETRIEVAL_TRACE, self.process_dataset_retrieval_trace(**self.kwargs) + elif self.trace_type == TraceTaskName.TOOL_TRACE: + return TraceTaskName.TOOL_TRACE, self.process_tool_trace(**self.kwargs) + elif self.trace_type == TraceTaskName.GENERATE_NAME_TRACE: + return TraceTaskName.GENERATE_NAME_TRACE, self.process_generate_name_trace(**self.kwargs) + else: + return '', {} + + # process methods for different trace types + def process_conversation_trace(self, **kwargs): + return kwargs + + def process_workflow_trace(self, **kwargs): + return kwargs + + def process_message_trace(self, **kwargs): + message_id = kwargs.get('message_id') + message_data = get_message_data(message_id) + if not message_data: + return {} + message_file_data = db.session.query(MessageFile).filter_by(message_id=message_id).first() + conversation_mode = db.session.query(Conversation.mode).filter_by(id=message_data.conversation_id).first() + conversation_mode = conversation_mode[0] + kwargs['message_data'] = message_data + kwargs['message_file_data'] = message_file_data + kwargs['conversation_mode'] = 
conversation_mode + return kwargs + + def process_moderation_trace(self, **kwargs): + message_id = kwargs.get('message_id') + message_data = get_message_data(message_id) + if not message_data: + return {} + kwargs['message_data'] = message_data + return kwargs + + def process_suggested_question_trace(self, **kwargs): + message_id = kwargs.get('message_id') + message_data = get_message_data(message_id) + if not message_data: + return {} + kwargs['message_data'] = message_data + return kwargs + + def process_dataset_retrieval_trace(self, **kwargs): + message_id = kwargs.get('message_id') + message_data = get_message_data(message_id) + if not message_data: + return {} + kwargs['message_data'] = message_data + return kwargs + + def process_tool_trace(self, **kwargs): + message_id = kwargs.get('message_id') + message_data = get_message_data(message_id) + if not message_data: + return {} + message_file_data = db.session.query(MessageFile).filter_by(message_id=message_id).first() + kwargs['message_data'] = message_data + kwargs['message_file_data'] = message_file_data + return kwargs + + def process_generate_name_trace(self, **kwargs): + return kwargs + + +class TraceQueueManager: + def __init__(self): + from app import app + self.app = app + self.queue = queue.Queue() + self.is_running = True + self.thread = threading.Thread(target=self.process_queue) + self.thread.start() + + def stop(self): + self.is_running = False + + def process_queue(self): + with self.app.app_context(): + while self.is_running: + try: + task = self.queue.get(timeout=1) + task.execute() + self.queue.task_done() + except queue.Empty: + self.stop() + + def add_trace_task(self, trace_task): + self.queue.put(trace_task) diff --git a/api/services/ops_trace/utils.py b/api/services/ops_trace/utils.py new file mode 100644 index 00000000000000..c556947380d9e5 --- /dev/null +++ b/api/services/ops_trace/utils.py @@ -0,0 +1,28 @@ +from contextlib import contextmanager +from datetime import datetime + +from extensions.ext_database import db +from models.model import Message + + +def filter_none_values(data: dict): + for key, value in data.items(): + if value is None: + continue + if isinstance(value, datetime): + data[key] = value.isoformat() + return {key: value for key, value in data.items() if value is not None} + + +def get_message_data(message_id): + return db.session.query(Message).filter(Message.id == message_id).first() + + +@contextmanager +def measure_time(): + timing_info = {'start': datetime.now(), 'end': None} + try: + yield timing_info + finally: + timing_info['end'] = datetime.now() + print(f"Execution time: {timing_info['end'] - timing_info['start']}") \ No newline at end of file From a40433cf18f7277db0678f49226af6f63de04629 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 17 Jun 2024 10:33:57 +0800 Subject: [PATCH 02/65] feat: add remove tracing app --- api/controllers/console/app/ops_trace.py | 20 ++++++++++++++++++++ api/services/ops_trace/ops_trace_service.py | 21 ++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py index c4b3641b14ffec..d421a913ac09e5 100644 --- a/api/controllers/console/app/ops_trace.py +++ b/api/controllers/console/app/ops_trace.py @@ -75,5 +75,25 @@ def put(self, app_id): except Exception as e: raise e + @setup_required + @login_required + @account_initialization_required + def delete(self, app_id): + """Delete an existing trace app configuration""" + parser = 
reqparse.RequestParser() + parser.add_argument('tracing_provider', type=str, required=True, location='args') + args = parser.parse_args() + + try: + result = OpsTraceService.delete_tracing_app_config( + app_id=app_id, + tracing_provider=args['tracing_provider'] + ) + if not result: + raise TracingConfigNotExist() + return {"result": "success"} + except Exception as e: + raise e + api.add_resource(TraceAppConfigApi, '/apps/<uuid:app_id>/trace-config') diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index 8072ca3948a830..0b604367d4c907 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -117,6 +117,26 @@ def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c return trace_config.to_dict() + @classmethod + def delete_tracing_app_config(cls, app_id: str, tracing_provider: str): + """ + Delete tracing app config + :param app_id: app id + :param tracing_provider: tracing provider + :return: + """ + trace_config = db.session.query(TracingAppConfig).filter( + TracingAppConfig.app_id == app_id, TracingAppConfig.tracing_provider == tracing_provider + ).first() + + if not trace_config: + return None + + db.session.delete(trace_config) + db.session.commit() + + return True + @classmethod + def encrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_config: dict): + """ @@ -279,7 +299,6 @@ def get_ops_trace_instance( # decrypt_token decrypt_trace_config = cls.get_decrypted_tracing_config(app_id, tracing_provider) if app_ops_trace_config.get('enabled'): -            tracing_provider = tracing_provider if tracing_provider == TracingProviderEnum.LANGFUSE.value: langfuse_client_public_key = decrypt_trace_config.get('public_key') langfuse_client_secret_key = decrypt_trace_config.get('secret_key') From 4eb0c4d49f0417af4e1498ab935a05581ea24e9f Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 17 Jun 2024 18:09:45 +0800 Subject: [PATCH 03/65] feat: update trace table --- ...9b_update_appmodelconfig_and_add_table_.py | 2 +- .../versions/2a3aebbbf4bb_add_app_tracing.py | 39 +++++++++++ ...9_remove_app_model_config_trace_config_.py | 66 +++++++++++++++++++ api/models/model.py | 8 ++- api/services/ops_trace/ops_trace_service.py | 28 ++++---- 5 files changed, 125 insertions(+), 18 deletions(-) create mode 100644 api/migrations/versions/2a3aebbbf4bb_add_app_tracing.py create mode 100644 api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py diff --git a/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py b/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py index cffd36a0cd3ee8..316a44edce0c55 100644 --- a/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py +++ b/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py @@ -12,7 +12,7 @@ # revision identifiers, used by Alembic. 
revision = '04c602f5dc9b' -down_revision = '4e99a8df00ff' +down_revision = '7b45942e39bb' branch_labels = None depends_on = None diff --git a/api/migrations/versions/2a3aebbbf4bb_add_app_tracing.py b/api/migrations/versions/2a3aebbbf4bb_add_app_tracing.py new file mode 100644 index 00000000000000..09ef5e186cd089 --- /dev/null +++ b/api/migrations/versions/2a3aebbbf4bb_add_app_tracing.py @@ -0,0 +1,39 @@ +"""add app tracing + +Revision ID: 2a3aebbbf4bb +Revises: c031d46af369 +Create Date: 2024-06-17 10:08:54.803701 + +""" +import sqlalchemy as sa +from alembic import op + +import models as models + +# revision identifiers, used by Alembic. +revision = '2a3aebbbf4bb' +down_revision = 'c031d46af369' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('apps', schema=None) as batch_op: + batch_op.add_column(sa.Column('tracing', sa.Text(), nullable=True)) + + with op.batch_alter_table('trace_app_config', schema=None) as batch_op: + batch_op.create_index('tracing_app_config_app_id_idx', ['app_id'], unique=False) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('trace_app_config', schema=None) as batch_op: + batch_op.drop_index('tracing_app_config_app_id_idx') + + with op.batch_alter_table('apps', schema=None) as batch_op: + batch_op.drop_column('tracing') + + # ### end Alembic commands ### diff --git a/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py b/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py new file mode 100644 index 00000000000000..8d69fa86eb8487 --- /dev/null +++ b/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py @@ -0,0 +1,66 @@ +"""remove app model config trace config and rename trace app config + +Revision ID: c031d46af369 +Revises: 04c602f5dc9b +Create Date: 2024-06-17 10:01:00.255189 + +""" +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +import models as models + +# revision identifiers, used by Alembic. +revision = 'c031d46af369' +down_revision = '04c602f5dc9b' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table('trace_app_config', + sa.Column('id', models.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('app_id', models.StringUUID(), nullable=False), + sa.Column('tracing_provider', sa.String(length=255), nullable=True), + sa.Column('tracing_config', sa.JSON(), nullable=True), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), + sa.Column('is_active', sa.Boolean(), server_default=sa.text('true'), nullable=False), + sa.PrimaryKeyConstraint('id', name='tracing_app_config_pkey') + ) + with op.batch_alter_table('trace_app_config', schema=None) as batch_op: + batch_op.create_index('tracing_app_config_app_id_idx', ['app_id'], unique=False) + + with op.batch_alter_table('tracing_app_configs', schema=None) as batch_op: + batch_op.drop_index('tracing_app_config_app_id_idx') + + # drop the old table so the downgrade's create_table can recreate it cleanly + op.drop_table('tracing_app_configs') + + with op.batch_alter_table('app_model_configs', schema=None) as batch_op: + batch_op.drop_column('trace_config') + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('app_model_configs', schema=None) as batch_op: + batch_op.add_column(sa.Column('trace_config', sa.TEXT(), autoincrement=False, nullable=True)) + + op.create_table('tracing_app_configs', + sa.Column('id', sa.UUID(), server_default=sa.text('uuid_generate_v4()'), autoincrement=False, nullable=False), + sa.Column('app_id', sa.UUID(), autoincrement=False, nullable=False), + sa.Column('tracing_provider', sa.VARCHAR(length=255), autoincrement=False, nullable=True), + sa.Column('tracing_config', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True), + sa.Column('created_at', postgresql.TIMESTAMP(), server_default=sa.text('now()'), autoincrement=False, nullable=False), + sa.Column('updated_at', postgresql.TIMESTAMP(), server_default=sa.text('now()'), autoincrement=False, nullable=False), + sa.PrimaryKeyConstraint('id', name='tracing_app_config_pkey') + ) + with op.batch_alter_table('tracing_app_configs', schema=None) as batch_op: + batch_op.create_index('tracing_app_config_app_id_idx', ['app_id'], unique=False) + + with op.batch_alter_table('trace_app_config', schema=None) as batch_op: + batch_op.drop_index('tracing_app_config_app_id_idx') + + op.drop_table('trace_app_config') + # ### end Alembic commands ### diff --git a/api/models/model.py b/api/models/model.py index cfa7203a6cf200..b0e25537a0d038 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -73,6 +73,7 @@ class App(db.Model): is_demo = db.Column(db.Boolean, nullable=False, server_default=db.text('false')) is_public = db.Column(db.Boolean, nullable=False, server_default=db.text('false')) is_universal = db.Column(db.Boolean, nullable=False, server_default=db.text('false')) + tracing = db.Column(db.Text, nullable=True) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) @@ -233,7 +234,6 @@ class AppModelConfig(db.Model): dataset_configs = db.Column(db.Text) external_data_tools = db.Column(db.Text) file_upload = db.Column(db.Text) - trace_config = db.Column(db.Text) @property def app(self): @@ -1331,8 +1331,8 @@ class TagBinding(db.Model): created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) -class 
TracingAppConfig(db.Model): - __tablename__ = 'tracing_app_configs' +class TraceAppConfig(db.Model): + __tablename__ = 'trace_app_config' __table_args__ = ( db.PrimaryKeyConstraint('id', name='tracing_app_config_pkey'), db.Index('tracing_app_config_app_id_idx', 'app_id'), @@ -1344,6 +1344,7 @@ class TracingAppConfig(db.Model): tracing_config = db.Column(db.JSON, nullable=True) created_at = db.Column(db.DateTime, nullable=False, server_default=func.now()) updated_at = db.Column(db.DateTime, nullable=False, server_default=func.now(), onupdate=func.now()) + is_active = db.Column(db.Boolean, nullable=False, server_default=db.text('true')) @property def tracing_config_dict(self): @@ -1359,6 +1360,7 @@ def to_dict(self): 'app_id': self.app_id, 'tracing_provider': self.tracing_provider, 'tracing_config': self.tracing_config_dict, + "is_active": self.is_active, "created_at": self.created_at.__str__() if self.created_at else None, 'updated_at': self.updated_at.__str__() if self.updated_at else None, } diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index 0b604367d4c907..9cc88a10e4639d 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -7,7 +7,7 @@ from core.app.app_config.entities import AppAdditionalFeatures from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from extensions.ext_database import db -from models.model import App, AppModelConfig, Conversation, Message, TracingAppConfig +from models.model import App, AppModelConfig, Conversation, Message, TraceAppConfig from models.workflow import Workflow from services.ops_trace.langfuse_trace import LangFuseDataTrace from services.ops_trace.langsmith_trace import LangSmithDataTrace @@ -15,7 +15,7 @@ class TracingProviderEnum(Enum): LANGFUSE = 'langfuse' - LANGSMITH = 'langSmith' + LANGSMITH = 'langsmith' class LangfuseConfig(BaseModel): @@ -45,8 +45,8 @@ def get_tracing_app_config(cls, app_id: str, tracing_provider: str): :param tracing_provider: tracing provider :return: """ - trace_config_data: TracingAppConfig = db.session.query(TracingAppConfig).filter( - TracingAppConfig.app_id == app_id, TracingAppConfig.tracing_provider == tracing_provider + trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( + TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider ).first() if not trace_config_data: @@ -71,8 +71,8 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :return: """ # check if trace config already exists - trace_config_data: TracingAppConfig = db.session.query(TracingAppConfig).filter( - TracingAppConfig.app_id == app_id, TracingAppConfig.tracing_provider == tracing_provider + trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( + TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider ).first() if trace_config_data: @@ -81,7 +81,7 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c # get tenant id tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id tracing_config = cls.encrypt_tracing_config(tenant_id, tracing_provider, tracing_config) - trace_config_data = TracingAppConfig( + trace_config_data = TraceAppConfig( app_id=app_id, tracing_provider=tracing_provider, tracing_config=tracing_config, @@ -101,8 +101,8 @@ def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :return: """ # check 
if trace config already exists - trace_config = db.session.query(TracingAppConfig).filter( - TracingAppConfig.app_id == app_id, TracingAppConfig.tracing_provider == tracing_provider + trace_config = db.session.query(TraceAppConfig).filter( + TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider ).first() if not trace_config: @@ -125,8 +125,8 @@ def delete_tracing_app_config(cls, app_id: str, tracing_provider: str): :param tracing_provider: tracing provider :return: """ - trace_config = db.session.query(TracingAppConfig).filter( - TracingAppConfig.app_id == app_id, TracingAppConfig.tracing_provider == tracing_provider + trace_config = db.session.query(TraceAppConfig).filter( + TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider ).first() if not trace_config: @@ -136,7 +136,7 @@ def delete_tracing_app_config(cls, app_id: str, tracing_provider: str): db.session.commit() return True - + @classmethod def encrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_config: dict): """ @@ -238,8 +238,8 @@ def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): :param tracing_provider: tracing provider :return: """ - trace_config_data: TracingAppConfig = db.session.query(TracingAppConfig).filter( - TracingAppConfig.app_id == app_id, TracingAppConfig.tracing_provider == tracing_provider + trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( + TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider ).first() if not trace_config_data: From 000e855007e34a492537f29611c8af6066f762f4 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 18 Jun 2024 12:34:59 +0800 Subject: [PATCH 04/65] feat: change table struct --- api/controllers/console/app/app.py | 45 +++++++++-- api/core/agent/cot_agent_runner.py | 39 ++++++---- api/core/agent/fc_agent_runner.py | 7 +- .../advanced_chat/generate_task_pipeline.py | 2 +- api/core/app/apps/agent_chat/app_generator.py | 1 - api/core/app/apps/chat/app_generator.py | 1 - api/core/app/apps/completion/app_generator.py | 6 +- .../apps/workflow/generate_task_pipeline.py | 2 +- api/core/llm_generator/llm_generator.py | 3 +- api/core/moderation/input_moderation.py | 3 +- api/core/rag/retrieval/dataset_retrieval.py | 5 +- api/core/workflow/nodes/tool/tool_node.py | 2 +- api/fields/app_fields.py | 1 + api/services/message_service.py | 4 +- api/services/ops_trace/ops_trace_service.py | 74 +++++++++++-------- 15 files changed, 123 insertions(+), 72 deletions(-) diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index 082838334ae040..bc9a3c04300311 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -1,4 +1,3 @@ -import json import uuid from flask_login import current_user @@ -9,17 +8,14 @@ from controllers.console.app.wraps import get_app_model from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check -from core.tools.tool_manager import ToolManager -from core.tools.utils.configuration import ToolParameterConfigurationManager from fields.app_fields import ( app_detail_fields, app_detail_fields_with_site, app_pagination_fields, ) from libs.login import login_required -from models.model import App, AppMode, AppModelConfig from services.app_service import AppService -from services.tag_service import TagService +from services.ops_trace.ops_trace_service 
import OpsTraceService ALLOW_CREATE_APP_MODES = ['chat', 'agent-chat', 'advanced-chat', 'workflow', 'completion'] @@ -286,6 +282,44 @@ def post(self, app_model): return app_model +class AppTraceApi(Resource): + @setup_required + @login_required + @account_initialization_required + def get(self, app_id): + """Get app trace""" + app_trace_config = OpsTraceService.get_app_tracing_config( + app_id=app_id + ) + if not app_trace_config: + raise BadRequest("Tracing config not found") + + return app_trace_config + + @setup_required + @login_required + @account_initialization_required + def post(self, app_id): + # add app trace + if not current_user.is_admin_or_owner: + raise Forbidden() + parser = reqparse.RequestParser() + parser.add_argument('enabled', type=bool, required=True, location='json') + parser.add_argument('tracing_provider', type=str, required=True, location='json') + args = parser.parse_args() + + try: + OpsTraceService.update_app_tracing_config( + app_id=app_id, + enabled=args['enabled'], + tracing_provider=args['tracing_provider'], + ) + except Exception as e: + raise e + + return {"result": "success"} + + api.add_resource(AppListApi, '/apps') api.add_resource(AppImportApi, '/apps/import') api.add_resource(AppApi, '/apps/<uuid:app_id>') @@ -295,3 +329,4 @@ def post(self, app_model): api.add_resource(AppIconApi, '/apps/<uuid:app_id>/icon') api.add_resource(AppSiteStatus, '/apps/<uuid:app_id>/site-enable') api.add_resource(AppApiStatus, '/apps/<uuid:app_id>/api-enable') +api.add_resource(AppTraceApi, '/apps/<uuid:app_id>/trace') diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py index 31d1ec0cdb23df..9f3c7ea3bba9bf 100644 --- a/api/core/agent/cot_agent_runner.py +++ b/api/core/agent/cot_agent_runner.py @@ -1,7 +1,7 @@ import json from abc import ABC, abstractmethod from collections.abc import Generator -from typing import Union, Optional +from typing import Optional, Union from core.agent.base_agent_runner import BaseAgentRunner from core.agent.entities import AgentScratchpadUnit @@ -21,6 +21,7 @@ from core.tools.tool_engine import ToolEngine from models.model import Message from services.ops_trace.base_trace_instance import BaseTraceInstance +from services.ops_trace.ops_trace_service import OpsTraceService class CotAgentRunner(BaseAgentRunner, ABC): @@ -32,7 +33,8 @@ class CotAgentRunner(BaseAgentRunner, ABC): _query: str = None _prompt_messages_tools: list[PromptMessage] = None - def run(self, message: Message, + def run( + self, message: Message, query: str, inputs: dict[str, str], ) -> Union[Generator, LLMResult]: @@ -43,6 +45,12 @@ def run(self, message: Message, self._repack_app_generate_entity(app_generate_entity) self._init_react_state(query) + # get tracing instance + app_id = self.app_config.app_id + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_id + ) + # check model mode if 'Observation' not in app_generate_entity.model_conf.stop: if app_generate_entity.model_conf.provider not in self._ignore_observation_providers: @@ -212,7 +220,8 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): tool_invoke_response, tool_invoke_meta = self._handle_invoke_action( action=scratchpad.action, tool_instances=tool_instances, - message_file_ids=message_file_ids + message_file_ids=message_file_ids, + tracing_instance=tracing_instance, ) scratchpad.observation = tool_invoke_response scratchpad.agent_response = tool_invoke_response @@ -238,8 +247,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): # update prompt tool message for 
prompt_tool in self._prompt_messages_tools: - self.update_prompt_message_tool( - tool_instances[prompt_tool.name], prompt_tool) + self.update_prompt_message_tool(tool_instances[prompt_tool.name], prompt_tool) iteration_step += 1 @@ -276,12 +284,11 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): message=AssistantPromptMessage( content=final_answer ), - usage=llm_usage['usage'] if llm_usage['usage'] else LLMUsage.empty_usage( - ), + usage=llm_usage['usage'] if llm_usage['usage'] else LLMUsage.empty_usage(), system_fingerprint='' )), PublishFrom.APPLICATION_MANAGER) - def _handle_invoke_action(self, action: AgentScratchpadUnit.Action, + def _handle_invoke_action(self, action: AgentScratchpadUnit.Action, tool_instances: dict[str, Tool], message_file_ids: list[str], tracing_instance: Optional[BaseTraceInstance] = None @@ -300,7 +307,7 @@ def _handle_invoke_action(self, action: AgentScratchpadUnit.Action, if not tool_instance: answer = f"there is not a tool named {tool_call_name}" return answer, ToolInvokeMeta.error_instance(answer) - + if isinstance(tool_call_args, str): try: tool_call_args = json.loads(tool_call_args) @@ -322,8 +329,7 @@ def _handle_invoke_action(self, action: AgentScratchpadUnit.Action, # publish files for message_file, save_as in message_files: if save_as: - self.variables_pool.set_file( - tool_name=tool_call_name, value=message_file.id, name=save_as) + self.variables_pool.set_file(tool_name=tool_call_name, value=message_file.id, name=save_as) # publish message file self.queue_manager.publish(QueueMessageFileEvent( @@ -354,7 +360,7 @@ def _fill_in_inputs_from_external_data_tools(self, instruction: str, inputs: dic continue return instruction - + def _init_react_state(self, query) -> None: """ init agent scratchpad @@ -362,7 +368,7 @@ def _init_react_state(self, query) -> None: self._query = query self._agent_scratchpad = [] self._historic_prompt_messages = self._organize_historic_prompt_messages() - + @abstractmethod def _organize_prompt_messages(self) -> list[PromptMessage]: """ @@ -394,6 +400,13 @@ def _organize_historic_prompt_messages(self, current_session_messages: list[Prom scratchpads: list[AgentScratchpadUnit] = [] current_scratchpad: AgentScratchpadUnit = None + self.history_prompt_messages = AgentHistoryPromptTransform( + model_config=self.model_config, + prompt_messages=current_session_messages or [], + history_messages=self.history_prompt_messages, + memory=self.memory + ).get_prompt() + for message in self.history_prompt_messages: if isinstance(message, AssistantPromptMessage): if not current_scratchpad: diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py index e64722d22ca58d..3ddd43a771602e 100644 --- a/api/core/agent/fc_agent_runner.py +++ b/api/core/agent/fc_agent_runner.py @@ -20,8 +20,7 @@ from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform from core.tools.entities.tool_entities import ToolInvokeMeta from core.tools.tool_engine import ToolEngine -from extensions.ext_database import db -from models.model import AppModelConfig, Message +from models.model import Message from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -54,10 +53,8 @@ def run(self, # get tracing instance app_id = app_config.app_id - app_model_config_id = app_config.app_model_config_id - app_model_config = db.session.query(AppModelConfig).filter_by(id=app_model_config_id).first() tracing_instance = OpsTraceService.get_ops_trace_instance( - 
app_id=app_id, app_model_config=app_model_config + app_id=app_id ) def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 207e962b376efd..0d461528bdc737 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -189,7 +189,7 @@ def _process_stream_response(self, workflow: Optional[Workflow] = None) -> Gener :return: """ app_id = self._conversation.app_id - tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id, workflow=workflow) + tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id) for message in self._queue_manager.listen(): event = message.event diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index ca8fbe138efa37..431a8b88a15d5f 100644 --- a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -74,7 +74,6 @@ def generate(self, app_model: App, # get tracing instance tracing_instance = OpsTraceService.get_ops_trace_instance( app_id=app_model.id, - app_model_config=app_model_config, ) # validate override model config diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index e333e3458cbbc8..2dde1331891cc7 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -127,7 +127,6 @@ def generate( # get tracing instance tracing_instance = OpsTraceService.get_ops_trace_instance( app_id=app_model.id, - app_model_config=app_model_config, ) # init queue manager diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index fcf00e685594dd..186a11557af565 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -117,8 +117,7 @@ def generate(self, app_model: App, # get tracing instance tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id, - app_model_config=app_model_config, + app_id=app_model.id ) # init queue manager @@ -283,8 +282,7 @@ def generate_more_like_this(self, app_model: App, # get tracing instance tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id, - app_model_config=app_model_config, + app_id=app_model.id ) # init queue manager diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index 944c3736a140d1..54bd3fbf0db264 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -172,7 +172,7 @@ def _process_stream_response( Process stream response. 
:return: """ - tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id, workflow=workflow) + tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id) for message in self._queue_manager.listen(): event = message.event diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index c705cc754286c7..4059f637c58e2a 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -58,10 +58,9 @@ def generate_conversation_name(cls, tenant_id: str, query, conversation_id: Opti # get tracing instance conversation_data: Conversation = db.query(Conversation).filter(Conversation.id == conversation_id).first() app_id = conversation_data.app_id - app_model_config = OpsTraceService.get_app_config_through_message_id(message_id=conversation_data.message_id) tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_id, app_model_config=app_model_config + app_id=app_id ) if tracing_instance: diff --git a/api/core/moderation/input_moderation.py b/api/core/moderation/input_moderation.py index 0d915f74fed64d..c835c97eda0e34 100644 --- a/api/core/moderation/input_moderation.py +++ b/api/core/moderation/input_moderation.py @@ -47,9 +47,8 @@ def check( from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName # get tracing instance - app_model_config = OpsTraceService.get_app_config_through_message_id(message_id) tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_id, app_model_config=app_model_config + app_id=app_id ) if tracing_instance: diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index 08676e7f9113e1..d2d1283b5f9fcf 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -24,6 +24,7 @@ from extensions.ext_database import db from models.dataset import Dataset, DatasetQuery, DocumentSegment from models.dataset import Document as DatasetDocument +from models.model import Message from services.ops_trace.ops_trace_service import OpsTraceService from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from services.ops_trace.utils import measure_time @@ -355,9 +356,9 @@ def _on_retrival_end( db.session.commit() # get tracing instance - app_model_config = OpsTraceService.get_app_config_through_message_id(message_id) + app_id = db.session.query(Message.app_id).filter(Message.id == message_id).first() tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model_config.app_id, app_model_config=app_model_config + app_id=app_id ) if tracing_instance: trace_manager = TraceQueueManager() diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index 42d8648594ad5f..45a2e812197f06 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -59,7 +59,7 @@ def _run(self, variable_pool: VariablePool) -> NodeRunResult: # get tracing instance workflow: Workflow = db.session.query(Workflow).filter(Workflow.id == self.workflow_id).first() app_id = workflow.app_id - tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id, workflow=workflow) + tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id) try: messages = ToolEngine.workflow_invoke( diff --git a/api/fields/app_fields.py b/api/fields/app_fields.py index e314fa21a38bce..9de578544140f4 100644 --- 
a/api/fields/app_fields.py +++ b/api/fields/app_fields.py @@ -50,6 +50,7 @@ 'enable_site': fields.Boolean, 'enable_api': fields.Boolean, 'model_config': fields.Nested(model_config_fields, attribute='app_model_config', allow_null=True), + 'tracing': fields.Raw, 'created_at': TimestampField } diff --git a/api/services/message_service.py b/api/services/message_service.py index 49555e55588a77..e43d244d58bdbd 100644 --- a/api/services/message_service.py +++ b/api/services/message_service.py @@ -272,9 +272,9 @@ def get_suggested_questions_after_answer(cls, app_model: App, user: Optional[Uni ) # get tracing instance - app_model_config = OpsTraceService.get_app_config_through_message_id(message_id) + app_id = db.session.query(Message.app_id).filter(Message.id == message_id).scalar() tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model_config.app_id, app_model_config=app_model_config + app_id=app_id, ) if tracing_instance: trace_manager = TraceQueueManager() diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index 9cc88a10e4639d..e5cf1b57d63db9 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -27,7 +27,7 @@ class LangfuseConfig(BaseModel): host: str -class LangsmithConfig(BaseModel): +class LangSmithConfig(BaseModel): """ Model class for Langsmith tracing config. """ @@ -156,9 +156,9 @@ def encrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_c host=tracing_config.host ) elif tracing_provider == TracingProviderEnum.LANGSMITH.value: - tracing_config = LangsmithConfig(**tracing_config) + tracing_config = LangSmithConfig(**tracing_config) encrypt_api_key = encrypt_token(tenant_id, tracing_config.api_key) - tracing_config = LangsmithConfig( + tracing_config = LangSmithConfig( api_key=encrypt_api_key, project=tracing_config.project, endpoint=tracing_config.endpoint @@ -187,9 +187,9 @@ def decrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_c host=tracing_config.host ) elif tracing_provider == TracingProviderEnum.LANGSMITH.value: - tracing_config = LangsmithConfig(**tracing_config) + tracing_config = LangSmithConfig(**tracing_config) decrypt_api_key = decrypt_token(tenant_id, tracing_config.api_key) - tracing_config = LangsmithConfig( + tracing_config = LangSmithConfig( api_key=decrypt_api_key, project=tracing_config.project, endpoint=tracing_config.endpoint @@ -219,10 +219,10 @@ def obfuscated_decrypt_token(cls, tracing_provider: str, decrypt_tracing_config: host=decrypt_tracing_config.host ) elif tracing_provider == TracingProviderEnum.LANGSMITH.value: - decrypt_tracing_config = LangsmithConfig(**decrypt_tracing_config) + decrypt_tracing_config = LangSmithConfig(**decrypt_tracing_config) decrypt_api_key = decrypt_tracing_config.api_key obfuscated_api_key = obfuscated_token(decrypt_api_key) - decrypt_tracing_config = LangsmithConfig( + decrypt_tracing_config = LangSmithConfig( api_key=obfuscated_api_key, project=decrypt_tracing_config.project, endpoint=decrypt_tracing_config.endpoint @@ -256,9 +256,9 @@ def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): @classmethod def get_ops_trace_instance( cls, - app_id: str, + app_id, workflow: Optional[Workflow] = None, - app_model_config: Optional[AppModelConfig | AppAdditionalFeatures] = None + app_model_config: Optional[AppModelConfig | AppAdditionalFeatures] = None, ): """ Get ops trace through model config :param app_id: app_id :param workflow: workflow :param app_model_config: app_model_config @@ -268,29 +268,10 @@ def get_ops_trace_instance( 
:return: """ tracing_instance = None - app_ops_trace_config = None - - # get trace configuration from available sources - if app_model_config is not None: - if isinstance(app_model_config, AppAdditionalFeatures): - app_ops_trace_config = app_model_config.trace_config - elif isinstance(app_model_config, AppModelConfig): - app_ops_trace_config = json.loads( - app_model_config.trace_config - ) if app_model_config.trace_config else None - elif workflow: - features_data = json.loads(workflow.features) - app_ops_trace_config = features_data.get('trace_config') if features_data else None - else: - # As a last resort, fetch from the database - trace_config_data = db.session.query(AppModelConfig.trace_config).filter( - AppModelConfig.app_id == app_id - ).order_by(AppModelConfig.updated_at.desc()).first() - if trace_config_data: - app_ops_trace_config = json.loads(trace_config_data.trace_config) - else: - raise ValueError('Trace config not found') - + app: App = db.session.query(App).filter( + App.id == app_id + ).first() + app_ops_trace_config = json.loads(app.tracing) if app.tracing else None if app_ops_trace_config is not None: tracing_provider = app_ops_trace_config.get('tracing_provider') else: @@ -338,3 +319,32 @@ def get_app_config_through_message_id(cls, message_id: str): app_model_config = conversation_data.override_model_configs return app_model_config + + @classmethod + def update_app_tracing_config(cls, app_id: str, enabled: bool, tracing_provider: str): + """ + Update app tracing config + :param app_id: app id + :param enabled: enabled + :param tracing_provider: tracing provider + :return: + """ + app_config: App = db.session.query(App).filter(App.id == app_id).first() + app_config.tracing = json.dumps( + { + "enabled": enabled, + "tracing_provider": tracing_provider, + } + ) + db.session.commit() + + @classmethod + def get_app_tracing_config(cls, app_id: str): + """ + Get app tracing config + :param app_id: app id + :return: + """ + app: App = db.session.query(App).filter(App.id == app_id).first() + app_trace_config = json.loads(app.tracing) + return app_trace_config From e6def583dde5b2bdd3adb6c042e2a5595b8d0378 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 18 Jun 2024 14:17:18 +0800 Subject: [PATCH 05/65] feat: change TraceAppConfigApi request type patch --- api/controllers/console/app/ops_trace.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py index d421a913ac09e5..1316fb7cdbc630 100644 --- a/api/controllers/console/app/ops_trace.py +++ b/api/controllers/console/app/ops_trace.py @@ -56,7 +56,7 @@ def post(self, app_id): @setup_required @login_required @account_initialization_required - def put(self, app_id): + def patch(self, app_id): """Update an existing trace app configuration""" parser = reqparse.RequestParser() parser.add_argument('tracing_provider', type=str, required=True, location='json') From 6cfeb1ab4b571871b70de5d050859e11ee7edbd8 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 18 Jun 2024 18:01:42 +0800 Subject: [PATCH 06/65] feat: update default reply when config is none --- api/controllers/console/app/app.py | 2 -- api/controllers/console/app/ops_trace.py | 2 +- ...31d46af369_remove_app_model_config_trace_config_.py | 10 +++++----- api/requirements.txt | 4 +++- api/services/ops_trace/ops_trace_service.py | 5 +++++ 5 files changed, 14 insertions(+), 9 deletions(-) diff --git a/api/controllers/console/app/app.py 
b/api/controllers/console/app/app.py index bc9a3c04300311..a9e8b5fb4a2d33 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -291,8 +291,6 @@ def get(self, app_id): app_trace_config = OpsTraceService.get_app_tracing_config( app_id=app_id ) - if not app_trace_config: - raise BadRequest("Tracing config not found") return app_trace_config diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py index 1316fb7cdbc630..382f1ed89b3c9b 100644 --- a/api/controllers/console/app/ops_trace.py +++ b/api/controllers/console/app/ops_trace.py @@ -26,7 +26,7 @@ def get(self, app_id): app_id=app_id, tracing_provider=args['tracing_provider'] ) if not trace_config: - raise TracingConfigNotExist() + return {"has_not_configured": True} return trace_config except Exception as e: raise e diff --git a/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py b/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py index 8d69fa86eb8487..64489b11f50136 100644 --- a/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py +++ b/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py @@ -28,10 +28,10 @@ def upgrade(): sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.Column('is_active', sa.Boolean(), server_default=sa.text('true'), nullable=False), - sa.PrimaryKeyConstraint('id', name='tracing_app_config_pkey') + sa.PrimaryKeyConstraint('id', name='trace_app_config_pkey') # renamed the primary key constraint to avoid a name conflict ) with op.batch_alter_table('trace_app_config', schema=None) as batch_op: - batch_op.create_index('tracing_app_config_app_id_idx', ['app_id'], unique=False) + batch_op.create_index('trace_app_config_app_id_idx', ['app_id'], unique=False) with op.batch_alter_table('tracing_app_configs', schema=None) as batch_op: batch_op.drop_index('tracing_app_config_app_id_idx') @@ -54,13 +54,13 @@ def downgrade(): sa.Column('tracing_config', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True), sa.Column('created_at', postgresql.TIMESTAMP(), server_default=sa.text('now()'), autoincrement=False, nullable=False), sa.Column('updated_at', postgresql.TIMESTAMP(), server_default=sa.text('now()'), autoincrement=False, nullable=False), - sa.PrimaryKeyConstraint('id', name='tracing_app_config_pkey') + sa.PrimaryKeyConstraint('id', name='trace_app_config_pkey') ) with op.batch_alter_table('tracing_app_configs', schema=None) as batch_op: - batch_op.create_index('tracing_app_config_app_id_idx', ['app_id'], unique=False) + batch_op.create_index('trace_app_config_app_id_idx', ['app_id'], unique=False) with op.batch_alter_table('trace_app_config', schema=None) as batch_op: - batch_op.drop_index('tracing_app_config_app_id_idx') + batch_op.drop_index('trace_app_config_app_id_idx') op.drop_table('trace_app_config') # ### end Alembic commands ### diff --git a/api/requirements.txt b/api/requirements.txt index 5c41ee9725bcda..995fd4acb658f9 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -92,4 +92,6 @@ chromadb~=0.5.1 novita_client~=0.5.6 tenacity~=8.3.0 opensearch-py==2.4.0 -cos-python-sdk-v5==1.9.30 \ No newline at end of file +cos-python-sdk-v5==1.9.30 +langfuse==2.36.1 +langsmith==0.1.77 \ No newline at end of file diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index 
e5cf1b57d63db9..e4700dc685fd12 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -346,5 +346,10 @@ def get_app_tracing_config(cls, app_id: str): :return: """ app: App = db.session.query(App).filter(App.id == app_id).first() + if not app.tracing: + return { + "enabled": False, + "tracing_provider": None + } app_trace_config = json.loads(app.tracing) return app_trace_config From 98e1c30793595377f19c40732f7b23e3ed19cd8f Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Wed, 19 Jun 2024 09:50:29 +0800 Subject: [PATCH 07/65] fix: advanced chat trace error --- api/core/app/apps/advanced_chat/app_generator.py | 1 - 1 file changed, 1 deletion(-) diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index a06ae902fed5db..f8a2cb0f64de3c 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -109,7 +109,6 @@ def generate( application_generate_entity=application_generate_entity, conversation=conversation, stream=stream, - tracing_instance=tracing_instance, ) def single_iteration_generate(self, app_model: App, From 1fc02257b3fd7ecdd6a99bb095556cbeb82d768e Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Wed, 19 Jun 2024 15:48:51 +0800 Subject: [PATCH 08/65] feat: add trace ops api check --- api/controllers/console/app/ops_trace.py | 1 + api/services/ops_trace/langfuse_trace.py | 7 +++ api/services/ops_trace/langsmith_trace.py | 14 +++++- api/services/ops_trace/ops_trace_service.py | 52 ++++++++++++++++++--- 4 files changed, 66 insertions(+), 8 deletions(-) diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py index 382f1ed89b3c9b..c3e9ebac1c2aa4 100644 --- a/api/controllers/console/app/ops_trace.py +++ b/api/controllers/console/app/ops_trace.py @@ -69,6 +69,7 @@ def patch(self, app_id): tracing_provider=args['tracing_provider'], tracing_config=args['tracing_config'] ) + print("==============", result) if not result: raise TracingConfigNotExist() return {"result": "success"} diff --git a/api/services/ops_trace/langfuse_trace.py b/api/services/ops_trace/langfuse_trace.py index af6badbd96e626..6a037b4ad7e20d 100644 --- a/api/services/ops_trace/langfuse_trace.py +++ b/api/services/ops_trace/langfuse_trace.py @@ -710,3 +710,10 @@ def update_generation( ) generation.end(**format_generation_data) + + def api_check(self): + try: + return self.langfuse_client.auth_check() + except Exception as e: + print(f"LangFuse API check failed: {str(e)}") + return False diff --git a/api/services/ops_trace/langsmith_trace.py b/api/services/ops_trace/langsmith_trace.py index ef0afdfeb91681..529ce1b07aaa78 100644 --- a/api/services/ops_trace/langsmith_trace.py +++ b/api/services/ops_trace/langsmith_trace.py @@ -14,7 +14,7 @@ from models.model import Message, MessageAgentThought, MessageFile from models.workflow import WorkflowNodeExecution, WorkflowRun from services.ops_trace.base_trace_instance import BaseTraceInstance -from services.ops_trace.utils import filter_none_values +from services.ops_trace.utils import filter_none_values, replace_text_with_content class LangSmithRunType(str, Enum): @@ -97,6 +97,8 @@ def ensure_dict(cls, v, info: ValidationInfo): } elif isinstance(v, list): if len(v) > 0 and isinstance(v[0], dict): + # rename text to content + replace_text_with_content(data=v) data = { "message": v, "usage_metadata": usage_metadata, @@ -543,3 +545,13 @@ def 
update_run(self, update_run_data: LangSmithRunUpdateModel): print("LangSmith Run updated successfully.") except Exception as e: raise f"LangSmith Failed to update run: {str(e)}" + + def api_check(self): + try: + random_project_name = f"test_project_{datetime.now().strftime('%Y%m%d%H%M%S')}" + self.langsmith_client.create_project(project_name=random_project_name) + self.langsmith_client.delete_project(project_name=random_project_name) + return True + except Exception as e: + print(f"LangSmith API check failed: {str(e)}") + return False diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index e4700dc685fd12..db5c5cc92bf131 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -1,14 +1,11 @@ import json from enum import Enum -from typing import Optional from pydantic import BaseModel -from core.app.app_config.entities import AppAdditionalFeatures from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from extensions.ext_database import db from models.model import App, AppModelConfig, Conversation, Message, TraceAppConfig -from models.workflow import Workflow from services.ops_trace.langfuse_trace import LangFuseDataTrace from services.ops_trace.langsmith_trace import LangSmithDataTrace @@ -70,6 +67,13 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :param tracing_config: tracing config :return: """ + if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value]: + raise {"error": f"Invalid tracing provider: {tracing_provider}"} + + # api check + if not cls.check_trace_config_is_effective(tracing_config, tracing_provider): + return {"error": "Tracing config is not effective"} + # check if trace config already exists trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider @@ -100,6 +104,13 @@ def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :param tracing_config: tracing config :return: """ + if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value]: + raise ValueError(f"Invalid tracing provider: {tracing_provider}") + + # api check + if not cls.check_trace_config_is_effective(tracing_config, tracing_provider): + raise ValueError("Invalid Credentials") + # check if trace config already exists trace_config = db.session.query(TraceAppConfig).filter( TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider @@ -257,14 +268,10 @@ def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): def get_ops_trace_instance( cls, app_id, - workflow: Optional[Workflow] = None, - app_model_config: Optional[AppModelConfig | AppAdditionalFeatures] = None, ): """ Get ops trace through model config :param app_id: app_id - :param workflow: workflow - :param app_model_config: app_model_config :return: """ tracing_instance = None @@ -329,7 +336,11 @@ def update_app_tracing_config(cls, app_id: str, enabled: bool, tracing_provider: :param tracing_provider: tracing provider :return: """ + # auth check + if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value]: + raise ValueError(f"Invalid tracing provider: {tracing_provider}") app_config: App = db.session.query(App).filter(App.id == app_id).first() + app_config.tracing = json.dumps( { "enabled": 
enabled, @@ -353,3 +364,30 @@ def get_app_tracing_config(cls, app_id: str): } app_trace_config = json.loads(app.tracing) return app_trace_config + + @staticmethod + def check_trace_config_is_effective(tracing_config: dict, tracing_provider: str): + """ + Check trace config is effective + :param tracing_config: tracing config + :param tracing_provider: tracing provider + :return: + """ + if tracing_provider == TracingProviderEnum.LANGFUSE.value: + tracing_config = LangfuseConfig(**tracing_config) + langfuse_trace_instance = LangFuseDataTrace( + tracing_config.public_key, + tracing_config.secret_key, + tracing_config.host, + ) + return langfuse_trace_instance.api_check() + elif tracing_provider == TracingProviderEnum.LANGSMITH.value: + tracing_config = LangSmithConfig(**tracing_config) + langsmith_trace_instance = LangSmithDataTrace( + tracing_config.api_key, + tracing_config.project, + tracing_config.endpoint, + ) + return langsmith_trace_instance.api_check() + else: + raise ValueError(f"Unsupported tracing provider: {tracing_provider}") From a454813d5d682b1649c10e4bc2564db714d748fa Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Wed, 19 Jun 2024 17:41:46 +0800 Subject: [PATCH 09/65] fix: uuid error --- api/core/agent/cot_agent_runner.py | 3 +-- api/core/agent/fc_agent_runner.py | 3 +-- .../apps/advanced_chat/generate_task_pipeline.py | 3 +-- api/core/rag/retrieval/dataset_retrieval.py | 4 +--- api/services/message_service.py | 3 +-- api/services/ops_trace/ops_trace_service.py | 14 +++++++++++++- 6 files changed, 18 insertions(+), 12 deletions(-) diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py index 9f3c7ea3bba9bf..c08adc5c7d5838 100644 --- a/api/core/agent/cot_agent_runner.py +++ b/api/core/agent/cot_agent_runner.py @@ -46,9 +46,8 @@ def run( self._init_react_state(query) # get tracing instance - app_id = self.app_config.app_id tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_id + app_id=self.app_config.app_id ) # check model mode diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py index 3ddd43a771602e..721a8b7f03fc0e 100644 --- a/api/core/agent/fc_agent_runner.py +++ b/api/core/agent/fc_agent_runner.py @@ -52,9 +52,8 @@ def run(self, final_answer = '' # get tracing instance - app_id = app_config.app_id tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_id + app_id=app_config.app_id ) def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 0d461528bdc737..ec93eb2d5dfada 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -188,8 +188,7 @@ def _process_stream_response(self, workflow: Optional[Workflow] = None) -> Gener Process stream response. 
:return: """ - app_id = self._conversation.app_id - tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id) + tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=self._conversation.app_id) for message in self._queue_manager.listen(): event = message.event diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index d2d1283b5f9fcf..c1fb14913e8cf1 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -24,7 +24,6 @@ from extensions.ext_database import db from models.dataset import Dataset, DatasetQuery, DocumentSegment from models.dataset import Document as DatasetDocument -from models.model import Message from services.ops_trace.ops_trace_service import OpsTraceService from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from services.ops_trace.utils import measure_time @@ -356,9 +355,8 @@ def _on_retrival_end( db.session.commit() # get tracing instance - app_id = db.session.query(Message.app_id).filter(Message.id == message_id).first() tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_id + message_id=message_id ) if tracing_instance: trace_manager = TraceQueueManager() diff --git a/api/services/message_service.py b/api/services/message_service.py index e43d244d58bdbd..b5a32c1b770bff 100644 --- a/api/services/message_service.py +++ b/api/services/message_service.py @@ -272,9 +272,8 @@ def get_suggested_questions_after_answer(cls, app_model: App, user: Optional[Uni ) # get tracing instance - app_id = db.session.query(Message.app_id).filter(Message.id == message_id).first() tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_id, + message_id=message_id ) if tracing_instance: trace_manager = TraceQueueManager() diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index db5c5cc92bf131..9e9b76006b1ea0 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -1,5 +1,7 @@ import json from enum import Enum +from typing import Union +from uuid import UUID from pydantic import BaseModel @@ -267,18 +269,28 @@ def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): @classmethod def get_ops_trace_instance( cls, - app_id, + app_id: Union[UUID, str] = None, + message_id: str = None ): """ Get ops trace through model config :param app_id: app_id + :param message_id: message_id :return: """ + if message_id: + record: Message = db.session.query(Message).filter(Message.id == message_id).first() + app_id = record.app_id + + if isinstance(app_id, UUID): + app_id = str(app_id) + tracing_instance = None app: App = db.session.query(App).filter( App.id == app_id ).first() app_ops_trace_config = json.loads(app.tracing) if app.tracing else None + if app_ops_trace_config is not None: tracing_provider = app_ops_trace_config.get('tracing_provider') else: From 07531ab94f204b74e11d7261b758d04b83ee6d9b Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Wed, 19 Jun 2024 18:19:19 +0800 Subject: [PATCH 10/65] feat: update workflow trace conversation_id --- api/core/app/task_pipeline/workflow_cycle_manage.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index 5c0ffbe07bdd5b..c76c1c5e418e87 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py 
+++ b/api/core/app/task_pipeline/workflow_cycle_manage.py
@@ -532,14 +532,16 @@ def _handle_node_finished(self, event: QueueNodeSucceededEvent | QueueNodeFailed
     def _handle_workflow_finished(
         self, event: QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent,
-        tracing_instance: Optional[BaseTraceInstance] = None
+        tracing_instance: Optional[BaseTraceInstance] = None,
+        conversation_id: Optional[str] = None
     ) -> Optional[WorkflowRun]:
         workflow_run = db.session.query(WorkflowRun).filter(
             WorkflowRun.id == self._task_state.workflow_run_id).first()
         if not workflow_run:
             return None
 
-        conversation_id = self._application_generate_entity.inputs.get('sys.conversation_id')
+        if conversation_id is None:
+            conversation_id = self._application_generate_entity.inputs.get('sys.conversation_id')
 
         if isinstance(event, QueueStopEvent):
             workflow_run = self._workflow_run_failed(
                 workflow_run=workflow_run,

From ebea85d6a2de148f75714a0379414271d1d16b7f Mon Sep 17 00:00:00 2001
From: Joe <1264204425@qq.com>
Date: Thu, 20 Jun 2024 11:54:51 +0800
Subject: [PATCH 11/65] feat: format input and output

---
 .../advanced_chat/generate_task_pipeline.py   |  4 +-
 .../apps/workflow/generate_task_pipeline.py   |  2 +-
 api/core/llm_generator/llm_generator.py       | 10 +--
 api/services/ops_trace/langfuse_trace.py      | 44 ++++++++---
 api/services/ops_trace/langsmith_trace.py     | 76 +++++++++++++++----
 api/services/ops_trace/ops_trace_service.py   | 10 ++-
 api/services/ops_trace/utils.py               | 17 ++++-
 7 files changed, 129 insertions(+), 34 deletions(-)

diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
index ec93eb2d5dfada..e30c58f0de55a8 100644
--- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py
+++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
@@ -256,7 +256,9 @@ def _process_stream_response(self, workflow: Optional[Workflow] = None) -> Gener
                 yield self._handle_iteration_to_stream_response(self._application_generate_entity.task_id, event)
                 self._handle_iteration_operation(event)
             elif isinstance(event, QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent):
-                workflow_run = self._handle_workflow_finished(event, tracing_instance)
+                workflow_run = self._handle_workflow_finished(
+                    event, tracing_instance=tracing_instance, conversation_id=self._conversation.id
+                )
                 if workflow_run:
                     yield self._workflow_finish_to_stream_response(
                         task_id=self._application_generate_entity.task_id,
diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py
index 54bd3fbf0db264..982fab01a0bdc3 100644
--- a/api/core/app/apps/workflow/generate_task_pipeline.py
+++ b/api/core/app/apps/workflow/generate_task_pipeline.py
@@ -225,7 +225,7 @@ def _process_stream_response(
                 yield self._handle_iteration_to_stream_response(self._application_generate_entity.task_id, event)
                 self._handle_iteration_operation(event)
             elif isinstance(event, QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent):
-                workflow_run = self._handle_workflow_finished(event, tracing_instance)
+                workflow_run = self._handle_workflow_finished(event, tracing_instance=tracing_instance)
 
                 # save workflow app log
                 self._save_workflow_app_log(workflow_run)
diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py
index 4059f637c58e2a..808f2a941f143c 100644
--- a/api/core/llm_generator/llm_generator.py
+++ b/api/core/llm_generator/llm_generator.py
@@ -11,8 +11,6 @@ from
core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError from core.prompt.utils.prompt_template_parser import PromptTemplateParser -from extensions.ext_database import db -from models.model import Conversation from services.ops_trace.ops_trace_service import OpsTraceService from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from services.ops_trace.utils import measure_time @@ -56,19 +54,15 @@ def generate_conversation_name(cls, tenant_id: str, query, conversation_id: Opti name = name[:75] + '...' # get tracing instance - conversation_data: Conversation = db.query(Conversation).filter(Conversation.id == conversation_id).first() - app_id = conversation_data.app_id - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_id + conversation_id=conversation_id ) - if tracing_instance: trace_manager = TraceQueueManager() trace_manager.add_trace_task( TraceTask( tracing_instance, - TraceTaskName.CONVERSATION_TRACE, + TraceTaskName.SUGGESTED_QUESTION_TRACE, conversation_id=conversation_id, generate_conversation_name=name, inputs=prompt, diff --git a/api/services/ops_trace/langfuse_trace.py b/api/services/ops_trace/langfuse_trace.py index 6a037b4ad7e20d..b9e5642cd09dee 100644 --- a/api/services/ops_trace/langfuse_trace.py +++ b/api/services/ops_trace/langfuse_trace.py @@ -14,7 +14,7 @@ from models.model import Message, MessageAgentThought, MessageFile from models.workflow import WorkflowNodeExecution, WorkflowRun from services.ops_trace.base_trace_instance import BaseTraceInstance -from services.ops_trace.utils import filter_none_values +from services.ops_trace.utils import filter_none_values, replace_text_with_content def validate_input_output(v, field_name): @@ -27,12 +27,24 @@ def validate_input_output(v, field_name): if v == {} or v is None: return v if isinstance(v, str): - return {field_name: v} + return [ + { + "role": "assistant" if field_name == "output" else "user", + "content": v, + } + ] elif isinstance(v, list): if len(v) > 0 and isinstance(v[0], dict): - return {"message": v} + v = replace_text_with_content(data=v) + return v else: - return {field_name: v} + return [ + { + "role": "assistant" if field_name == "output" else "user", + "content": str(v), + } + ] + return v @@ -187,6 +199,11 @@ class GenerationUsage(BaseModel): outputCost: Optional[float] = None totalCost: Optional[float] = None + @field_validator("input", "output") + def ensure_dict(cls, v, info: ValidationInfo): + field_name = info.field_name + return validate_input_output(v, field_name) + class LangfuseGeneration(BaseModel): id: Optional[str] = Field( @@ -514,7 +531,7 @@ def suggested_question_trace(self, message_id: str, suggested_question: str, **k timer = kwargs.get("timer") start_time = timer.get("start") end_time = timer.get("end") - inputs = message_data.query + input = message_data.query metadata = { "message_id": message_id, @@ -528,19 +545,28 @@ def suggested_question_trace(self, message_id: str, suggested_question: str, **k "from_source": message_data.from_source, } - span_data = LangfuseSpan( + generation_usage = GenerationUsage( + totalTokens=len(suggested_question), + input=len(input), + output=len(suggested_question), + total=len(suggested_question), + unit=UnitEnum.CHARACTERS, + ) + + generation_data = LangfuseGeneration( name="suggested_question", - input=inputs, - output=suggested_question, + input=input, + output=str(suggested_question), trace_id=message_id, 
start_time=start_time, end_time=end_time, metadata=metadata, level=LevelEnum.DEFAULT if message_data.status != 'error' else LevelEnum.ERROR, status_message=message_data.error if message_data.error else "", + usage=generation_usage, ) - self.add_span(langfuse_span_data=span_data) + self.add_generation(langfuse_generation_data=generation_data) def dataset_retrieval_trace(self, message_id: str, documents: list[Document], **kwargs): message_data = kwargs.get("message_data") diff --git a/api/services/ops_trace/langsmith_trace.py b/api/services/ops_trace/langsmith_trace.py index 529ce1b07aaa78..e9453138ae00a3 100644 --- a/api/services/ops_trace/langsmith_trace.py +++ b/api/services/ops_trace/langsmith_trace.py @@ -90,23 +90,52 @@ def ensure_dict(cls, v, info: ValidationInfo): } file_list = values.get("file_list", []) if isinstance(v, str): - return { - field_name: v, - "file_list": file_list, - "usage_metadata": usage_metadata, - } + if field_name == "inputs": + return { + "messages": { + "role": "user", + "content": v, + "usage_metadata": usage_metadata, + "file_list": file_list, + }, + } + elif field_name == "outputs": + return { + "choices": { + "role": "ai", + "content": v, + "usage_metadata": usage_metadata, + "file_list": file_list, + }, + } elif isinstance(v, list): + data = {} if len(v) > 0 and isinstance(v[0], dict): # rename text to content - replace_text_with_content(data=v) - data = { - "message": v, - "usage_metadata": usage_metadata, - "file_list": file_list, - } + v = replace_text_with_content(data=v) + if field_name == "inputs": + data = { + "messages": v, + } + elif field_name == "outputs": + data = { + "choices": { + "role": "ai", + "content": v, + "usage_metadata": usage_metadata, + "file_list": file_list, + }, + } return data else: - return {field_name: v} + return { + "choices": { + "role": "ai" if field_name == "outputs" else "user", + "content": str(v), + "usage_metadata": usage_metadata, + "file_list": file_list, + }, + } if isinstance(v, dict): v["usage_metadata"] = usage_metadata v["file_list"] = file_list @@ -318,7 +347,7 @@ def message_trace(self, message_id: str, conversation_id: str, **kwargs): id=message_id, name=f"message_{message_id}", inputs=inputs, - run_type=LangSmithRunType.llm, + run_type=LangSmithRunType.chain, start_time=created_at, end_time=end_time, outputs=message_data.answer, @@ -331,6 +360,27 @@ def message_trace(self, message_id: str, conversation_id: str, **kwargs): ) self.add_run(message_run) + # create llm run parented to message run + llm_run = LangSmithRunModel( + input_tokens=message_tokens, + output_tokens=answer_tokens, + total_tokens=total_tokens, + name=f"llm_{message_id}", + inputs=inputs, + run_type=LangSmithRunType.llm, + start_time=created_at, + end_time=end_time, + outputs=message_data.answer, + extra={ + "metadata": metadata, + }, + parent_run_id=message_id, + tags=["llm", str(conversation_mode)], + error=error, + file_list=file_list, + ) + self.add_run(llm_run) + def moderation_trace(self, message_id: str, moderation_result: ModerationInputsResult, **kwargs): inputs = kwargs.get("inputs") message_data = kwargs.get("message_data") diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index 9e9b76006b1ea0..b402f0cc65c04e 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -270,14 +270,22 @@ def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): def get_ops_trace_instance( cls, app_id: Union[UUID, str] = None, - 
message_id: str = None + message_id: str = None, + conversation_id: str = None ): """ Get ops trace through model config :param app_id: app_id :param message_id: message_id + :param conversation_id: conversation_id :return: """ + if conversation_id: + conversation_data: Conversation = db.session.query(Conversation).filter( + Conversation.id == conversation_id + ).first() + app_id = conversation_data.app_id + if message_id: record: Message = db.session.query(Message).filter(Message.id == message_id).first() app_id = record.app_id diff --git a/api/services/ops_trace/utils.py b/api/services/ops_trace/utils.py index c556947380d9e5..2b12db0f482c6d 100644 --- a/api/services/ops_trace/utils.py +++ b/api/services/ops_trace/utils.py @@ -25,4 +25,19 @@ def measure_time(): yield timing_info finally: timing_info['end'] = datetime.now() - print(f"Execution time: {timing_info['end'] - timing_info['start']}") \ No newline at end of file + print(f"Execution time: {timing_info['end'] - timing_info['start']}") + + +def replace_text_with_content(data): + if isinstance(data, dict): + new_data = {} + for key, value in data.items(): + if key == 'text': + new_data['content'] = value + else: + new_data[key] = replace_text_with_content(value) + return new_data + elif isinstance(data, list): + return [replace_text_with_content(item) for item in data] + else: + return data From 5eb901a03f41888298439b21a71b8c0040d91bad Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Fri, 21 Jun 2024 13:53:22 +0800 Subject: [PATCH 12/65] feat: generate tracing_instance in app_generator --- api/core/agent/cot_agent_runner.py | 5 +--- api/core/agent/fc_agent_runner.py | 5 +--- .../app/apps/advanced_chat/app_generator.py | 12 +++++++--- .../advanced_chat/generate_task_pipeline.py | 6 ++--- api/core/app/apps/agent_chat/app_generator.py | 10 +++++--- api/core/app/apps/agent_chat/app_runner.py | 6 ++--- api/core/app/apps/base_app_runner.py | 1 + api/core/app/apps/chat/app_generator.py | 14 +++++------ api/core/app/apps/chat/app_runner.py | 2 +- api/core/app/apps/completion/app_generator.py | 8 ++++++- api/core/app/apps/completion/app_runner.py | 2 +- .../app/apps/message_based_app_generator.py | 4 ++-- api/core/app/apps/workflow/app_generator.py | 9 ++++++- .../apps/workflow/generate_task_pipeline.py | 7 ++---- api/core/app/entities/app_invoke_entities.py | 3 +++ api/core/moderation/input_moderation.py | 24 ++++++++----------- api/core/rag/retrieval/dataset_retrieval.py | 9 +++---- api/core/tools/tool_engine.py | 2 -- api/core/workflow/nodes/tool/tool_node.py | 10 +------- api/services/message_service.py | 1 + api/services/ops_trace/langfuse_trace.py | 8 ++++--- api/services/ops_trace/langsmith_trace.py | 6 +++-- api/services/ops_trace/ops_trace_service.py | 3 ++- 23 files changed, 82 insertions(+), 75 deletions(-) diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py index c08adc5c7d5838..1f2db3fb7b4168 100644 --- a/api/core/agent/cot_agent_runner.py +++ b/api/core/agent/cot_agent_runner.py @@ -21,7 +21,6 @@ from core.tools.tool_engine import ToolEngine from models.model import Message from services.ops_trace.base_trace_instance import BaseTraceInstance -from services.ops_trace.ops_trace_service import OpsTraceService class CotAgentRunner(BaseAgentRunner, ABC): @@ -46,9 +45,7 @@ def run( self._init_react_state(query) # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=self.app_config.app_id - ) + tracing_instance = app_generate_entity.tracing_instance # 
check model mode if 'Observation' not in app_generate_entity.model_conf.stop: diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py index 721a8b7f03fc0e..6b2997655f1c1b 100644 --- a/api/core/agent/fc_agent_runner.py +++ b/api/core/agent/fc_agent_runner.py @@ -21,7 +21,6 @@ from core.tools.entities.tool_entities import ToolInvokeMeta from core.tools.tool_engine import ToolEngine from models.model import Message -from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -52,9 +51,7 @@ def run(self, final_answer = '' # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_config.app_id - ) + tracing_instance = app_generate_entity.tracing_instance def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): if not final_llm_usage_dict['usage']: diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index f8a2cb0f64de3c..8060bf036f3943 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -3,7 +3,7 @@ import threading import uuid from collections.abc import Generator -from typing import Any, Optional, Union +from typing import Union from flask import Flask, current_app from pydantic import ValidationError @@ -24,6 +24,7 @@ from models.account import Account from models.model import App, Conversation, EndUser, Message from models.workflow import Workflow +from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -36,7 +37,6 @@ def generate( args: dict, invoke_from: InvokeFrom, stream: bool = True, - tracing_instance: Optional[Any] = None ) -> Union[dict, Generator[dict, None, None]]: """ Generate App response. @@ -87,6 +87,11 @@ def generate( workflow=workflow ) + # get tracing instance + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_model.id + ) + # init application generate entity application_generate_entity = AdvancedChatAppGenerateEntity( task_id=str(uuid.uuid4()), @@ -98,7 +103,8 @@ def generate( user_id=user.id, stream=stream, invoke_from=invoke_from, - extras=extras + extras=extras, + tracing_instance=tracing_instance ) return self._generate( diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index e30c58f0de55a8..f4c97033d47c1d 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -54,7 +54,6 @@ WorkflowNodeExecution, WorkflowRunStatus, ) -from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -132,7 +131,7 @@ def process( self._application_generate_entity.query ) - generator = self._process_stream_response(workflow) + generator = self._process_stream_response(self._application_generate_entity.tracing_instance) if self._stream: return self._to_stream_response(generator) else: @@ -183,12 +182,11 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) stream_response=stream_response ) - def _process_stream_response(self, workflow: Optional[Workflow] = None) -> Generator[StreamResponse, None, None]: + def _process_stream_response(self, tracing_instance) -> Generator[StreamResponse, None, None]: """ Process stream response. 
:return: """ - tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=self._conversation.app_id) for message in self._queue_manager.listen(): event = message.event diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index 431a8b88a15d5f..3e93fad59cf893 100644 --- a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -109,6 +109,11 @@ def generate(self, app_model: App, override_config_dict=override_model_config_dict ) + # get tracing instance + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_model.id, + ) + # init application generate entity application_generate_entity = AgentChatAppGenerateEntity( task_id=str(uuid.uuid4()), @@ -122,7 +127,8 @@ def generate(self, app_model: App, stream=stream, invoke_from=invoke_from, extras=extras, - call_depth=0 + call_depth=0, + tracing_instance=tracing_instance ) # init generate records @@ -161,7 +167,6 @@ def generate(self, app_model: App, message=message, user=user, stream=stream, - tracing_instance=tracing_instance, ) return AgentChatAppGenerateResponseConverter.convert( @@ -199,7 +204,6 @@ def _generate_worker(self, flask_app: Flask, queue_manager=queue_manager, conversation=conversation, message=message, - tracing_instance=tracing_instance ) except GenerateTaskStoppedException: pass diff --git a/api/core/app/apps/agent_chat/app_runner.py b/api/core/app/apps/agent_chat/app_runner.py index a942522996ca00..1444a71257e720 100644 --- a/api/core/app/apps/agent_chat/app_runner.py +++ b/api/core/app/apps/agent_chat/app_runner.py @@ -1,5 +1,5 @@ import logging -from typing import Any, Optional, cast +from typing import cast from core.agent.cot_chat_agent_runner import CotChatAgentRunner from core.agent.cot_completion_agent_runner import CotCompletionAgentRunner @@ -34,7 +34,6 @@ def run( queue_manager: AppQueueManager, conversation: Conversation, message: Message, - tracing_instance: Optional[Any] = None ) -> None: """ Run assistant application @@ -225,7 +224,7 @@ def run( runner_cls = FunctionCallAgentRunner else: raise ValueError(f"Invalid agent strategy: {agent_entity.strategy}") - + runner = runner_cls( tenant_id=app_config.tenant_id, application_generate_entity=application_generate_entity, @@ -247,7 +246,6 @@ def run( message=message, query=query, inputs=inputs, - tracing_instance=tracing_instance, ) # handle invoke result diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py index 1ccc9597cee3a9..b0e55bbf605a8d 100644 --- a/api/core/app/apps/base_app_runner.py +++ b/api/core/app/apps/base_app_runner.py @@ -364,6 +364,7 @@ def moderation_for_inputs( inputs=inputs, query=query if query else '', message_id=message_id, + tracing_instance=app_generate_entity.tracing_instance ) def check_hosting_moderation(self, application_generate_entity: EasyUIBasedAppGenerateEntity, diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index 2dde1331891cc7..093e54f3701623 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -103,6 +103,11 @@ def generate( override_config_dict=override_model_config_dict ) + # get tracing instance + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_model.id, + ) + # init application generate entity application_generate_entity = ChatAppGenerateEntity( task_id=str(uuid.uuid4()), @@ -115,7 +120,8 @@ def generate( user_id=user.id, stream=stream, 
invoke_from=invoke_from, - extras=extras + extras=extras, + tracing_instance=tracing_instance, ) # init generate records @@ -124,11 +130,6 @@ def generate( message ) = self._init_generate_records(application_generate_entity, conversation) - # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id, - ) - # init queue manager queue_manager = MessageBasedAppQueueManager( task_id=application_generate_entity.task_id, @@ -158,7 +159,6 @@ def generate( message=message, user=user, stream=stream, - tracing_instance=tracing_instance, ) return ChatAppGenerateResponseConverter.convert( diff --git a/api/core/app/apps/chat/app_runner.py b/api/core/app/apps/chat/app_runner.py index 0a029af86a1ce1..89a498eb3607f9 100644 --- a/api/core/app/apps/chat/app_runner.py +++ b/api/core/app/apps/chat/app_runner.py @@ -155,7 +155,7 @@ def run(self, application_generate_entity: ChatAppGenerateEntity, application_generate_entity.invoke_from ) - dataset_retrieval = DatasetRetrieval() + dataset_retrieval = DatasetRetrieval(application_generate_entity) context = dataset_retrieval.retrieve( app_id=app_record.id, user_id=application_generate_entity.user_id, diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index 186a11557af565..2bb6e5d04f9b65 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -95,6 +95,11 @@ def generate(self, app_model: App, override_config_dict=override_model_config_dict ) + # get tracing instance + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_model.id, + ) + # init application generate entity application_generate_entity = CompletionAppGenerateEntity( task_id=str(uuid.uuid4()), @@ -106,7 +111,8 @@ def generate(self, app_model: App, user_id=user.id, stream=stream, invoke_from=invoke_from, - extras=extras + extras=extras, + tracing_instance=tracing_instance ) # init generate records diff --git a/api/core/app/apps/completion/app_runner.py b/api/core/app/apps/completion/app_runner.py index 2e701320148408..f0e5f9ae173c39 100644 --- a/api/core/app/apps/completion/app_runner.py +++ b/api/core/app/apps/completion/app_runner.py @@ -115,7 +115,7 @@ def run(self, application_generate_entity: CompletionAppGenerateEntity, if dataset_config and dataset_config.retrieve_config.query_variable: query = inputs.get(dataset_config.retrieve_config.query_variable, "") - dataset_retrieval = DatasetRetrieval() + dataset_retrieval = DatasetRetrieval(application_generate_entity) context = dataset_retrieval.retrieve( app_id=app_record.id, user_id=application_generate_entity.user_id, diff --git a/api/core/app/apps/message_based_app_generator.py b/api/core/app/apps/message_based_app_generator.py index 11763d1c6cde53..fbfd2353011bca 100644 --- a/api/core/app/apps/message_based_app_generator.py +++ b/api/core/app/apps/message_based_app_generator.py @@ -1,7 +1,7 @@ import json import logging from collections.abc import Generator -from typing import Any, Optional, Union +from typing import Optional, Union from sqlalchemy import and_ @@ -47,7 +47,6 @@ def _handle_response( message: Message, user: Union[Account, EndUser], stream: bool = False, - tracing_instance: Optional[Any] = None ) -> Union[ ChatbotAppBlockingResponse, CompletionAppBlockingResponse, @@ -74,6 +73,7 @@ def _handle_response( ) try: + tracing_instance = application_generate_entity.tracing_instance return generate_task_pipeline.process(tracing_instance) except ValueError as e: if 
e.args[0] == "I/O operation on closed file.": # ignore this error diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index 829ccc8cab2b5a..d2d4947e783e7d 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -24,6 +24,7 @@ from models.account import Account from models.model import App, EndUser from models.workflow import Workflow +from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -72,6 +73,11 @@ def generate( workflow=workflow ) + # get tracing instance + tracing_instance = OpsTraceService.get_ops_trace_instance( + app_id=app_model.id, + ) + # init application generate entity application_generate_entity = WorkflowAppGenerateEntity( task_id=str(uuid.uuid4()), @@ -81,7 +87,8 @@ def generate( user_id=user.id, stream=stream, invoke_from=invoke_from, - call_depth=call_depth + call_depth=call_depth, + tracing_instance=tracing_instance ) return self._generate( diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index 982fab01a0bdc3..49a3cbbbad6f6d 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -48,7 +48,6 @@ WorkflowNodeExecution, WorkflowRun, ) -from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -109,7 +108,7 @@ def process( db.session.refresh(self._user) db.session.close() - generator = self._process_stream_response(app_id, workflow) + generator = self._process_stream_response(self._application_generate_entity.tracing_instance) if self._stream: return self._to_stream_response(generator) else: @@ -165,14 +164,12 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) def _process_stream_response( self, - app_id: Optional[str] = None, - workflow: Optional[Workflow] = None, + tracing_instance: Optional[Any] = None ) -> Generator[StreamResponse, None, None]: """ Process stream response. 
:return: """ - tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id) for message in self._queue_manager.listen(): event = message.event diff --git a/api/core/app/entities/app_invoke_entities.py b/api/core/app/entities/app_invoke_entities.py index f27a110870b111..77905e296c652e 100644 --- a/api/core/app/entities/app_invoke_entities.py +++ b/api/core/app/entities/app_invoke_entities.py @@ -89,6 +89,9 @@ class AppGenerateEntity(BaseModel): # extra parameters, like: auto_generate_conversation_name extras: dict[str, Any] = {} + # tracing instance + tracing_instance: Optional[Any] = None + class EasyUIBasedAppGenerateEntity(AppGenerateEntity): """ diff --git a/api/core/moderation/input_moderation.py b/api/core/moderation/input_moderation.py index c835c97eda0e34..a5965992071ae9 100644 --- a/api/core/moderation/input_moderation.py +++ b/api/core/moderation/input_moderation.py @@ -1,9 +1,10 @@ import logging +from typing import Any, Optional from core.app.app_config.entities import AppConfig from core.moderation.base import ModerationAction, ModerationException from core.moderation.factory import ModerationFactory -from services.ops_trace.ops_trace_service import OpsTraceService +from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from services.ops_trace.utils import measure_time logger = logging.getLogger(__name__) @@ -11,12 +12,13 @@ class InputModeration: def check( - self, app_id: str, - tenant_id: str, - app_config: AppConfig, - inputs: dict, - query: str, - message_id: str, + self, app_id: str, + tenant_id: str, + app_config: AppConfig, + inputs: dict, + query: str, + message_id: str, + tracing_instance: Optional[Any] = None ) -> tuple[bool, dict, str]: """ Process sensitive_word_avoidance. 
@@ -26,6 +28,7 @@ def check( :param inputs: inputs :param query: query :param message_id: message id + :param tracing_instance: tracing instance :return: """ if not app_config.sensitive_word_avoidance: @@ -44,13 +47,6 @@ def check( with measure_time() as timer: moderation_result = moderation_factory.moderation_for_inputs(inputs, query) - from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName - - # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_id - ) - if tracing_instance: trace_manager = TraceQueueManager() trace_manager.add_trace_task( diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index c1fb14913e8cf1..ae1644e205019d 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -24,7 +24,6 @@ from extensions.ext_database import db from models.dataset import Dataset, DatasetQuery, DocumentSegment from models.dataset import Document as DatasetDocument -from services.ops_trace.ops_trace_service import OpsTraceService from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from services.ops_trace.utils import measure_time @@ -41,6 +40,9 @@ class DatasetRetrieval: + def __init__(self, application_generate_entity=None): + self.application_generate_entity = application_generate_entity + def retrieve( self, app_id: str, user_id: str, tenant_id: str, model_config: ModelConfigWithCredentialsEntity, @@ -355,9 +357,8 @@ def _on_retrival_end( db.session.commit() # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - message_id=message_id - ) + tracing_instance = self.application_generate_entity.tracing_instance if self.application_generate_entity else None + if tracing_instance: trace_manager = TraceQueueManager() trace_manager.add_trace_task( diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py index bf96461cc1447e..34dea1e82b510f 100644 --- a/api/core/tools/tool_engine.py +++ b/api/core/tools/tool_engine.py @@ -126,7 +126,6 @@ def workflow_invoke(tool: Tool, tool_parameters: dict, user_id: str, workflow_id: str, workflow_tool_callback: DifyWorkflowCallbackHandler, workflow_call_depth: int, - tracing_instance: Optional[BaseTraceInstance] = None ) -> list[ToolInvokeMessage]: """ Workflow invokes the tool with the given arguments. 
@@ -148,7 +147,6 @@ def workflow_invoke(tool: Tool, tool_parameters: dict, tool_name=tool.identity.name, tool_inputs=tool_parameters, tool_outputs=response, - tracing_instance=tracing_instance, ) return response diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index 45a2e812197f06..2a472fc8d2cbf4 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -13,9 +13,7 @@ from core.workflow.nodes.base_node import BaseNode from core.workflow.nodes.tool.entities import ToolNodeData from core.workflow.utils.variable_template_parser import VariableTemplateParser -from extensions.ext_database import db -from models.workflow import Workflow, WorkflowNodeExecutionStatus -from services.ops_trace.ops_trace_service import OpsTraceService +from models.workflow import WorkflowNodeExecutionStatus class ToolNode(BaseNode): @@ -56,11 +54,6 @@ def _run(self, variable_pool: VariablePool) -> NodeRunResult: # get parameters parameters = self._generate_parameters(variable_pool, node_data, tool_runtime) - # get tracing instance - workflow: Workflow = db.session.query(Workflow).filter(Workflow.id == self.workflow_id).first() - app_id = workflow.app_id - tracing_instance = OpsTraceService.get_ops_trace_instance(app_id=app_id) - try: messages = ToolEngine.workflow_invoke( tool=tool_runtime, @@ -69,7 +62,6 @@ def _run(self, variable_pool: VariablePool) -> NodeRunResult: workflow_id=self.workflow_id, workflow_tool_callback=DifyWorkflowCallbackHandler(), workflow_call_depth=self.workflow_call_depth, - tracing_instance=tracing_instance ) except Exception as e: return NodeRunResult( diff --git a/api/services/message_service.py b/api/services/message_service.py index b5a32c1b770bff..4922b775319cd9 100644 --- a/api/services/message_service.py +++ b/api/services/message_service.py @@ -275,6 +275,7 @@ def get_suggested_questions_after_answer(cls, app_model: App, user: Optional[Uni tracing_instance = OpsTraceService.get_ops_trace_instance( message_id=message_id ) + if tracing_instance: trace_manager = TraceQueueManager() trace_manager.add_trace_task( diff --git a/api/services/ops_trace/langfuse_trace.py b/api/services/ops_trace/langfuse_trace.py index b9e5642cd09dee..740e3829b52a11 100644 --- a/api/services/ops_trace/langfuse_trace.py +++ b/api/services/ops_trace/langfuse_trace.py @@ -337,7 +337,7 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): input=query, output=workflow_run_outputs, metadata=metadata, - session_id=conversion_id, + session_id=conversion_id if conversion_id else workflow_run_id, tags=["workflow"], ) @@ -347,7 +347,7 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): workflow_nodes_executions = ( db.session.query(WorkflowNodeExecution) .filter(WorkflowNodeExecution.workflow_run_id == workflow_run_id) - .order_by(WorkflowNodeExecution.created_at) + .order_by(WorkflowNodeExecution.index.desc()) .all() ) @@ -363,7 +363,9 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): json.loads(node_execution.outputs) if node_execution.outputs else {} ) created_at = node_execution.created_at if node_execution.created_at else datetime.now() - finished_at = node_execution.finished_at if node_execution.finished_at else datetime.now() + elapsed_time = node_execution.elapsed_time + finished_at = created_at + timedelta(seconds=elapsed_time) + metadata = json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {} metadata.update( { diff --git 
a/api/services/ops_trace/langsmith_trace.py b/api/services/ops_trace/langsmith_trace.py index e9453138ae00a3..58da34666de78f 100644 --- a/api/services/ops_trace/langsmith_trace.py +++ b/api/services/ops_trace/langsmith_trace.py @@ -249,7 +249,7 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): workflow_nodes_executions = ( db.session.query(WorkflowNodeExecution) .filter(WorkflowNodeExecution.workflow_run_id == workflow_run_id) - .order_by(WorkflowNodeExecution.created_at) + .order_by(WorkflowNodeExecution.index.desc()) .all() ) @@ -265,7 +265,9 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): json.loads(node_execution.outputs) if node_execution.outputs else {} ) created_at = node_execution.created_at if node_execution.created_at else datetime.now() - finished_at = node_execution.finished_at if node_execution.finished_at else datetime.now() + elapsed_time = node_execution.elapsed_time + finished_at = created_at + timedelta(seconds=elapsed_time) + execution_metadata = ( json.loads(node_execution.execution_metadata) if node_execution.execution_metadata diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index b402f0cc65c04e..1f98a3186fa8d7 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -69,7 +69,8 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :param tracing_config: tracing config :return: """ - if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value]: + if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, + TracingProviderEnum.LANGSMITH.value] and tracing_provider != "": raise {"error": f"Invalid tracing provider: {tracing_provider}"} # api check From 5ae066a13e4f9db25dae1d2f72eab26feb5c6108 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Fri, 21 Jun 2024 14:36:05 +0800 Subject: [PATCH 13/65] feat: update down_revision --- .../04c602f5dc9b_update_appmodelconfig_and_add_table_.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py b/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py index 316a44edce0c55..a322b9f50290ce 100644 --- a/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py +++ b/api/migrations/versions/04c602f5dc9b_update_appmodelconfig_and_add_table_.py @@ -12,7 +12,7 @@ # revision identifiers, used by Alembic. revision = '04c602f5dc9b' -down_revision = '7b45942e39bb' +down_revision = '4ff534e1eb11' branch_labels = None depends_on = None From 04af80cb4ca0dc15cc4ccb1550f6497cf52518f1 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Fri, 21 Jun 2024 17:21:03 +0800 Subject: [PATCH 14/65] feat: add trace_config_check_error --- api/controllers/console/app/error.py | 6 ++++++ api/controllers/console/app/ops_trace.py | 6 ++++-- api/services/ops_trace/ops_trace_service.py | 6 +++--- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/api/controllers/console/app/error.py b/api/controllers/console/app/error.py index cd7919b5e426bd..f6feed12217a85 100644 --- a/api/controllers/console/app/error.py +++ b/api/controllers/console/app/error.py @@ -109,3 +109,9 @@ class TracingConfigIsExist(BaseHTTPException): error_code = 'trace_config_is_exist' description = "Trace config is exist." 
code = 400 + + +class TracingConfigCheckError(BaseHTTPException): + error_code = 'trace_config_check_error' + description = "Invalid Credentials." + code = 400 diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py index c3e9ebac1c2aa4..1e96b6f98245e3 100644 --- a/api/controllers/console/app/ops_trace.py +++ b/api/controllers/console/app/ops_trace.py @@ -1,7 +1,7 @@ from flask_restful import Resource, reqparse from controllers.console import api -from controllers.console.app.error import TracingConfigIsExist, TracingConfigNotExist +from controllers.console.app.error import TracingConfigCheckError, TracingConfigIsExist, TracingConfigNotExist from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required from libs.login import login_required @@ -49,7 +49,9 @@ def post(self, app_id): ) if not result: raise TracingConfigIsExist() - return {"result": "success"} + if result.get('error'): + raise TracingConfigCheckError() + return result except Exception as e: raise e diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index 1f98a3186fa8d7..ec2bc8d50d25b8 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -71,11 +71,11 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c """ if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value] and tracing_provider != "": - raise {"error": f"Invalid tracing provider: {tracing_provider}"} + return {"error": f"Invalid tracing provider: {tracing_provider}"} # api check if not cls.check_trace_config_is_effective(tracing_config, tracing_provider): - return {"error": "Tracing config is not effective"} + return {"error": "Invalid Credentials"} # check if trace config already exists trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( @@ -96,7 +96,7 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c db.session.add(trace_config_data) db.session.commit() - return trace_config_data.to_dict() + return {"result": "success"} @classmethod def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_config: dict): From f604b7c373125415734d870cec3985d9fa762feb Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Fri, 21 Jun 2024 18:01:15 +0800 Subject: [PATCH 15/65] feat: update poetry.lock --- api/poetry.lock | 50 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 45 insertions(+), 5 deletions(-) diff --git a/api/poetry.lock b/api/poetry.lock index cadac30d9ba862..f96aa5db0078c1 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -3902,6 +3902,46 @@ files = [ [package.dependencies] six = "*" +[[package]] +name = "langfuse" +version = "2.36.2" +description = "A client library for accessing langfuse" +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langfuse-2.36.2-py3-none-any.whl", hash = "sha256:66728feddcec0974e4eb31612151a282fcce2e333b5a61474182b5e67e78e090"}, + {file = "langfuse-2.36.2.tar.gz", hash = "sha256:3e784505d408aa2c9c2da79487b64d185d8f7fa8a855e5303bcce678454c715b"}, +] + +[package.dependencies] +backoff = ">=1.10.0" +httpx = ">=0.15.4,<1.0" +idna = ">=3.7,<4.0" +packaging = ">=23.2,<24.0" +pydantic = ">=1.10.7,<3.0" +wrapt = ">=1.14,<2.0" + +[package.extras] +langchain = ["langchain (>=0.0.309)"] +llama-index = ["llama-index (>=0.10.12,<2.0.0)"] +openai = 
["openai (>=0.27.8)"] + +[[package]] +name = "langsmith" +version = "0.1.81" +description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." +optional = false +python-versions = "<4.0,>=3.8.1" +files = [ + {file = "langsmith-0.1.81-py3-none-any.whl", hash = "sha256:3251d823225eef23ee541980b9d9e506367eabbb7f985a086b5d09e8f78ba7e9"}, + {file = "langsmith-0.1.81.tar.gz", hash = "sha256:585ef3a2251380bd2843a664c9a28da4a7d28432e3ee8bcebf291ffb8e1f0af0"}, +] + +[package.dependencies] +orjson = ">=3.9.14,<4.0.0" +pydantic = ">=1,<3" +requests = ">=2,<3" + [[package]] name = "llvmlite" version = "0.42.0" @@ -5163,13 +5203,13 @@ files = [ [[package]] name = "packaging" -version = "24.1" +version = "23.2" description = "Core utilities for Python packages" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] [[package]] @@ -8998,4 +9038,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "5e63749820d62d42f8f0d38104ea135f68361bde660131a93fe7ad08141b51b1" +content-hash = "a767085a82605b4db8ba860c355eca48a8793d4a012f84631cf822fa146c2d55" From a0a7c755d1de2a52ad197e42a04f25d422dffc7c Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Fri, 21 Jun 2024 15:08:27 +0800 Subject: [PATCH 16/65] feat: format llm workflow trace --- api/services/ops_trace/langfuse_trace.py | 5 ++++- api/services/ops_trace/langsmith_trace.py | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/api/services/ops_trace/langfuse_trace.py b/api/services/ops_trace/langfuse_trace.py index 740e3829b52a11..485bb028bb27bd 100644 --- a/api/services/ops_trace/langfuse_trace.py +++ b/api/services/ops_trace/langfuse_trace.py @@ -358,7 +358,10 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): node_name = node_execution.title node_type = node_execution.node_type status = node_execution.status - inputs = json.loads(node_execution.inputs) if node_execution.inputs else {} + if node_type == "llm": + inputs = json.loads(node_execution.process_data).get("prompts", {}) + else: + inputs = json.loads(node_execution.inputs) if node_execution.inputs else {} outputs = ( json.loads(node_execution.outputs) if node_execution.outputs else {} ) diff --git a/api/services/ops_trace/langsmith_trace.py b/api/services/ops_trace/langsmith_trace.py index 58da34666de78f..a85d729854a11e 100644 --- a/api/services/ops_trace/langsmith_trace.py +++ b/api/services/ops_trace/langsmith_trace.py @@ -260,7 +260,10 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): node_name = node_execution.title node_type = node_execution.node_type status = node_execution.status - inputs = json.loads(node_execution.inputs) if node_execution.inputs else {} + if node_type == "llm": + inputs = json.loads(node_execution.process_data).get("prompts", {}) + else: + inputs = json.loads(node_execution.inputs) if node_execution.inputs else {} outputs = ( json.loads(node_execution.outputs) if node_execution.outputs 
else {} ) From 660d4e5294caa094520270c26ef48b8a80de2a20 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Thu, 20 Jun 2024 14:31:37 +0800 Subject: [PATCH 17/65] feat: add ops trace encrypt config decrypt_config obfuscate_config --- api/controllers/console/app/app.py | 13 ++- api/controllers/console/app/ops_trace.py | 1 - api/services/ops_trace/__init__.py | 0 api/services/ops_trace/langfuse_trace.py | 30 ++++++- api/services/ops_trace/langsmith_trace.py | 25 +++++- api/services/ops_trace/model.py | 26 ++++++ api/services/ops_trace/ops_trace_service.py | 89 +++------------------ 7 files changed, 90 insertions(+), 94 deletions(-) create mode 100644 api/services/ops_trace/__init__.py create mode 100644 api/services/ops_trace/model.py diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index a9e8b5fb4a2d33..2f49222450490f 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -306,14 +306,11 @@ def post(self, app_id): parser.add_argument('tracing_provider', type=str, required=True, location='json') args = parser.parse_args() - try: - OpsTraceService.update_app_tracing_config( - app_id=app_id, - enabled=args['enabled'], - tracing_provider=args['tracing_provider'], - ) - except Exception as e: - raise e + OpsTraceService.update_app_tracing_config( + app_id=app_id, + enabled=args['enabled'], + tracing_provider=args['tracing_provider'], + ) return {"result": "success"} diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py index 1e96b6f98245e3..2a0681a9e60c26 100644 --- a/api/controllers/console/app/ops_trace.py +++ b/api/controllers/console/app/ops_trace.py @@ -71,7 +71,6 @@ def patch(self, app_id): tracing_provider=args['tracing_provider'], tracing_config=args['tracing_config'] ) - print("==============", result) if not result: raise TracingConfigNotExist() return {"result": "success"} diff --git a/api/services/ops_trace/__init__.py b/api/services/ops_trace/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/api/services/ops_trace/langfuse_trace.py b/api/services/ops_trace/langfuse_trace.py index 485bb028bb27bd..9a565ec96406ff 100644 --- a/api/services/ops_trace/langfuse_trace.py +++ b/api/services/ops_trace/langfuse_trace.py @@ -1,4 +1,5 @@ import json +import logging import os from datetime import datetime, timedelta from enum import Enum @@ -8,14 +9,17 @@ from pydantic import BaseModel, Field, field_validator from pydantic_core.core_schema import ValidationInfo +from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.moderation.base import ModerationInputsResult from extensions.ext_database import db from models.dataset import Document from models.model import Message, MessageAgentThought, MessageFile from models.workflow import WorkflowNodeExecution, WorkflowRun from services.ops_trace.base_trace_instance import BaseTraceInstance +from services.ops_trace.model import LangfuseConfig from services.ops_trace.utils import filter_none_values, replace_text_with_content +logger = logging.getLogger(__name__) def validate_input_output(v, field_name): """ @@ -696,7 +700,7 @@ def add_trace(self, langfuse_trace_data: Optional[LangfuseTrace] = None): ) try: self.langfuse_client.trace(**format_trace_data) - print("LangFuse Trace created successfully") + logger.debug("LangFuse Trace created successfully") except Exception as e: raise f"LangFuse Failed to create trace: {str(e)}" @@ -706,7 +710,7 @@ def add_span(self, 
langfuse_span_data: Optional[LangfuseSpan] = None): ) try: self.langfuse_client.span(**format_span_data) - print("LangFuse Span created successfully") + logger.debug("LangFuse Span created successfully") except Exception as e: raise f"LangFuse Failed to create span: {str(e)}" @@ -727,7 +731,7 @@ def add_generation( ) try: self.langfuse_client.generation(**format_generation_data) - print("LangFuse Generation created successfully") + logger.debug("LangFuse Generation created successfully") except Exception as e: raise f"LangFuse Failed to create generation: {str(e)}" @@ -746,5 +750,23 @@ def api_check(self): try: return self.langfuse_client.auth_check() except Exception as e: - print(f"LangFuse API check failed: {str(e)}") + logger.debug(f"LangFuse API check failed: {str(e)}") return False + + @classmethod + def obfuscate_config(cls, config: LangfuseConfig): + public_key = obfuscated_token(config.public_key) + secret_key = obfuscated_token(config.secret_key) + return LangfuseConfig(public_key=public_key, secret_key=secret_key, host=config.host) + + @classmethod + def encrypt_config(cls, tenant_id, config: LangfuseConfig): + decrypt_public_key = encrypt_token(tenant_id, config.public_key) + decrypt_secret_key = encrypt_token(tenant_id, config.secret_key) + return LangfuseConfig(public_key=decrypt_public_key, secret_key=decrypt_secret_key, host=config.host) + + @classmethod + def decrypt_config(cls, tenant_id, config: LangfuseConfig): + decrypt_public_key = decrypt_token(tenant_id, config.public_key) + decrypt_secret_key = decrypt_token(tenant_id, config.secret_key) + return LangfuseConfig(public_key=decrypt_public_key, secret_key=decrypt_secret_key, host=config.host) diff --git a/api/services/ops_trace/langsmith_trace.py b/api/services/ops_trace/langsmith_trace.py index a85d729854a11e..fa7d36aa85d498 100644 --- a/api/services/ops_trace/langsmith_trace.py +++ b/api/services/ops_trace/langsmith_trace.py @@ -1,4 +1,5 @@ import json +import logging import os from datetime import datetime, timedelta from enum import Enum @@ -8,14 +9,17 @@ from pydantic import BaseModel, Field, field_validator from pydantic_core.core_schema import ValidationInfo +from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.moderation.base import ModerationInputsResult from extensions.ext_database import db from models.dataset import Document from models.model import Message, MessageAgentThought, MessageFile from models.workflow import WorkflowNodeExecution, WorkflowRun from services.ops_trace.base_trace_instance import BaseTraceInstance +from services.ops_trace.model import LangSmithConfig from services.ops_trace.utils import filter_none_values, replace_text_with_content +logger = logging.getLogger(__name__) class LangSmithRunType(str, Enum): tool = "tool" @@ -588,7 +592,7 @@ def add_run(self, run_data: LangSmithRunModel): data = filter_none_values(data) try: self.langsmith_client.create_run(**data) - print("LangSmith Run created successfully.") + logger.debug("LangSmith Run created successfully.") except Exception as e: raise f"LangSmith Failed to create run: {str(e)}" @@ -597,7 +601,7 @@ def update_run(self, update_run_data: LangSmithRunUpdateModel): data = filter_none_values(data) try: self.langsmith_client.update_run(**data) - print("LangSmith Run updated successfully.") + logger.debug("LangSmith Run updated successfully.") except Exception as e: raise f"LangSmith Failed to update run: {str(e)}" @@ -608,5 +612,20 @@ def api_check(self): 
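For context, the check_trace_config_is_effective call used by create_tracing_app_config earlier in this series delegates to each provider's api_check: Langfuse probes auth_check(), while LangSmith exercises a throwaway project, as the surrounding hunks show. A minimal sketch of the shared pattern, assuming only a client object exposing a cheap authenticated call; the class name and helper here are illustrative, not the Dify implementation:

    import logging

    logger = logging.getLogger(__name__)

    class ExampleDataTrace:
        """Illustrative only: mirrors the api_check contract of the
        Langfuse/LangSmith trace instances."""

        def __init__(self, client):
            self.client = client

        def api_check(self) -> bool:
            # Probe the provider with a cheap authenticated call; treat any
            # failure as invalid credentials instead of raising.
            try:
                return bool(self.client.auth_check())
            except Exception as e:
                logger.debug(f"API check failed: {str(e)}")
                return False

Returning False rather than raising lets the config endpoints surface TracingConfigCheckError uniformly, whatever the provider-side failure was.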
self.langsmith_client.delete_project(project_name=random_project_name) return True except Exception as e: - print(f"LangSmith API check failed: {str(e)}") + logger.debug(f"LangSmith API check failed: {str(e)}") return False + + @classmethod + def obfuscate_config(cls, config: LangSmithConfig): + api_key = obfuscated_token(config.api_key) + return LangSmithConfig(api_key=api_key, project=config.project, endpoint=config.endpoint) + + @classmethod + def encrypt_config(cls, tenant_id, config: LangSmithConfig): + api_key = encrypt_token(tenant_id, config.api_key) + return LangSmithConfig(api_key=api_key, project=config.project, endpoint=config.endpoint) + + @classmethod + def decrypt_config(cls, tenant_id, config: LangSmithConfig): + api_key = decrypt_token(tenant_id, config.api_key) + return LangSmithConfig(api_key=api_key, project=config.project, endpoint=config.endpoint) diff --git a/api/services/ops_trace/model.py b/api/services/ops_trace/model.py new file mode 100644 index 00000000000000..51dca08137e773 --- /dev/null +++ b/api/services/ops_trace/model.py @@ -0,0 +1,26 @@ +from enum import Enum + +from pydantic import BaseModel + + +class TracingProviderEnum(Enum): + LANGFUSE = 'langfuse' + LANGSMITH = 'langsmith' + + +class LangfuseConfig(BaseModel): + """ + Model class for Langfuse tracing config. + """ + public_key: str + secret_key: str + host: str + + +class LangSmithConfig(BaseModel): + """ + Model class for Langsmith tracing config. + """ + api_key: str + project: str + endpoint: str diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index ec2bc8d50d25b8..69dea3771fc6e4 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -1,38 +1,12 @@ import json -from enum import Enum from typing import Union from uuid import UUID -from pydantic import BaseModel - -from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from extensions.ext_database import db from models.model import App, AppModelConfig, Conversation, Message, TraceAppConfig from services.ops_trace.langfuse_trace import LangFuseDataTrace from services.ops_trace.langsmith_trace import LangSmithDataTrace - - -class TracingProviderEnum(Enum): - LANGFUSE = 'langfuse' - LANGSMITH = 'langsmith' - - -class LangfuseConfig(BaseModel): - """ - Model class for Langfuse tracing config. - """ - public_key: str - secret_key: str - host: str - - -class LangSmithConfig(BaseModel): - """ - Model class for Langsmith tracing config. 
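Taken together, each trace instance now owns three transforms over its config model: encrypt_config before persisting, decrypt_config before constructing a client, and obfuscate_config before returning secrets to the console. A minimal sketch of the round-trip with stand-in crypto helpers (the real ones are the tenant-scoped helpers in core.helper.encrypter, whose exact behavior is assumed here):

    from pydantic import BaseModel

    class LangSmithConfig(BaseModel):
        # Mirrors api/services/ops_trace/model.py
        api_key: str
        project: str
        endpoint: str

    # Stand-ins for core.helper.encrypter, for illustration only.
    def encrypt_token(tenant_id: str, token: str) -> str:
        return f"enc:{tenant_id}:{token}"

    def decrypt_token(tenant_id: str, token: str) -> str:
        return token.rsplit(":", 1)[-1]

    def obfuscated_token(token: str) -> str:
        return token[:4] + "*" * 8 if len(token) > 4 else "*" * len(token)

    tenant_id = "tenant-1"
    config = LangSmithConfig(
        api_key="ls-secret-key",
        project="dify",
        endpoint="https://api.smith.langchain.com",
    )

    # encrypt_config: the form persisted in TraceAppConfig
    stored = LangSmithConfig(
        api_key=encrypt_token(tenant_id, config.api_key),
        project=config.project,
        endpoint=config.endpoint,
    )

    # decrypt_config: the form a trace instance is constructed from
    restored = LangSmithConfig(
        api_key=decrypt_token(tenant_id, stored.api_key),
        project=stored.project,
        endpoint=stored.endpoint,
    )
    assert restored.api_key == config.api_key

    # obfuscate_config: the form returned to the console UI
    masked = LangSmithConfig(
        api_key=obfuscated_token(restored.api_key),
        project=restored.project,
        endpoint=restored.endpoint,
    )

The service methods above suggest the intent: only the encrypted form is stored, and only the obfuscated form is ever shown back in the UI.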
- """ - api_key: str - project: str - endpoint: str +from services.ops_trace.model import LangfuseConfig, LangSmithConfig, TracingProviderEnum class OpsTraceService: @@ -162,25 +136,12 @@ def encrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_c """ if tracing_provider == TracingProviderEnum.LANGFUSE.value: tracing_config = LangfuseConfig(**tracing_config) - encrypt_public_key = encrypt_token(tenant_id, tracing_config.public_key) - encrypt_secret_key = encrypt_token(tenant_id, tracing_config.secret_key) - tracing_config = LangfuseConfig( - public_key=encrypt_public_key, - secret_key=encrypt_secret_key, - host=tracing_config.host - ) + tracing_config = LangFuseDataTrace.encrypt_config(tenant_id, tracing_config) elif tracing_provider == TracingProviderEnum.LANGSMITH.value: tracing_config = LangSmithConfig(**tracing_config) - encrypt_api_key = encrypt_token(tenant_id, tracing_config.api_key) - tracing_config = LangSmithConfig( - api_key=encrypt_api_key, - project=tracing_config.project, - endpoint=tracing_config.endpoint - ) + tracing_config = LangSmithDataTrace.encrypt_config(tenant_id, tracing_config) - if isinstance(tracing_config, BaseModel): - return tracing_config.dict() - return tracing_config + return tracing_config.model_dump() @classmethod def decrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_config: dict): @@ -193,25 +154,12 @@ def decrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_c """ if tracing_provider == TracingProviderEnum.LANGFUSE.value: tracing_config = LangfuseConfig(**tracing_config) - decrypt_public_key = decrypt_token(tenant_id, tracing_config.public_key) - decrypt_secret_key = decrypt_token(tenant_id, tracing_config.secret_key) - tracing_config = LangfuseConfig( - public_key=decrypt_public_key, - secret_key=decrypt_secret_key, - host=tracing_config.host - ) + tracing_config = LangFuseDataTrace.decrypt_config(tenant_id, tracing_config) elif tracing_provider == TracingProviderEnum.LANGSMITH.value: tracing_config = LangSmithConfig(**tracing_config) - decrypt_api_key = decrypt_token(tenant_id, tracing_config.api_key) - tracing_config = LangSmithConfig( - api_key=decrypt_api_key, - project=tracing_config.project, - endpoint=tracing_config.endpoint - ) + tracing_config = LangSmithDataTrace.decrypt_config(tenant_id, tracing_config) - if isinstance(tracing_config, BaseModel): - return tracing_config.dict() - return tracing_config + return tracing_config.model_dump() @classmethod def obfuscated_decrypt_token(cls, tracing_provider: str, decrypt_tracing_config:dict): @@ -221,28 +169,14 @@ def obfuscated_decrypt_token(cls, tracing_provider: str, decrypt_tracing_config: :param decrypt_tracing_config: tracing config :return: """ + obfuscate_config = None if tracing_provider == TracingProviderEnum.LANGFUSE.value: decrypt_tracing_config = LangfuseConfig(**decrypt_tracing_config) - decrypt_public_key = decrypt_tracing_config.public_key - decrypt_secret_key = decrypt_tracing_config.secret_key - obfuscated_public_key = obfuscated_token(decrypt_public_key) - obfuscated_secret_key = obfuscated_token(decrypt_secret_key) - decrypt_tracing_config = LangfuseConfig( - public_key=obfuscated_public_key, - secret_key=obfuscated_secret_key, - host=decrypt_tracing_config.host - ) + obfuscate_config = LangFuseDataTrace.obfuscate_config(decrypt_tracing_config) elif tracing_provider == TracingProviderEnum.LANGSMITH.value: decrypt_tracing_config = LangSmithConfig(**decrypt_tracing_config) - decrypt_api_key = 
decrypt_tracing_config.api_key - obfuscated_api_key = obfuscated_token(decrypt_api_key) - decrypt_tracing_config = LangSmithConfig( - api_key=obfuscated_api_key, - project=decrypt_tracing_config.project, - endpoint=decrypt_tracing_config.endpoint - ) - - return decrypt_tracing_config.dict() + obfuscate_config = LangSmithDataTrace.obfuscate_config(decrypt_tracing_config) + return obfuscate_config.model_dump() @classmethod def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): @@ -321,7 +255,6 @@ def get_ops_trace_instance( langsmith_api_key = decrypt_trace_config.get('api_key') langsmith_project = decrypt_trace_config.get('project') langsmith_endpoint = decrypt_trace_config.get('endpoint') - print(langsmith_api_key, langsmith_project, langsmith_endpoint) tracing_instance = LangSmithDataTrace( langsmith_api_key, langsmith_project, From 947016902ff4c7ed72d5ffc54759a7ae7939aba3 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Sun, 23 Jun 2024 15:36:41 +0800 Subject: [PATCH 18/65] feat: update the file structure --- api/core/agent/cot_agent_runner.py | 2 +- .../easy_ui_based_generate_task_pipeline.py | 2 +- .../task_pipeline/workflow_cycle_manage.py | 4 +- .../agent_tool_callback_handler.py | 2 +- api/core/llm_generator/llm_generator.py | 4 +- api/core/moderation/input_moderation.py | 4 +- api/core/ops/__init__.py | 0 .../ops}/base_trace_instance.py | 0 .../ops/entities/langfuse_trace_entity.py | 273 +++++++++++++++++ .../ops/entities/langsmith_trace_entity.py | 167 +++++++++++ .../ops_trace => core/ops}/langfuse_trace.py | 283 +----------------- .../ops_trace => core/ops}/langsmith_trace.py | 170 +---------- api/{services/ops_trace => core/ops}/model.py | 0 .../ops}/trace_queue_manager.py | 40 +-- api/{services/ops_trace => core/ops}/utils.py | 0 api/core/rag/retrieval/dataset_retrieval.py | 4 +- api/core/tools/tool_engine.py | 2 +- api/services/message_service.py | 4 +- api/services/ops_trace/ops_trace_service.py | 6 +- 19 files changed, 496 insertions(+), 471 deletions(-) create mode 100644 api/core/ops/__init__.py rename api/{services/ops_trace => core/ops}/base_trace_instance.py (100%) create mode 100644 api/core/ops/entities/langfuse_trace_entity.py create mode 100644 api/core/ops/entities/langsmith_trace_entity.py rename api/{services/ops_trace => core/ops}/langfuse_trace.py (64%) rename api/{services/ops_trace => core/ops}/langsmith_trace.py (72%) rename api/{services/ops_trace => core/ops}/model.py (100%) rename api/{services/ops_trace => core/ops}/trace_queue_manager.py (76%) rename api/{services/ops_trace => core/ops}/utils.py (100%) diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py index 1f2db3fb7b4168..d4b3a7f06a0989 100644 --- a/api/core/agent/cot_agent_runner.py +++ b/api/core/agent/cot_agent_runner.py @@ -15,12 +15,12 @@ ToolPromptMessage, UserPromptMessage, ) +from core.ops.base_trace_instance import BaseTraceInstance from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform from core.tools.entities.tool_entities import ToolInvokeMeta from core.tools.tool.tool import Tool from core.tools.tool_engine import ToolEngine from models.model import Message -from services.ops_trace.base_trace_instance import BaseTraceInstance class CotAgentRunner(BaseAgentRunner, ABC): diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index da0731df3eff1f..9b9a4fc56b06fa 100644 --- 
a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -44,13 +44,13 @@ ) from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.utils.encoders import jsonable_encoder +from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from core.prompt.utils.prompt_message_util import PromptMessageUtil from core.prompt.utils.prompt_template_parser import PromptTemplateParser from events.message_event import message_was_created from extensions.ext_database import db from models.account import Account from models.model import AppMode, Conversation, EndUser, Message, MessageAgentThought -from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName logger = logging.getLogger(__name__) diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index c76c1c5e418e87..ac589a01b35291 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_cycle_manage.py @@ -22,6 +22,8 @@ from core.app.task_pipeline.workflow_iteration_cycle_manage import WorkflowIterationCycleManage from core.file.file_obj import FileVar from core.model_runtime.utils.encoders import jsonable_encoder +from core.ops.base_trace_instance import BaseTraceInstance +from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from core.tools.tool_manager import ToolManager from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeType from core.workflow.nodes.tool.entities import ToolNodeData @@ -39,8 +41,6 @@ WorkflowRunStatus, WorkflowRunTriggeredFrom, ) -from services.ops_trace.base_trace_instance import BaseTraceInstance -from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName class WorkflowCycleManage(WorkflowIterationCycleManage): diff --git a/api/core/callback_handler/agent_tool_callback_handler.py b/api/core/callback_handler/agent_tool_callback_handler.py index d70161f64c1ebf..28a5fb8ec73e49 100644 --- a/api/core/callback_handler/agent_tool_callback_handler.py +++ b/api/core/callback_handler/agent_tool_callback_handler.py @@ -3,7 +3,7 @@ from pydantic import BaseModel -from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName _TEXT_COLOR_MAPPING = { "blue": "36;1", diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 808f2a941f143c..a37d47221a4797 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -10,10 +10,10 @@ from core.model_runtime.entities.message_entities import SystemPromptMessage, UserPromptMessage from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError +from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.utils import measure_time from core.prompt.utils.prompt_template_parser import PromptTemplateParser from services.ops_trace.ops_trace_service import OpsTraceService -from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName -from services.ops_trace.utils import measure_time class LLMGenerator: diff --git 
a/api/core/moderation/input_moderation.py b/api/core/moderation/input_moderation.py index a5965992071ae9..7689e6b18b9e96 100644 --- a/api/core/moderation/input_moderation.py +++ b/api/core/moderation/input_moderation.py @@ -4,8 +4,8 @@ from core.app.app_config.entities import AppConfig from core.moderation.base import ModerationAction, ModerationException from core.moderation.factory import ModerationFactory -from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName -from services.ops_trace.utils import measure_time +from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.utils import measure_time logger = logging.getLogger(__name__) diff --git a/api/core/ops/__init__.py b/api/core/ops/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/api/services/ops_trace/base_trace_instance.py b/api/core/ops/base_trace_instance.py similarity index 100% rename from api/services/ops_trace/base_trace_instance.py rename to api/core/ops/base_trace_instance.py diff --git a/api/core/ops/entities/langfuse_trace_entity.py b/api/core/ops/entities/langfuse_trace_entity.py new file mode 100644 index 00000000000000..ebcf692a5ca08c --- /dev/null +++ b/api/core/ops/entities/langfuse_trace_entity.py @@ -0,0 +1,273 @@ +from datetime import datetime +from enum import Enum +from typing import Any, Optional, Union + +from pydantic import BaseModel, Field, field_validator +from pydantic_core.core_schema import ValidationInfo + +from core.ops.utils import replace_text_with_content + + +def validate_input_output(v, field_name): + """ + Validate input output + :param v: + :param field_name: + :return: + """ + if v == {} or v is None: + return v + if isinstance(v, str): + return [ + { + "role": "assistant" if field_name == "output" else "user", + "content": v, + } + ] + elif isinstance(v, list): + if len(v) > 0 and isinstance(v[0], dict): + v = replace_text_with_content(data=v) + return v + else: + return [ + { + "role": "assistant" if field_name == "output" else "user", + "content": str(v), + } + ] + + return v + + +class LevelEnum(str, Enum): + DEBUG = "DEBUG" + WARNING = "WARNING" + ERROR = "ERROR" + DEFAULT = "DEFAULT" + + +class LangfuseTrace(BaseModel): + """ + Langfuse trace model + """ + id: Optional[str] = Field( + default=None, + description="The id of the trace can be set, defaults to a random id. Used to link traces to external systems " + "or when creating a distributed trace. Traces are upserted on id.", + ) + name: Optional[str] = Field( + default=None, + description="Identifier of the trace. Useful for sorting/filtering in the UI.", + ) + input: Optional[Union[str, dict[str, Any], list, None]] = Field( + default=None, description="The input of the trace. Can be any JSON object." + ) + output: Optional[Union[str, dict[str, Any], list, None]] = Field( + default=None, description="The output of the trace. Can be any JSON object." + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, + description="Additional metadata of the trace. Can be any JSON object. Metadata is merged when being updated " + "via the API.", + ) + user_id: Optional[str] = Field( + default=None, + description="The id of the user that triggered the execution. Used to provide user-level analytics.", + ) + session_id: Optional[str] = Field( + default=None, + description="Used to group multiple traces into a session in Langfuse. 
Use your own session/thread identifier.", + ) + version: Optional[str] = Field( + default=None, + description="The version of the trace type. Used to understand how changes to the trace type affect metrics. " + "Useful in debugging.", + ) + release: Optional[str] = Field( + default=None, + description="The release identifier of the current deployment. Used to understand how changes of different " + "deployments affect metrics. Useful in debugging.", + ) + tags: Optional[list[str]] = Field( + default=None, + description="Tags are used to categorize or label traces. Traces can be filtered by tags in the UI and GET " + "API. Tags can also be changed in the UI. Tags are merged and never deleted via the API.", + ) + public: Optional[bool] = Field( + default=None, + description="You can make a trace public to share it via a public link. This allows others to view the trace " + "without needing to log in or be members of your Langfuse project.", + ) + + @field_validator("input", "output") + def ensure_dict(cls, v, info: ValidationInfo): + field_name = info.field_name + return validate_input_output(v, field_name) + + +class LangfuseSpan(BaseModel): + """ + Langfuse span model + """ + id: Optional[str] = Field( + default=None, + description="The id of the span can be set, otherwise a random id is generated. Spans are upserted on id.", + ) + session_id: Optional[str] = Field( + default=None, + description="Used to group multiple spans into a session in Langfuse. Use your own session/thread identifier.", + ) + trace_id: Optional[str] = Field( + default=None, + description="The id of the trace the span belongs to. Used to link spans to traces.", + ) + user_id: Optional[str] = Field( + default=None, + description="The id of the user that triggered the execution. Used to provide user-level analytics.", + ) + start_time: Optional[datetime | str] = Field( + default_factory=datetime.now, + description="The time at which the span started, defaults to the current time.", + ) + end_time: Optional[datetime | str] = Field( + default=None, + description="The time at which the span ended. Automatically set by span.end().", + ) + name: Optional[str] = Field( + default=None, + description="Identifier of the span. Useful for sorting/filtering in the UI.", + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, + description="Additional metadata of the span. Can be any JSON object. Metadata is merged when being updated " + "via the API.", + ) + level: Optional[str] = Field( + default=None, + description="The level of the span. Can be DEBUG, DEFAULT, WARNING or ERROR. Used for sorting/filtering of " + "traces with elevated error levels and for highlighting in the UI.", + ) + status_message: Optional[str] = Field( + default=None, + description="The status message of the span. Additional field for context of the event. E.g. the error " + "message of an error event.", + ) + input: Optional[Union[str, dict[str, Any], list, None]] = Field( + default=None, description="The input of the span. Can be any JSON object." + ) + output: Optional[Union[str, dict[str, Any], list, None]] = Field( + default=None, description="The output of the span. Can be any JSON object." + ) + version: Optional[str] = Field( + default=None, + description="The version of the span type. Used to understand how changes to the span type affect metrics. 
" + "Useful in debugging.", + ) + + @field_validator("input", "output") + def ensure_dict(cls, v, info: ValidationInfo): + field_name = info.field_name + return validate_input_output(v, field_name) + + +class UnitEnum(str, Enum): + CHARACTERS = "CHARACTERS" + TOKENS = "TOKENS" + SECONDS = "SECONDS" + MILLISECONDS = "MILLISECONDS" + IMAGES = "IMAGES" + + +class GenerationUsage(BaseModel): + promptTokens: Optional[int] = None + completionTokens: Optional[int] = None + totalTokens: Optional[int] = None + input: Optional[int] = None + output: Optional[int] = None + total: Optional[int] = None + unit: Optional[UnitEnum] = None + inputCost: Optional[float] = None + outputCost: Optional[float] = None + totalCost: Optional[float] = None + + @field_validator("input", "output") + def ensure_dict(cls, v, info: ValidationInfo): + field_name = info.field_name + return validate_input_output(v, field_name) + + +class LangfuseGeneration(BaseModel): + id: Optional[str] = Field( + default=None, + description="The id of the generation can be set, defaults to random id.", + ) + trace_id: Optional[str] = Field( + default=None, + description="The id of the trace the generation belongs to. Used to link generations to traces.", + ) + parent_observation_id: Optional[str] = Field( + default=None, + description="The id of the observation the generation belongs to. Used to link generations to observations.", + ) + name: Optional[str] = Field( + default=None, + description="Identifier of the generation. Useful for sorting/filtering in the UI.", + ) + start_time: Optional[datetime | str] = Field( + default_factory=datetime.now, + description="The time at which the generation started, defaults to the current time.", + ) + completion_start_time: Optional[datetime | str] = Field( + default=None, + description="The time at which the completion started (streaming). Set it to get latency analytics broken " + "down into time until completion started and completion duration.", + ) + end_time: Optional[datetime | str] = Field( + default=None, + description="The time at which the generation ended. Automatically set by generation.end().", + ) + model: Optional[str] = Field( + default=None, description="The name of the model used for the generation." + ) + model_parameters: Optional[dict[str, Any]] = Field( + default=None, + description="The parameters of the model used for the generation; can be any key-value pairs.", + ) + input: Optional[Any] = Field( + default=None, + description="The prompt used for the generation. Can be any string or JSON object.", + ) + output: Optional[Any] = Field( + default=None, + description="The completion generated by the model. Can be any string or JSON object.", + ) + usage: Optional[GenerationUsage] = Field( + default=None, + description="The usage object supports the OpenAi structure with tokens and a more generic version with " + "detailed costs and units.", + ) + metadata: Optional[dict[str, Any]] = Field( + default=None, + description="Additional metadata of the generation. Can be any JSON object. Metadata is merged when being " + "updated via the API.", + ) + level: Optional[LevelEnum] = Field( + default=None, + description="The level of the generation. Can be DEBUG, DEFAULT, WARNING or ERROR. Used for sorting/filtering " + "of traces with elevated error levels and for highlighting in the UI.", + ) + status_message: Optional[str] = Field( + default=None, + description="The status message of the generation. Additional field for context of the event. E.g. 
the error " + "message of an error event.", + ) + version: Optional[str] = Field( + default=None, + description="The version of the generation type. Used to understand how changes to the span type affect " + "metrics. Useful in debugging.", + ) + + @field_validator("input", "output") + def ensure_dict(cls, v, info: ValidationInfo): + field_name = info.field_name + return validate_input_output(v, field_name) diff --git a/api/core/ops/entities/langsmith_trace_entity.py b/api/core/ops/entities/langsmith_trace_entity.py new file mode 100644 index 00000000000000..f3fc46d99a8692 --- /dev/null +++ b/api/core/ops/entities/langsmith_trace_entity.py @@ -0,0 +1,167 @@ +from datetime import datetime +from enum import Enum +from typing import Any, Optional, Union + +from pydantic import BaseModel, Field, field_validator +from pydantic_core.core_schema import ValidationInfo + +from core.ops.utils import replace_text_with_content + + +class LangSmithRunType(str, Enum): + tool = "tool" + chain = "chain" + llm = "llm" + retriever = "retriever" + embedding = "embedding" + prompt = "prompt" + parser = "parser" + + +class LangSmithTokenUsage(BaseModel): + input_tokens: Optional[int] = None + output_tokens: Optional[int] = None + total_tokens: Optional[int] = None + + +class LangSmithMultiModel(BaseModel): + file_list: Optional[list[str]] = Field(None, description="List of files") + + +class LangSmithRunModel(LangSmithTokenUsage, LangSmithMultiModel): + name: Optional[str] = Field(..., description="Name of the run") + inputs: Optional[Union[str, dict[str, Any], list, None]] = Field(None, description="Inputs of the run") + outputs: Optional[Union[str, dict[str, Any], list, None]] = Field(None, description="Outputs of the run") + run_type: LangSmithRunType = Field(..., description="Type of the run") + start_time: Optional[datetime | str] = Field(None, description="Start time of the run") + end_time: Optional[datetime | str] = Field(None, description="End time of the run") + extra: Optional[dict[str, Any]] = Field( + None, description="Extra information of the run" + ) + error: Optional[str] = Field(None, description="Error message of the run") + serialized: Optional[dict[str, Any]] = Field( + None, description="Serialized data of the run" + ) + parent_run_id: Optional[str] = Field(None, description="Parent run ID") + events: Optional[list[dict[str, Any]]] = Field( + None, description="Events associated with the run" + ) + tags: Optional[list[str]] = Field(None, description="Tags associated with the run") + trace_id: Optional[str] = Field( + None, description="Trace ID associated with the run" + ) + dotted_order: Optional[str] = Field(None, description="Dotted order of the run") + id: Optional[str] = Field(None, description="ID of the run") + session_id: Optional[str] = Field( + None, description="Session ID associated with the run" + ) + session_name: Optional[str] = Field( + None, description="Session name associated with the run" + ) + reference_example_id: Optional[str] = Field( + None, description="Reference example ID associated with the run" + ) + input_attachments: Optional[dict[str, Any]] = Field( + None, description="Input attachments of the run" + ) + output_attachments: Optional[dict[str, Any]] = Field( + None, description="Output attachments of the run" + ) + + @field_validator("inputs", "outputs") + def ensure_dict(cls, v, info: ValidationInfo): + field_name = info.field_name + values = info.data + if v == {} or v is None: + return v + usage_metadata = { + "input_tokens": values.get('input_tokens', 
0), + "output_tokens": values.get('output_tokens', 0), + "total_tokens": values.get('total_tokens', 0), + } + file_list = values.get("file_list", []) + if isinstance(v, str): + if field_name == "inputs": + return { + "messages": { + "role": "user", + "content": v, + "usage_metadata": usage_metadata, + "file_list": file_list, + }, + } + elif field_name == "outputs": + return { + "choices": { + "role": "ai", + "content": v, + "usage_metadata": usage_metadata, + "file_list": file_list, + }, + } + elif isinstance(v, list): + data = {} + if len(v) > 0 and isinstance(v[0], dict): + # rename text to content + v = replace_text_with_content(data=v) + if field_name == "inputs": + data = { + "messages": v, + } + elif field_name == "outputs": + data = { + "choices": { + "role": "ai", + "content": v, + "usage_metadata": usage_metadata, + "file_list": file_list, + }, + } + return data + else: + return { + "choices": { + "role": "ai" if field_name == "outputs" else "user", + "content": str(v), + "usage_metadata": usage_metadata, + "file_list": file_list, + }, + } + if isinstance(v, dict): + v["usage_metadata"] = usage_metadata + v["file_list"] = file_list + return v + return v + + @field_validator("start_time", "end_time") + def format_time(cls, v, info: ValidationInfo): + if not isinstance(v, datetime): + raise ValueError(f"{info.field_name} must be a datetime object") + else: + return v.strftime("%Y-%m-%dT%H:%M:%S.%fZ") + + +class LangSmithRunUpdateModel(BaseModel): + run_id: str = Field(..., description="ID of the run") + trace_id: Optional[str] = Field( + None, description="Trace ID associated with the run" + ) + dotted_order: Optional[str] = Field(None, description="Dotted order of the run") + parent_run_id: Optional[str] = Field(None, description="Parent run ID") + end_time: Optional[datetime | str] = Field(None, description="End time of the run") + error: Optional[str] = Field(None, description="Error message of the run") + inputs: Optional[dict[str, Any]] = Field(None, description="Inputs of the run") + outputs: Optional[dict[str, Any]] = Field(None, description="Outputs of the run") + events: Optional[list[dict[str, Any]]] = Field( + None, description="Events associated with the run" + ) + tags: Optional[list[str]] = Field(None, description="Tags associated with the run") + extra: Optional[dict[str, Any]] = Field( + None, description="Extra information of the run" + ) + input_attachments: Optional[dict[str, Any]] = Field( + None, description="Input attachments of the run" + ) + output_attachments: Optional[dict[str, Any]] = Field( + None, description="Output attachments of the run" + ) diff --git a/api/services/ops_trace/langfuse_trace.py b/api/core/ops/langfuse_trace.py similarity index 64% rename from api/services/ops_trace/langfuse_trace.py rename to api/core/ops/langfuse_trace.py index 9a565ec96406ff..3e1664edb2efbf 100644 --- a/api/services/ops_trace/langfuse_trace.py +++ b/api/core/ops/langfuse_trace.py @@ -2,289 +2,30 @@ import logging import os from datetime import datetime, timedelta -from enum import Enum -from typing import Any, Optional, Union +from typing import Any, Optional from langfuse import Langfuse -from pydantic import BaseModel, Field, field_validator -from pydantic_core.core_schema import ValidationInfo from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.moderation.base import ModerationInputsResult +from core.ops.base_trace_instance import BaseTraceInstance +from core.ops.entities.langfuse_trace_entity import ( + GenerationUsage, + 
LangfuseGeneration, + LangfuseSpan, + LangfuseTrace, + LevelEnum, + UnitEnum, +) +from core.ops.model import LangfuseConfig +from core.ops.utils import filter_none_values from extensions.ext_database import db from models.dataset import Document from models.model import Message, MessageAgentThought, MessageFile from models.workflow import WorkflowNodeExecution, WorkflowRun -from services.ops_trace.base_trace_instance import BaseTraceInstance -from services.ops_trace.model import LangfuseConfig -from services.ops_trace.utils import filter_none_values, replace_text_with_content logger = logging.getLogger(__name__) -def validate_input_output(v, field_name): - """ - Validate input output - :param v: - :param field_name: - :return: - """ - if v == {} or v is None: - return v - if isinstance(v, str): - return [ - { - "role": "assistant" if field_name == "output" else "user", - "content": v, - } - ] - elif isinstance(v, list): - if len(v) > 0 and isinstance(v[0], dict): - v = replace_text_with_content(data=v) - return v - else: - return [ - { - "role": "assistant" if field_name == "output" else "user", - "content": str(v), - } - ] - - return v - - -class LevelEnum(str, Enum): - DEBUG = "DEBUG" - WARNING = "WARNING" - ERROR = "ERROR" - DEFAULT = "DEFAULT" - - -class LangfuseTrace(BaseModel): - """ - Langfuse trace model - """ - id: Optional[str] = Field( - default=None, - description="The id of the trace can be set, defaults to a random id. Used to link traces to external systems " - "or when creating a distributed trace. Traces are upserted on id.", - ) - name: Optional[str] = Field( - default=None, - description="Identifier of the trace. Useful for sorting/filtering in the UI.", - ) - input: Optional[Union[str, dict[str, Any], list, None]] = Field( - default=None, description="The input of the trace. Can be any JSON object." - ) - output: Optional[Union[str, dict[str, Any], list, None]] = Field( - default=None, description="The output of the trace. Can be any JSON object." - ) - metadata: Optional[dict[str, Any]] = Field( - default=None, - description="Additional metadata of the trace. Can be any JSON object. Metadata is merged when being updated " - "via the API.", - ) - user_id: Optional[str] = Field( - default=None, - description="The id of the user that triggered the execution. Used to provide user-level analytics.", - ) - session_id: Optional[str] = Field( - default=None, - description="Used to group multiple traces into a session in Langfuse. Use your own session/thread identifier.", - ) - version: Optional[str] = Field( - default=None, - description="The version of the trace type. Used to understand how changes to the trace type affect metrics. " - "Useful in debugging.", - ) - release: Optional[str] = Field( - default=None, - description="The release identifier of the current deployment. Used to understand how changes of different " - "deployments affect metrics. Useful in debugging.", - ) - tags: Optional[list[str]] = Field( - default=None, - description="Tags are used to categorize or label traces. Traces can be filtered by tags in the UI and GET " - "API. Tags can also be changed in the UI. Tags are merged and never deleted via the API.", - ) - public: Optional[bool] = Field( - default=None, - description="You can make a trace public to share it via a public link. 
This allows others to view the trace " - "without needing to log in or be members of your Langfuse project.", - ) - - @field_validator("input", "output") - def ensure_dict(cls, v, info: ValidationInfo): - field_name = info.field_name - return validate_input_output(v, field_name) - - -class LangfuseSpan(BaseModel): - """ - Langfuse span model - """ - id: Optional[str] = Field( - default=None, - description="The id of the span can be set, otherwise a random id is generated. Spans are upserted on id.", - ) - session_id: Optional[str] = Field( - default=None, - description="Used to group multiple spans into a session in Langfuse. Use your own session/thread identifier.", - ) - trace_id: Optional[str] = Field( - default=None, - description="The id of the trace the span belongs to. Used to link spans to traces.", - ) - user_id: Optional[str] = Field( - default=None, - description="The id of the user that triggered the execution. Used to provide user-level analytics.", - ) - start_time: Optional[datetime | str] = Field( - default_factory=datetime.now, - description="The time at which the span started, defaults to the current time.", - ) - end_time: Optional[datetime | str] = Field( - default=None, - description="The time at which the span ended. Automatically set by span.end().", - ) - name: Optional[str] = Field( - default=None, - description="Identifier of the span. Useful for sorting/filtering in the UI.", - ) - metadata: Optional[dict[str, Any]] = Field( - default=None, - description="Additional metadata of the span. Can be any JSON object. Metadata is merged when being updated " - "via the API.", - ) - level: Optional[str] = Field( - default=None, - description="The level of the span. Can be DEBUG, DEFAULT, WARNING or ERROR. Used for sorting/filtering of " - "traces with elevated error levels and for highlighting in the UI.", - ) - status_message: Optional[str] = Field( - default=None, - description="The status message of the span. Additional field for context of the event. E.g. the error " - "message of an error event.", - ) - input: Optional[Union[str, dict[str, Any], list, None]] = Field( - default=None, description="The input of the span. Can be any JSON object." - ) - output: Optional[Union[str, dict[str, Any], list, None]] = Field( - default=None, description="The output of the span. Can be any JSON object." - ) - version: Optional[str] = Field( - default=None, - description="The version of the span type. Used to understand how changes to the span type affect metrics. 
" - "Useful in debugging.", - ) - - @field_validator("input", "output") - def ensure_dict(cls, v, info: ValidationInfo): - field_name = info.field_name - return validate_input_output(v, field_name) - - -class UnitEnum(str, Enum): - CHARACTERS = "CHARACTERS" - TOKENS = "TOKENS" - SECONDS = "SECONDS" - MILLISECONDS = "MILLISECONDS" - IMAGES = "IMAGES" - - -class GenerationUsage(BaseModel): - promptTokens: Optional[int] = None - completionTokens: Optional[int] = None - totalTokens: Optional[int] = None - input: Optional[int] = None - output: Optional[int] = None - total: Optional[int] = None - unit: Optional[UnitEnum] = None - inputCost: Optional[float] = None - outputCost: Optional[float] = None - totalCost: Optional[float] = None - - @field_validator("input", "output") - def ensure_dict(cls, v, info: ValidationInfo): - field_name = info.field_name - return validate_input_output(v, field_name) - - -class LangfuseGeneration(BaseModel): - id: Optional[str] = Field( - default=None, - description="The id of the generation can be set, defaults to random id.", - ) - trace_id: Optional[str] = Field( - default=None, - description="The id of the trace the generation belongs to. Used to link generations to traces.", - ) - parent_observation_id: Optional[str] = Field( - default=None, - description="The id of the observation the generation belongs to. Used to link generations to observations.", - ) - name: Optional[str] = Field( - default=None, - description="Identifier of the generation. Useful for sorting/filtering in the UI.", - ) - start_time: Optional[datetime | str] = Field( - default_factory=datetime.now, - description="The time at which the generation started, defaults to the current time.", - ) - completion_start_time: Optional[datetime | str] = Field( - default=None, - description="The time at which the completion started (streaming). Set it to get latency analytics broken " - "down into time until completion started and completion duration.", - ) - end_time: Optional[datetime | str] = Field( - default=None, - description="The time at which the generation ended. Automatically set by generation.end().", - ) - model: Optional[str] = Field( - default=None, description="The name of the model used for the generation." - ) - model_parameters: Optional[dict[str, Any]] = Field( - default=None, - description="The parameters of the model used for the generation; can be any key-value pairs.", - ) - input: Optional[Any] = Field( - default=None, - description="The prompt used for the generation. Can be any string or JSON object.", - ) - output: Optional[Any] = Field( - default=None, - description="The completion generated by the model. Can be any string or JSON object.", - ) - usage: Optional[GenerationUsage] = Field( - default=None, - description="The usage object supports the OpenAi structure with tokens and a more generic version with " - "detailed costs and units.", - ) - metadata: Optional[dict[str, Any]] = Field( - default=None, - description="Additional metadata of the generation. Can be any JSON object. Metadata is merged when being " - "updated via the API.", - ) - level: Optional[LevelEnum] = Field( - default=None, - description="The level of the generation. Can be DEBUG, DEFAULT, WARNING or ERROR. Used for sorting/filtering " - "of traces with elevated error levels and for highlighting in the UI.", - ) - status_message: Optional[str] = Field( - default=None, - description="The status message of the generation. Additional field for context of the event. E.g. 
the error " - "message of an error event.", - ) - version: Optional[str] = Field( - default=None, - description="The version of the generation type. Used to understand how changes to the span type affect " - "metrics. Useful in debugging.", - ) - - @field_validator("input", "output") - def ensure_dict(cls, v, info: ValidationInfo): - field_name = info.field_name - return validate_input_output(v, field_name) - class LangFuseDataTrace(BaseTraceInstance): def __init__( diff --git a/api/services/ops_trace/langsmith_trace.py b/api/core/ops/langsmith_trace.py similarity index 72% rename from api/services/ops_trace/langsmith_trace.py rename to api/core/ops/langsmith_trace.py index fa7d36aa85d498..d3e84411855f06 100644 --- a/api/services/ops_trace/langsmith_trace.py +++ b/api/core/ops/langsmith_trace.py @@ -2,183 +2,23 @@ import logging import os from datetime import datetime, timedelta -from enum import Enum -from typing import Any, Optional, Union +from typing import Any from langsmith import Client -from pydantic import BaseModel, Field, field_validator -from pydantic_core.core_schema import ValidationInfo from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.moderation.base import ModerationInputsResult +from core.ops.base_trace_instance import BaseTraceInstance +from core.ops.entities.langsmith_trace_entity import LangSmithRunModel, LangSmithRunType, LangSmithRunUpdateModel +from core.ops.model import LangSmithConfig +from core.ops.utils import filter_none_values from extensions.ext_database import db from models.dataset import Document from models.model import Message, MessageAgentThought, MessageFile from models.workflow import WorkflowNodeExecution, WorkflowRun -from services.ops_trace.base_trace_instance import BaseTraceInstance -from services.ops_trace.model import LangSmithConfig -from services.ops_trace.utils import filter_none_values, replace_text_with_content logger = logging.getLogger(__name__) -class LangSmithRunType(str, Enum): - tool = "tool" - chain = "chain" - llm = "llm" - retriever = "retriever" - embedding = "embedding" - prompt = "prompt" - parser = "parser" - - -class LangSmithTokenUsage(BaseModel): - input_tokens: Optional[int] = None - output_tokens: Optional[int] = None - total_tokens: Optional[int] = None - - -class LangSmithMultiModel(BaseModel): - file_list: Optional[list[str]] = Field(None, description="List of files") - - -class LangSmithRunModel(LangSmithTokenUsage, LangSmithMultiModel): - name: Optional[str] = Field(..., description="Name of the run") - inputs: Optional[Union[str, dict[str, Any], list, None]] = Field(None, description="Inputs of the run") - outputs: Optional[Union[str, dict[str, Any], list, None]] = Field(None, description="Outputs of the run") - run_type: LangSmithRunType = Field(..., description="Type of the run") - start_time: Optional[datetime | str] = Field(None, description="Start time of the run") - end_time: Optional[datetime | str] = Field(None, description="End time of the run") - extra: Optional[dict[str, Any]] = Field( - None, description="Extra information of the run" - ) - error: Optional[str] = Field(None, description="Error message of the run") - serialized: Optional[dict[str, Any]] = Field( - None, description="Serialized data of the run" - ) - parent_run_id: Optional[str] = Field(None, description="Parent run ID") - events: Optional[list[dict[str, Any]]] = Field( - None, description="Events associated with the run" - ) - tags: Optional[list[str]] = Field(None, description="Tags associated 
with the run") - trace_id: Optional[str] = Field( - None, description="Trace ID associated with the run" - ) - dotted_order: Optional[str] = Field(None, description="Dotted order of the run") - id: Optional[str] = Field(None, description="ID of the run") - session_id: Optional[str] = Field( - None, description="Session ID associated with the run" - ) - session_name: Optional[str] = Field( - None, description="Session name associated with the run" - ) - reference_example_id: Optional[str] = Field( - None, description="Reference example ID associated with the run" - ) - input_attachments: Optional[dict[str, Any]] = Field( - None, description="Input attachments of the run" - ) - output_attachments: Optional[dict[str, Any]] = Field( - None, description="Output attachments of the run" - ) - - @field_validator("inputs", "outputs") - def ensure_dict(cls, v, info: ValidationInfo): - field_name = info.field_name - values = info.data - if v == {} or v is None: - return v - usage_metadata = { - "input_tokens": values.get('input_tokens', 0), - "output_tokens": values.get('output_tokens', 0), - "total_tokens": values.get('total_tokens', 0), - } - file_list = values.get("file_list", []) - if isinstance(v, str): - if field_name == "inputs": - return { - "messages": { - "role": "user", - "content": v, - "usage_metadata": usage_metadata, - "file_list": file_list, - }, - } - elif field_name == "outputs": - return { - "choices": { - "role": "ai", - "content": v, - "usage_metadata": usage_metadata, - "file_list": file_list, - }, - } - elif isinstance(v, list): - data = {} - if len(v) > 0 and isinstance(v[0], dict): - # rename text to content - v = replace_text_with_content(data=v) - if field_name == "inputs": - data = { - "messages": v, - } - elif field_name == "outputs": - data = { - "choices": { - "role": "ai", - "content": v, - "usage_metadata": usage_metadata, - "file_list": file_list, - }, - } - return data - else: - return { - "choices": { - "role": "ai" if field_name == "outputs" else "user", - "content": str(v), - "usage_metadata": usage_metadata, - "file_list": file_list, - }, - } - if isinstance(v, dict): - v["usage_metadata"] = usage_metadata - v["file_list"] = file_list - return v - return v - - @field_validator("start_time", "end_time") - def format_time(cls, v, info: ValidationInfo): - if not isinstance(v, datetime): - raise ValueError(f"{info.field_name} must be a datetime object") - else: - return v.strftime("%Y-%m-%dT%H:%M:%S.%fZ") - - -class LangSmithRunUpdateModel(BaseModel): - run_id: str = Field(..., description="ID of the run") - trace_id: Optional[str] = Field( - None, description="Trace ID associated with the run" - ) - dotted_order: Optional[str] = Field(None, description="Dotted order of the run") - parent_run_id: Optional[str] = Field(None, description="Parent run ID") - end_time: Optional[datetime | str] = Field(None, description="End time of the run") - error: Optional[str] = Field(None, description="Error message of the run") - inputs: Optional[dict[str, Any]] = Field(None, description="Inputs of the run") - outputs: Optional[dict[str, Any]] = Field(None, description="Outputs of the run") - events: Optional[list[dict[str, Any]]] = Field( - None, description="Events associated with the run" - ) - tags: Optional[list[str]] = Field(None, description="Tags associated with the run") - extra: Optional[dict[str, Any]] = Field( - None, description="Extra information of the run" - ) - input_attachments: Optional[dict[str, Any]] = Field( - None, description="Input attachments of the run" - 
) - output_attachments: Optional[dict[str, Any]] = Field( - None, description="Output attachments of the run" - ) - class LangSmithDataTrace(BaseTraceInstance): def __init__( diff --git a/api/services/ops_trace/model.py b/api/core/ops/model.py similarity index 100% rename from api/services/ops_trace/model.py rename to api/core/ops/model.py diff --git a/api/services/ops_trace/trace_queue_manager.py b/api/core/ops/trace_queue_manager.py similarity index 76% rename from api/services/ops_trace/trace_queue_manager.py rename to api/core/ops/trace_queue_manager.py index bb65d7e1a2cde0..34be64ee566003 100644 --- a/api/services/ops_trace/trace_queue_manager.py +++ b/api/core/ops/trace_queue_manager.py @@ -2,9 +2,9 @@ import threading from enum import Enum +from core.ops.utils import get_message_data from extensions.ext_database import db from models.model import Conversation, MessageFile -from services.ops_trace.utils import get_message_data class TraceTaskName(str, Enum): @@ -31,32 +31,32 @@ def execute(self): def preprocess(self): if self.trace_type == TraceTaskName.CONVERSATION_TRACE: - return TraceTaskName.CONVERSATION_TRACE, self.process_conversation_trace(**self.kwargs) + return TraceTaskName.CONVERSATION_TRACE, self.conversation_trace(**self.kwargs) if self.trace_type == TraceTaskName.WORKFLOW_TRACE: - return TraceTaskName.WORKFLOW_TRACE, self.process_workflow_trace(**self.kwargs) + return TraceTaskName.WORKFLOW_TRACE, self.workflow_trace(**self.kwargs) elif self.trace_type == TraceTaskName.MESSAGE_TRACE: - return TraceTaskName.MESSAGE_TRACE, self.process_message_trace(**self.kwargs) + return TraceTaskName.MESSAGE_TRACE, self.message_trace(**self.kwargs) elif self.trace_type == TraceTaskName.MODERATION_TRACE: - return TraceTaskName.MODERATION_TRACE, self.process_moderation_trace(**self.kwargs) + return TraceTaskName.MODERATION_TRACE, self.moderation_trace(**self.kwargs) elif self.trace_type == TraceTaskName.SUGGESTED_QUESTION_TRACE: - return TraceTaskName.SUGGESTED_QUESTION_TRACE, self.process_suggested_question_trace(**self.kwargs) + return TraceTaskName.SUGGESTED_QUESTION_TRACE, self.suggested_question_trace(**self.kwargs) elif self.trace_type == TraceTaskName.DATASET_RETRIEVAL_TRACE: - return TraceTaskName.DATASET_RETRIEVAL_TRACE, self.process_dataset_retrieval_trace(**self.kwargs) + return TraceTaskName.DATASET_RETRIEVAL_TRACE, self.dataset_retrieval_trace(**self.kwargs) elif self.trace_type == TraceTaskName.TOOL_TRACE: - return TraceTaskName.TOOL_TRACE, self.process_tool_trace(**self.kwargs) + return TraceTaskName.TOOL_TRACE, self.tool_trace(**self.kwargs) elif self.trace_type == TraceTaskName.GENERATE_NAME_TRACE: - return TraceTaskName.GENERATE_NAME_TRACE, self.process_generate_name_trace(**self.kwargs) + return TraceTaskName.GENERATE_NAME_TRACE, self.generate_name_trace(**self.kwargs) else: return '', {} # process methods for different trace types - def process_conversation_trace(self, **kwargs): + def conversation_trace(self, **kwargs): return kwargs - def process_workflow_trace(self, **kwargs): + def workflow_trace(self, **kwargs): return kwargs - def process_message_trace(self, **kwargs): + def message_trace(self, **kwargs): message_id = kwargs.get('message_id') message_data = get_message_data(message_id) if not message_data: @@ -69,15 +69,19 @@ def process_message_trace(self, **kwargs): kwargs['conversation_mode'] = conversation_mode return kwargs - def process_moderation_trace(self, **kwargs): - message_id = kwargs.get('message_id') + def moderation_trace( + self, + 
message_id=None, + **kwargs + ): + message_id = message_id message_data = get_message_data(message_id) if not message_data: return {} kwargs['message_data'] = message_data return kwargs - def process_suggested_question_trace(self, **kwargs): + def suggested_question_trace(self, **kwargs): message_id = kwargs.get('message_id') message_data = get_message_data(message_id) if not message_data: @@ -85,7 +89,7 @@ def process_suggested_question_trace(self, **kwargs): kwargs['message_data'] = message_data return kwargs - def process_dataset_retrieval_trace(self, **kwargs): + def dataset_retrieval_trace(self, **kwargs): message_id = kwargs.get('message_id') message_data = get_message_data(message_id) if not message_data: @@ -93,7 +97,7 @@ def process_dataset_retrieval_trace(self, **kwargs): kwargs['message_data'] = message_data return kwargs - def process_tool_trace(self, **kwargs): + def tool_trace(self, **kwargs): message_id = kwargs.get('message_id') message_data = get_message_data(message_id) if not message_data: @@ -103,7 +107,7 @@ def process_tool_trace(self, **kwargs): kwargs['message_file_data'] = message_file_data return kwargs - def process_generate_name_trace(self, **kwargs): + def generate_name_trace(self, **kwargs): return kwargs diff --git a/api/services/ops_trace/utils.py b/api/core/ops/utils.py similarity index 100% rename from api/services/ops_trace/utils.py rename to api/core/ops/utils.py diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index ae1644e205019d..8836f711dd0f0a 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -12,6 +12,8 @@ from core.model_runtime.entities.message_entities import PromptMessageTool from core.model_runtime.entities.model_entities import ModelFeature, ModelType from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel +from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.utils import measure_time from core.rag.datasource.retrieval_service import RetrievalService from core.rag.models.document import Document from core.rag.rerank.rerank import RerankRunner @@ -24,8 +26,6 @@ from extensions.ext_database import db from models.dataset import Dataset, DatasetQuery, DocumentSegment from models.dataset import Document as DatasetDocument -from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName -from services.ops_trace.utils import measure_time default_retrieval_model = { 'search_method': RetrievalMethod.SEMANTIC_SEARCH, diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py index 34dea1e82b510f..534f8855333486 100644 --- a/api/core/tools/tool_engine.py +++ b/api/core/tools/tool_engine.py @@ -9,6 +9,7 @@ from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler from core.callback_handler.workflow_tool_callback_handler import DifyWorkflowCallbackHandler from core.file.file_obj import FileTransferMethod +from core.ops.base_trace_instance import BaseTraceInstance from core.tools.entities.tool_entities import ToolInvokeMessage, ToolInvokeMessageBinary, ToolInvokeMeta, ToolParameter from core.tools.errors import ( ToolEngineInvokeError, @@ -24,7 +25,6 @@ from core.tools.utils.message_transformer import ToolFileMessageTransformer from extensions.ext_database import db from models.model import Message, MessageFile -from services.ops_trace.base_trace_instance import BaseTraceInstance class 
ToolEngine: diff --git a/api/services/message_service.py b/api/services/message_service.py index 4922b775319cd9..aba334d5c25840 100644 --- a/api/services/message_service.py +++ b/api/services/message_service.py @@ -7,6 +7,8 @@ from core.memory.token_buffer_memory import TokenBufferMemory from core.model_manager import ModelManager from core.model_runtime.entities.model_entities import ModelType +from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.utils import measure_time from extensions.ext_database import db from libs.infinite_scroll_pagination import InfiniteScrollPagination from models.account import Account @@ -20,8 +22,6 @@ SuggestedQuestionsAfterAnswerDisabledError, ) from services.ops_trace.ops_trace_service import OpsTraceService -from services.ops_trace.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName -from services.ops_trace.utils import measure_time from services.workflow_service import WorkflowService diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index 69dea3771fc6e4..7528fccb050f68 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -2,11 +2,11 @@ from typing import Union from uuid import UUID +from core.ops.langfuse_trace import LangFuseDataTrace +from core.ops.langsmith_trace import LangSmithDataTrace +from core.ops.model import LangfuseConfig, LangSmithConfig, TracingProviderEnum from extensions.ext_database import db from models.model import App, AppModelConfig, Conversation, Message, TraceAppConfig -from services.ops_trace.langfuse_trace import LangFuseDataTrace -from services.ops_trace.langsmith_trace import LangSmithDataTrace -from services.ops_trace.model import LangfuseConfig, LangSmithConfig, TracingProviderEnum class OpsTraceService: From d1c8d6949a55064e5577d6a91d4fe434475a8918 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Sun, 23 Jun 2024 15:50:51 +0800 Subject: [PATCH 19/65] fix: moderation trace message_id error --- api/core/ops/trace_queue_manager.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/api/core/ops/trace_queue_manager.py b/api/core/ops/trace_queue_manager.py index 34be64ee566003..c9ae70bedb6aa9 100644 --- a/api/core/ops/trace_queue_manager.py +++ b/api/core/ops/trace_queue_manager.py @@ -71,10 +71,9 @@ def message_trace(self, **kwargs): def moderation_trace( self, - message_id=None, **kwargs ): - message_id = message_id + message_id = kwargs.get('message_id') message_data = get_message_data(message_id) if not message_data: return {} From 8d2f08dec36d307e99b395253a4c2cad46ecdb66 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Sun, 23 Jun 2024 16:00:02 +0800 Subject: [PATCH 20/65] feat: adding comments for BaseTraceInstance --- api/core/ops/base_trace_instance.py | 32 +++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/api/core/ops/base_trace_instance.py b/api/core/ops/base_trace_instance.py index d785c09b75cd79..ecf5290df47d44 100644 --- a/api/core/ops/base_trace_instance.py +++ b/api/core/ops/base_trace_instance.py @@ -2,30 +2,62 @@ class BaseTraceInstance(ABC): + """ + Base trace instance for ops trace services + """ + @abstractmethod def __init__(self): + """ + Abstract initializer for the trace instance. + All subclasses must provide their own initialization logic. + """ ... @abstractmethod def message_trace(self, **kwargs): + """ + Abstract method to trace messaging activities. 
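+        Callers typically pass identifiers such as message_id through kwargs.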
+ Subclasses must implement specific tracing logic for messages. + """ return kwargs @abstractmethod def moderation_trace(self, **kwargs): + """ + Abstract method to trace moderation activities. + Subclasses must implement specific tracing logic for content moderation. + """ return kwargs @abstractmethod def suggested_question_trace(self, **kwargs): + """ + Abstract method to trace suggested questions in a conversation or system. + Subclasses must implement specific tracing logic for tracking suggested questions. + """ return kwargs @abstractmethod def dataset_retrieval_trace(self, **kwargs): + """ + Abstract method to trace data retrieval activities. + Subclasses must implement specific tracing logic for data retrieval operations. + """ return kwargs @abstractmethod def tool_trace(self, **kwargs): + """ + Abstract method to trace the usage of tools within the system. + Subclasses must implement specific tracing logic for tool interactions. + """ return kwargs @abstractmethod def generate_name_trace(self, **kwargs): + """ + Abstract method to trace the generation of names or identifiers within the system. + Subclasses must implement specific tracing logic for name generation activities. + """ return kwargs From 0d798acea10cccda423ecd86ccbe9467b34f84df Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 14:48:33 +0800 Subject: [PATCH 21/65] fix: tracing_provider null error --- api/services/ops_trace/ops_trace_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index 7528fccb050f68..deebb0e1911f32 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -291,7 +291,7 @@ def update_app_tracing_config(cls, app_id: str, enabled: bool, tracing_provider: :return: """ # auth check - if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value]: + if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value, None, ""]: raise ValueError(f"Invalid tracing provider: {tracing_provider}") app_config: App = db.session.query(App).filter(App.id == app_id).first() From 41e2347c5c0b5cf96c5115f1c8567537e39fd5fd Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 14:53:20 +0800 Subject: [PATCH 22/65] fix: completion-messages trace instance error --- api/core/app/apps/completion/app_generator.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index 2bb6e5d04f9b65..da914766f0eb41 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -97,7 +97,7 @@ def generate(self, app_model: App, # get tracing instance tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id, + app_id=app_model.id ) # init application generate entity @@ -121,11 +121,6 @@ def generate(self, app_model: App, message ) = self._init_generate_records(application_generate_entity) - # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id - ) - # init queue manager queue_manager = MessageBasedAppQueueManager( task_id=application_generate_entity.task_id, @@ -154,7 +149,6 @@ def generate(self, app_model: App, message=message, user=user, stream=stream, - tracing_instance=tracing_instance, ) return 
CompletionAppGenerateResponseConverter.convert( From 8cb809cce3c55646637bbf979e0cfc99042932cc Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 17:30:19 +0800 Subject: [PATCH 23/65] fix: add trace entity --- api/core/llm_generator/llm_generator.py | 2 +- api/core/ops/base_trace_instance.py | 10 +- api/core/ops/entities/trace_entity.py | 112 +++++++ api/core/ops/langfuse_trace.py | 384 +++++++++--------------- api/core/ops/trace_queue_manager.py | 358 +++++++++++++++++++--- 5 files changed, 581 insertions(+), 285 deletions(-) create mode 100644 api/core/ops/entities/trace_entity.py diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index a37d47221a4797..94da53dce93d8a 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -62,7 +62,7 @@ def generate_conversation_name(cls, tenant_id: str, query, conversation_id: Opti trace_manager.add_trace_task( TraceTask( tracing_instance, - TraceTaskName.SUGGESTED_QUESTION_TRACE, + TraceTaskName.GENERATE_NAME_TRACE, conversation_id=conversation_id, generate_conversation_name=name, inputs=prompt, diff --git a/api/core/ops/base_trace_instance.py b/api/core/ops/base_trace_instance.py index ecf5290df47d44..3ffba30f88b7da 100644 --- a/api/core/ops/base_trace_instance.py +++ b/api/core/ops/base_trace_instance.py @@ -10,10 +10,18 @@ class BaseTraceInstance(ABC): def __init__(self): """ Abstract initializer for the trace instance. - All subclasses must provide their own initialization logic. + Distribute trace tasks by matching entities """ ... + @abstractmethod + def trace(self, **kwargs): + """ + Abstract method to trace activities. + Subclasses must implement specific tracing logic for activities. + """ + return kwargs + @abstractmethod def message_trace(self, **kwargs): """ diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py new file mode 100644 index 00000000000000..f2ca994d3b6417 --- /dev/null +++ b/api/core/ops/entities/trace_entity.py @@ -0,0 +1,112 @@ +from datetime import datetime +from typing import Any, Union + +from pydantic import BaseModel + + +class WorkflowTraceInfo(BaseModel): + workflow_data: Any + conversation_id: Union[str, None] + workflow_id: str + tenant_id: str + workflow_run_id: str + workflow_run_elapsed_time: Union[int, float] + workflow_run_status: str + workflow_run_inputs: dict[str, Any] + workflow_run_outputs: dict[str, Any] + workflow_run_version: str + error: str + total_tokens: int + file_list: list[str] + query: str + metadata: dict[str, Any] + + +class MessageTraceInfo(BaseModel): + message_data: Any + conversation_model: str + message_tokens: int + answer_tokens: int + total_tokens: int + error: str + inputs: Union[str, dict[str, Any], list, None] + outputs: Union[str, dict[str, Any], list, None] + file_list: list[str] + created_at: datetime + end_time: datetime + metadata: dict[str, Any] + message_file_data: Any + conversation_mode: str + + +class ModerationTraceInfo(BaseModel): + message_id: str + inputs: dict[str, Any] + message_data: Any + flagged: bool + action: str + preset_response: str + query: str + start_time: datetime + end_time: datetime + metadata: dict[str, Any] + + +# +class SuggestedQuestionTraceInfo(BaseModel): + message_id: str + message_data: Any + inputs: Union[str, dict[str, Any], list, None] + outputs: Union[str, dict[str, Any], list, None] + start_time: datetime + end_time: datetime + metadata: dict[str, Any] + total_tokens: int + status: 
Union[str, None] + error: Union[str, None] + from_account_id: str + agent_based: bool + from_source: str + model_provider: str + model_id: str + suggested_question: list[str] + level: str + status_message: Union[str, None] + + +class DatasetRetrievalTraceInfo(BaseModel): + message_id: str + inputs: Union[str, dict[str, Any], list, None] + documents: Any + start_time: datetime + end_time: datetime + metadata: dict[str, Any] + + +class ToolTraceInfo(BaseModel): + message_id: str + message_data: Any + tool_name: str + start_time: datetime + end_time: datetime + tool_inputs: dict[str, Any] + tool_outputs: str + metadata: dict[str, Any] + message_file_data: Any + error: Union[str, None] + inputs: Union[str, dict[str, Any], list, None] + outputs: Union[str, dict[str, Any], list, None] + tool_config: dict[str, Any] + time_cost: Union[int, float] + tool_parameters: dict[str, Any] + + +class GenerateNameTraceInfo(BaseModel): + conversation_id: str + inputs: Union[str, dict[str, Any], list, None] + outputs: Union[str, dict[str, Any], list, None] + start_time: datetime + end_time: datetime + metadata: dict[str, Any] + tenant_id: str + diff --git a/api/core/ops/langfuse_trace.py b/api/core/ops/langfuse_trace.py index 3e1664edb2efbf..b5420762503b3a 100644 --- a/api/core/ops/langfuse_trace.py +++ b/api/core/ops/langfuse_trace.py @@ -17,12 +17,21 @@ LevelEnum, UnitEnum, ) +from core.ops.entities.trace_entity import ( + DatasetRetrievalTraceInfo, + GenerateNameTraceInfo, + MessageTraceInfo, + ModerationTraceInfo, + SuggestedQuestionTraceInfo, + ToolTraceInfo, + WorkflowTraceInfo, +) from core.ops.model import LangfuseConfig from core.ops.utils import filter_none_values from extensions.ext_database import db from models.dataset import Document -from models.model import Message, MessageAgentThought, MessageFile -from models.workflow import WorkflowNodeExecution, WorkflowRun +from models.model import MessageFile +from models.workflow import WorkflowNodeExecution logger = logging.getLogger(__name__) @@ -42,47 +51,31 @@ def __init__( ) self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") - def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): - conversion_id = kwargs.get("conversation_id") - workflow_id = workflow_run.workflow_id - tenant_id = workflow_run.tenant_id - workflow_run_id = workflow_run.id - workflow_run_elapsed_time = workflow_run.elapsed_time - workflow_run_status = workflow_run.status - workflow_run_inputs = ( - json.loads(workflow_run.inputs) if workflow_run.inputs else {} - ) - workflow_run_outputs = ( - json.loads(workflow_run.outputs) if workflow_run.outputs else {} - ) - workflow_run_version = workflow_run.version - error = workflow_run.error if workflow_run.error else "" - - total_tokens = workflow_run.total_tokens - - file_list = workflow_run_inputs.get("sys.file") if workflow_run_inputs.get("sys.file") else [] - query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or "" - - metadata = { - "workflow_id": workflow_id, - "conversation_id": conversion_id, - "workflow_run_id": workflow_run_id, - "tenant_id": tenant_id, - "elapsed_time": workflow_run_elapsed_time, - "status": workflow_run_status, - "version": workflow_run_version, - "total_tokens": total_tokens, - "file_list": file_list, - } - + def trace(self, trace_info, **kwargs): + if isinstance(trace_info, WorkflowTraceInfo): + self.workflow_trace(trace_info) + if isinstance(trace_info, MessageTraceInfo): + self.message_trace(trace_info) + if isinstance(trace_info, ModerationTraceInfo): 
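+            # moderation results are recorded as a span attached to the message trace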
+ self.moderation_trace(trace_info) + if isinstance(trace_info, SuggestedQuestionTraceInfo): + self.suggested_question_trace(trace_info) + if isinstance(trace_info, DatasetRetrievalTraceInfo): + self.dataset_retrieval_trace(trace_info) + if isinstance(trace_info, ToolTraceInfo): + self.tool_trace(trace_info) + if isinstance(trace_info, GenerateNameTraceInfo): + self.generate_name_trace(trace_info) + + def workflow_trace(self, trace_info: WorkflowTraceInfo): trace_data = LangfuseTrace( - id=workflow_run_id, - name=f"workflow_{workflow_run_id}", - user_id=tenant_id, - input=query, - output=workflow_run_outputs, - metadata=metadata, - session_id=conversion_id if conversion_id else workflow_run_id, + id=trace_info.workflow_run_id, + name=f"workflow_{trace_info.workflow_run_id}", + user_id=trace_info.tenant_id, + input=trace_info.query, + output=trace_info.workflow_run_outputs, + metadata=trace_info.metadata, + session_id=trace_info.conversation_id if trace_info.conversation_id else trace_info.workflow_run_id, tags=["workflow"], ) @@ -91,7 +84,7 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): # through workflow_run_id get all_nodes_execution workflow_nodes_executions = ( db.session.query(WorkflowNodeExecution) - .filter(WorkflowNodeExecution.workflow_run_id == workflow_run_id) + .filter(WorkflowNodeExecution.workflow_run_id == trace_info.workflow_run_id) .order_by(WorkflowNodeExecution.index.desc()) .all() ) @@ -117,7 +110,7 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): metadata = json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {} metadata.update( { - "workflow_run_id": workflow_run_id, + "workflow_run_id": trace_info.workflow_run_id, "node_execution_id": node_execution_id, "tenant_id": tenant_id, "app_id": app_id, @@ -137,14 +130,14 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): langfuse_generation_data = LangfuseGeneration( name=f"{node_name}_{node_execution_id}", - trace_id=workflow_run_id, + trace_id=trace_info.workflow_run_id, start_time=created_at, end_time=finished_at, input=inputs, output=outputs, metadata=metadata, level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR, - status_message=error if error else "", + status_message=trace_info.error if trace_info.error else "", usage=generation_usage, ) @@ -155,83 +148,65 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs): name=f"{node_name}_{node_execution_id}", input=inputs, output=outputs, - trace_id=workflow_run_id, + trace_id=trace_info.workflow_run_id, start_time=created_at, end_time=finished_at, metadata=metadata, level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR, - status_message=error if error else "", + status_message=trace_info.error if trace_info.error else "", ) self.add_span(langfuse_span_data=span_data) - def message_trace(self, message_id: str, conversation_id: str, **kwargs): - message_data = kwargs.get("message_data") - conversation_mode = kwargs.get("conversation_mode") - message_tokens = message_data.message_tokens - answer_tokens = message_data.answer_tokens - total_tokens = message_tokens + answer_tokens - error = message_data.error if message_data.error else "" - input = message_data.message - file_list = input[0].get("files", []) - provider_response_latency = message_data.provider_response_latency - created_at = message_data.created_at - end_time = created_at + timedelta(seconds=provider_response_latency) - + def message_trace( + self, trace_info: MessageTraceInfo, **kwargs 
+ ): # get message file data - message_file_data: MessageFile = kwargs.get("message_file_data") + file_list = trace_info.file_list + message_file_data: MessageFile = trace_info.message_file_data file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else "" file_list.append(file_url) - - metadata = { - "conversation_id": conversation_id, - "ls_provider": message_data.model_provider, - "ls_model_name": message_data.model_id, - "status": message_data.status, - "from_end_user_id": message_data.from_account_id, - "from_account_id": message_data.from_account_id, - "agent_based": message_data.agent_based, - "workflow_run_id": message_data.workflow_run_id, - "from_source": message_data.from_source, - } + metadata = trace_info.metadata + message_data = trace_info.message_data + message_id = message_data.id trace_data = LangfuseTrace( id=message_id, user_id=message_data.from_end_user_id if message_data.from_end_user_id else message_data.from_account_id, name=f"message_{message_id}", input={ - "message": input, + "message": trace_info.inputs, "files": file_list, - "message_tokens": message_tokens, - "answer_tokens": answer_tokens, - "total_tokens": total_tokens, - "error": error, - "provider_response_latency": provider_response_latency, - "created_at": created_at, + "message_tokens": trace_info.message_tokens, + "answer_tokens": trace_info.answer_tokens, + "total_tokens": trace_info.total_tokens, + "error": trace_info.error, + "provider_response_latency": message_data.provider_response_latency, + "created_at": trace_info.created_at, }, - output=message_data.answer, + output=trace_info.outputs, metadata=metadata, - session_id=conversation_id, - tags=["message", str(conversation_mode)], - ) + session_id=message_data.conversation_id, + tags=["message", str(trace_info.conversation_mode)], + version=None, release=None, public=None, ) self.add_trace(langfuse_trace_data=trace_data) # start add span generation_usage = GenerationUsage( - totalTokens=total_tokens, - input=message_tokens, - output=answer_tokens, - total=total_tokens, + totalTokens=trace_info.total_tokens, + input=trace_info.message_tokens, + output=trace_info.answer_tokens, + total=trace_info.total_tokens, unit=UnitEnum.TOKENS, ) langfuse_generation_data = LangfuseGeneration( name=f"generation_{message_id}", trace_id=message_id, - start_time=created_at, - end_time=end_time, + start_time=trace_info.created_at, + end_time=trace_info.end_time, model=message_data.model_id, - input=input, + input=trace_info.inputs, output=message_data.answer, metadata=metadata, level=LevelEnum.DEFAULT if message_data.status != 'error' else LevelEnum.ERROR, @@ -241,76 +216,47 @@ def message_trace(self, message_id: str, conversation_id: str, **kwargs): self.add_generation(langfuse_generation_data) - def moderation_trace(self, message_id: str, moderation_result: ModerationInputsResult, **kwargs): - inputs = kwargs.get("inputs") - message_data = kwargs.get("message_data") - flagged = moderation_result.flagged - action = moderation_result.action - preset_response = moderation_result.preset_response - query = moderation_result.query - timer = kwargs.get("timer") - start_time = timer.get("start") - end_time = timer.get("end") - - metadata = { - "message_id": message_id, - "action": action, - "preset_response": preset_response, - "query": query, - } - + def moderation_trace( + self, trace_info: ModerationTraceInfo, message_id: str = None, moderation_result: ModerationInputsResult = None, + **kwargs + ): span_data = LangfuseSpan( name="moderation", 
- input=inputs, + input=trace_info.inputs, output={ - "action": action, - "flagged": flagged, - "preset_response": preset_response, - "inputs": inputs, + "action": trace_info.action, + "flagged": trace_info.flagged, + "preset_response": trace_info.preset_response, + "inputs": trace_info.inputs, }, - trace_id=message_id, - start_time=start_time or message_data.created_at, - end_time=end_time or message_data.created_at, - metadata=metadata, + trace_id=trace_info.message_id, + start_time=trace_info.start_time or trace_info.message_data.created_at, + end_time=trace_info.end_time or trace_info.message_data.created_at, + metadata=trace_info.metadata, ) self.add_span(langfuse_span_data=span_data) - def suggested_question_trace(self, message_id: str, suggested_question: str, **kwargs): - message_data = kwargs.get("message_data") - timer = kwargs.get("timer") - start_time = timer.get("start") - end_time = timer.get("end") - input = message_data.query - - metadata = { - "message_id": message_id, - "ls_provider": message_data.model_provider, - "ls_model_name": message_data.model_id, - "status": message_data.status, - "from_end_user_id": message_data.from_account_id, - "from_account_id": message_data.from_account_id, - "agent_based": message_data.agent_based, - "workflow_run_id": message_data.workflow_run_id, - "from_source": message_data.from_source, - } - + def suggested_question_trace( + self, trace_info: SuggestedQuestionTraceInfo, message_id: str = None, suggested_question: str = None, **kwargs + ): + message_data = trace_info.message_data generation_usage = GenerationUsage( - totalTokens=len(suggested_question), - input=len(input), - output=len(suggested_question), - total=len(suggested_question), + totalTokens=len(str(trace_info.suggested_question)), + input=len(trace_info.inputs), + output=len(trace_info.suggested_question), + total=len(trace_info.suggested_question), unit=UnitEnum.CHARACTERS, ) generation_data = LangfuseGeneration( name="suggested_question", - input=input, - output=str(suggested_question), - trace_id=message_id, - start_time=start_time, - end_time=end_time, - metadata=metadata, + input=trace_info.inputs, + output=str(trace_info.suggested_question), + trace_id=trace_info.message_id, + start_time=trace_info.start_time, + end_time=trace_info.end_time, + metadata=trace_info.metadata, level=LevelEnum.DEFAULT if message_data.status != 'error' else LevelEnum.ERROR, status_message=message_data.error if message_data.error else "", usage=generation_usage, @@ -318,120 +264,78 @@ def suggested_question_trace(self, message_id: str, suggested_question: str, **k self.add_generation(langfuse_generation_data=generation_data) - def dataset_retrieval_trace(self, message_id: str, documents: list[Document], **kwargs): - message_data = kwargs.get("message_data") - inputs = message_data.query if message_data.query else message_data.inputs - metadata = { - "message_id": message_id, - "documents": documents - } - timer = kwargs.get("timer") - start_time = timer.get("start") - end_time = timer.get("end") - + def dataset_retrieval_trace( + self, trace_info: DatasetRetrievalTraceInfo, message_id: str = None, documents: list[Document] = None, **kwargs + ): dataset_retrieval_span_data = LangfuseSpan( name="dataset_retrieval", - input=inputs, - output={"documents": documents}, - trace_id=message_id, - start_time=start_time, - end_time=end_time, - metadata=metadata, + input=trace_info.inputs, + output={"documents": trace_info.documents}, + trace_id=trace_info.message_id, + start_time=trace_info.start_time, 
+ end_time=trace_info.end_time, + metadata=trace_info.metadata, ) self.add_span(langfuse_span_data=dataset_retrieval_span_data) - def tool_trace(self, message_id: str, tool_name: str, tool_inputs: dict[str, Any], tool_outputs: str, **kwargs): - message_data: Message = kwargs.get("message_data") - created_time = message_data.created_at - end_time = message_data.updated_at - tool_config = {} - time_cost = 0 - error = None - tool_parameters = {} - - agent_thoughts: list[MessageAgentThought] = message_data.agent_thoughts - for agent_thought in agent_thoughts: - if tool_name in agent_thought.tools: - created_time = agent_thought.created_at - tool_meta_data = agent_thought.tool_meta.get(tool_name, {}) - tool_config = tool_meta_data.get('tool_config', {}) - time_cost = tool_meta_data.get('time_cost', 0) - end_time = created_time + timedelta(seconds=time_cost) - error = tool_meta_data.get('error', "") - tool_parameters = tool_meta_data.get('tool_parameters', {}) - + def tool_trace( + self, trace_info: ToolTraceInfo, message_id: str = None, tool_name: str = None, + tool_inputs: dict[str, Any] = None, + tool_outputs: str = None, + **kwargs + ): metadata = { - "message_id": message_id, - "tool_name": tool_name, - "tool_inputs": tool_inputs, - "tool_outputs": tool_outputs, - "tool_config": tool_config, - "time_cost": time_cost, - "error": error, - "tool_parameters": tool_parameters, + "message_id": trace_info.message_id, + "tool_name": trace_info.tool_name, + "tool_inputs": trace_info.tool_inputs, + "tool_outputs": trace_info.tool_outputs, + "tool_config": trace_info.tool_config, + "time_cost": trace_info.time_cost, + "error": trace_info.error, + "tool_parameters": trace_info.tool_parameters, } - - # get message file data - message_file_data: MessageFile = kwargs.get("message_file_data") - if message_file_data: - message_file_id = message_file_data.id if message_file_data else None - type = message_file_data.type - created_by_role = message_file_data.created_by_role - created_user_id = message_file_data.created_by - - metadata.update( - { - "message_file_id": message_file_id, - "created_by_role": created_by_role, - "created_user_id": created_user_id, - "type": type, - } - ) - tool_span_data = LangfuseSpan( - name=tool_name, - input=tool_inputs, - output=tool_outputs, - trace_id=message_id, - start_time=created_time, - end_time=end_time, + name=trace_info.tool_name, + input=trace_info.tool_inputs, + output=trace_info.tool_outputs, + trace_id=trace_info.message_id, + start_time=trace_info.start_time, + end_time=trace_info.end_time, metadata=metadata, - level=LevelEnum.DEFAULT if error == "" else LevelEnum.ERROR, - status_message=error, + level=LevelEnum.DEFAULT if trace_info.error == "" else LevelEnum.ERROR, + status_message=trace_info.error, ) self.add_span(langfuse_span_data=tool_span_data) - def generate_name_trace(self, conversation_id: str, inputs: str, generate_conversation_name: str, **kwargs): - timer = kwargs.get("timer") - tenant_id = kwargs.get("tenant_id") - start_time = timer.get("start") - end_time = timer.get("end") - - metadata = { - "conversation_id": conversation_id, - } - + def generate_name_trace( + self, + trace_info: GenerateNameTraceInfo, + conversation_id: str = None, + inputs: str = None, + generate_conversation_name: str = None, + **kwargs + ): name_generation_trace_data = LangfuseTrace( name="generate_name", - input=inputs, - output=generate_conversation_name, - user_id=tenant_id, - metadata=metadata, - session_id=conversation_id, + input=trace_info.inputs, + 
output=trace_info.outputs, + user_id=trace_info.tenant_id, + metadata=trace_info.metadata, + session_id=trace_info.conversation_id, ) self.add_trace(langfuse_trace_data=name_generation_trace_data) name_generation_span_data = LangfuseSpan( name="generate_name", - input=inputs, - output=generate_conversation_name, - trace_id=conversation_id, - start_time=start_time, - end_time=end_time, - metadata=metadata, + input=trace_info.inputs, + output=trace_info.outputs, + trace_id=trace_info.conversation_id, + start_time=trace_info.start_time, + end_time=trace_info.end_time, + metadata=trace_info.metadata, ) self.add_span(langfuse_span_data=name_generation_span_data) diff --git a/api/core/ops/trace_queue_manager.py b/api/core/ops/trace_queue_manager.py index c9ae70bedb6aa9..23597768884564 100644 --- a/api/core/ops/trace_queue_manager.py +++ b/api/core/ops/trace_queue_manager.py @@ -1,10 +1,24 @@ +import json +import os import queue import threading +from datetime import timedelta from enum import Enum +from typing import Any +from core.ops.entities.trace_entity import ( + DatasetRetrievalTraceInfo, + GenerateNameTraceInfo, + MessageTraceInfo, + ModerationTraceInfo, + SuggestedQuestionTraceInfo, + ToolTraceInfo, + WorkflowTraceInfo, +) from core.ops.utils import get_message_data from extensions.ext_database import db -from models.model import Conversation, MessageFile +from models.model import Conversation, MessageAgentThought, MessageFile +from models.workflow import WorkflowRun class TraceTaskName(str, Enum): @@ -19,33 +33,57 @@ class TraceTaskName(str, Enum): class TraceTask: - def __init__(self, trace_instance, trace_type, **kwargs): + def __init__( + self, + trace_instance: Any, + trace_type: Any, + message_id: str = None, + workflow_run: WorkflowRun = None, + conversation_id: str = None, + timer: Any = None, + **kwargs + ): self.trace_instance = trace_instance self.trace_type = trace_type + self.message_id = message_id + self.workflow_run = workflow_run + self.conversation_id = conversation_id + self.timer = timer self.kwargs = kwargs + self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") def execute(self): - method_name, processed_kwargs = self.preprocess() - method = getattr(self.trace_instance, method_name) - method(**processed_kwargs) + # method_name, processed_kwargs = self.preprocess() + # method = self.trace_instance.trace + # method(**processed_kwargs) + + method_name, trace_info = self.preprocess() + method = self.trace_instance.trace + method(trace_info) def preprocess(self): if self.trace_type == TraceTaskName.CONVERSATION_TRACE: return TraceTaskName.CONVERSATION_TRACE, self.conversation_trace(**self.kwargs) if self.trace_type == TraceTaskName.WORKFLOW_TRACE: - return TraceTaskName.WORKFLOW_TRACE, self.workflow_trace(**self.kwargs) + return TraceTaskName.WORKFLOW_TRACE, self.workflow_trace(self.workflow_run, self.conversation_id) elif self.trace_type == TraceTaskName.MESSAGE_TRACE: - return TraceTaskName.MESSAGE_TRACE, self.message_trace(**self.kwargs) + return TraceTaskName.MESSAGE_TRACE, self.message_trace(self.message_id) elif self.trace_type == TraceTaskName.MODERATION_TRACE: - return TraceTaskName.MODERATION_TRACE, self.moderation_trace(**self.kwargs) + return TraceTaskName.MODERATION_TRACE, self.moderation_trace(self.message_id, self.timer, **self.kwargs) elif self.trace_type == TraceTaskName.SUGGESTED_QUESTION_TRACE: - return TraceTaskName.SUGGESTED_QUESTION_TRACE, self.suggested_question_trace(**self.kwargs) + return TraceTaskName.SUGGESTED_QUESTION_TRACE, 
self.suggested_question_trace( + self.message_id, self.timer, **self.kwargs + ) elif self.trace_type == TraceTaskName.DATASET_RETRIEVAL_TRACE: - return TraceTaskName.DATASET_RETRIEVAL_TRACE, self.dataset_retrieval_trace(**self.kwargs) + return TraceTaskName.DATASET_RETRIEVAL_TRACE, self.dataset_retrieval_trace( + self.message_id, self.timer, **self.kwargs + ) elif self.trace_type == TraceTaskName.TOOL_TRACE: - return TraceTaskName.TOOL_TRACE, self.tool_trace(**self.kwargs) + return TraceTaskName.TOOL_TRACE, self.tool_trace(self.message_id, **self.kwargs) elif self.trace_type == TraceTaskName.GENERATE_NAME_TRACE: - return TraceTaskName.GENERATE_NAME_TRACE, self.generate_name_trace(**self.kwargs) + return TraceTaskName.GENERATE_NAME_TRACE, self.generate_name_trace( + self.conversation_id, self.timer, **self.kwargs + ) else: return '', {} @@ -53,61 +91,295 @@ def preprocess(self): def conversation_trace(self, **kwargs): return kwargs - def workflow_trace(self, **kwargs): - return kwargs + def workflow_trace(self, workflow_run: WorkflowRun, conversation_id): + workflow_id = workflow_run.workflow_id + tenant_id = workflow_run.tenant_id + workflow_run_id = workflow_run.id + workflow_run_elapsed_time = workflow_run.elapsed_time + workflow_run_status = workflow_run.status + workflow_run_inputs = ( + json.loads(workflow_run.inputs) if workflow_run.inputs else {} + ) + workflow_run_outputs = ( + json.loads(workflow_run.outputs) if workflow_run.outputs else {} + ) + workflow_run_version = workflow_run.version + error = workflow_run.error if workflow_run.error else "" + + total_tokens = workflow_run.total_tokens + + file_list = workflow_run_inputs.get("sys.file") if workflow_run_inputs.get("sys.file") else [] + query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or "" + + metadata = { + "workflow_id": workflow_id, + "conversation_id": conversation_id, + "workflow_run_id": workflow_run_id, + "tenant_id": tenant_id, + "elapsed_time": workflow_run_elapsed_time, + "status": workflow_run_status, + "version": workflow_run_version, + "total_tokens": total_tokens, + "file_list": file_list, + "triggered_form": workflow_run.triggered_from, + } + + workflow_trace_info = WorkflowTraceInfo( + workflow_data=workflow_run, + conversation_id=conversation_id, + workflow_id=workflow_id, + tenant_id=tenant_id, + workflow_run_id=workflow_run_id, + workflow_run_elapsed_time=workflow_run_elapsed_time, + workflow_run_status=workflow_run_status, + workflow_run_inputs=workflow_run_inputs, + workflow_run_outputs=workflow_run_outputs, + workflow_run_version=workflow_run_version, + error=error, + total_tokens=total_tokens, + file_list=file_list, + query=query, + metadata=metadata, + ) + + return workflow_trace_info - def message_trace(self, **kwargs): - message_id = kwargs.get('message_id') + def message_trace(self, message_id): message_data = get_message_data(message_id) if not message_data: return {} - message_file_data = db.session.query(MessageFile).filter_by(message_id=message_id).first() conversation_mode = db.session.query(Conversation.mode).filter_by(id=message_data.conversation_id).first() conversation_mode = conversation_mode[0] - kwargs['message_data'] = message_data - kwargs['message_file_data'] = message_file_data - kwargs['conversation_mode'] = conversation_mode - return kwargs + created_at = message_data.created_at + inputs = message_data.message - def moderation_trace( - self, - **kwargs - ): - message_id = kwargs.get('message_id') + # get message file data + message_file_data = 
db.session.query(MessageFile).filter_by(message_id=message_id).first() + file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else "" + file_list = inputs[0].get("files", []) + file_list.append(file_url) + + metadata = { + "conversation_id": message_data.conversation_id, + "ls_provider": message_data.model_provider, + "ls_model_name": message_data.model_id, + "status": message_data.status, + "from_end_user_id": message_data.from_account_id, + "from_account_id": message_data.from_account_id, + "agent_based": message_data.agent_based, + "workflow_run_id": message_data.workflow_run_id, + "from_source": message_data.from_source, + } + + message_tokens = message_data.message_tokens + + message_trace_info = MessageTraceInfo( + message_data=message_data, + conversation_model=conversation_mode, + message_tokens=message_tokens, + answer_tokens=message_data.answer_tokens, + total_tokens=message_tokens + message_data.answer_tokens, + error=message_data.error if message_data.error else "", + inputs=inputs, + outputs=message_data.answer, + file_list=message_data.message[0].get("files", []), + created_at=created_at, + end_time=created_at + timedelta(seconds=message_data.provider_response_latency), + metadata=metadata, + message_file_data=message_file_data, + conversation_mode=conversation_mode, + ) + + return message_trace_info + + def moderation_trace(self, message_id, timer, **kwargs): + moderation_result = kwargs.get("moderation_result") + inputs = kwargs.get("inputs") message_data = get_message_data(message_id) if not message_data: return {} - kwargs['message_data'] = message_data - return kwargs + metadata = { + "message_id": message_id, + "action": moderation_result.action, + "preset_response": moderation_result.preset_response, + "query": moderation_result.query, + } + moderation_trace_info = ModerationTraceInfo( + message_id=message_id, + inputs=inputs, + message_data=message_data, + flagged=moderation_result.flagged, + action=moderation_result.action, + preset_response=moderation_result.preset_response, + query=moderation_result.query, + start_time=timer.get("start"), + end_time=timer.get("end"), + metadata=metadata, + ) - def suggested_question_trace(self, **kwargs): - message_id = kwargs.get('message_id') + return moderation_trace_info + + def suggested_question_trace(self, message_id, timer, **kwargs): + suggested_question = kwargs.get("suggested_question") message_data = get_message_data(message_id) if not message_data: return {} - kwargs['message_data'] = message_data - return kwargs + metadata = { + "message_id": message_id, + "ls_provider": message_data.model_provider, + "ls_model_name": message_data.model_id, + "status": message_data.status, + "from_end_user_id": message_data.from_account_id, + "from_account_id": message_data.from_account_id, + "agent_based": message_data.agent_based, + "workflow_run_id": message_data.workflow_run_id, + "from_source": message_data.from_source, + } + suggested_question_trace_info = SuggestedQuestionTraceInfo( + message_id=message_id, + message_data=message_data, + inputs=message_data.message, + outputs=message_data.answer, + start_time=timer.get("start"), + end_time=timer.get("end"), + metadata=metadata, + total_tokens=message_data.message_tokens + message_data.answer_tokens, + status=message_data.status, + error=message_data.error, + from_account_id=message_data.from_account_id, + agent_based=message_data.agent_based, + from_source=message_data.from_source, + model_provider=message_data.model_provider, + 
model_id=message_data.model_id, + suggested_question=suggested_question, + level=message_data.status, + status_message=message_data.error, + ) - def dataset_retrieval_trace(self, **kwargs): - message_id = kwargs.get('message_id') + return suggested_question_trace_info + + def dataset_retrieval_trace(self, message_id, timer, **kwargs): + documents = kwargs.get("documents") message_data = get_message_data(message_id) if not message_data: return {} - kwargs['message_data'] = message_data - return kwargs - def tool_trace(self, **kwargs): - message_id = kwargs.get('message_id') + metadata = { + "message_id": message_id, + "ls_provider": message_data.model_provider, + "ls_model_name": message_data.model_id, + "status": message_data.status, + "from_end_user_id": message_data.from_account_id, + "from_account_id": message_data.from_account_id, + "agent_based": message_data.agent_based, + "workflow_run_id": message_data.workflow_run_id, + "from_source": message_data.from_source, + } + + dataset_retrieval_trace_info = DatasetRetrievalTraceInfo( + message_id=message_id, + inputs=message_data.query if message_data.query else message_data.inputs, + documents=documents, + start_time=timer.get("start"), + end_time=timer.get("end"), + metadata=metadata, + ) + + return dataset_retrieval_trace_info + + def tool_trace(self, message_id, **kwargs): + tool_name = kwargs.get('tool_name') + tool_inputs = kwargs.get('tool_inputs') + tool_outputs = kwargs.get('tool_outputs') message_data = get_message_data(message_id) if not message_data: return {} + tool_config = {} + time_cost = 0 + error = None + tool_parameters = {} + created_time = message_data.created_at + end_time = message_data.updated_at + agent_thoughts: list[MessageAgentThought] = message_data.agent_thoughts + for agent_thought in agent_thoughts: + if tool_name in agent_thought.tools: + created_time = agent_thought.created_at + tool_meta_data = agent_thought.tool_meta.get(tool_name, {}) + tool_config = tool_meta_data.get('tool_config', {}) + time_cost = tool_meta_data.get('time_cost', 0) + end_time = created_time + timedelta(seconds=time_cost) + error = tool_meta_data.get('error', "") + tool_parameters = tool_meta_data.get('tool_parameters', {}) + metadata = { + "message_id": message_id, + "tool_name": tool_name, + "tool_inputs": tool_inputs, + "tool_outputs": tool_outputs, + "tool_config": tool_config, + "time_cost": time_cost, + "error": error, + "tool_parameters": tool_parameters, + } + message_file_data = db.session.query(MessageFile).filter_by(message_id=message_id).first() - kwargs['message_data'] = message_data - kwargs['message_file_data'] = message_file_data - return kwargs + if message_file_data: + message_file_id = message_file_data.id if message_file_data else None + type = message_file_data.type + created_by_role = message_file_data.created_by_role + created_user_id = message_file_data.created_by - def generate_name_trace(self, **kwargs): - return kwargs + metadata.update( + { + "message_file_id": message_file_id, + "created_by_role": created_by_role, + "created_user_id": created_user_id, + "type": type, + } + ) + + tool_trace_info = ToolTraceInfo( + message_id=message_id, + message_data=message_data, + tool_name=tool_name, + start_time=created_time, + end_time=end_time, + tool_inputs=tool_inputs, + tool_outputs=tool_outputs, + metadata=metadata, + message_file_data=message_file_data, + error=error, + inputs=message_data.message, + outputs=message_data.answer, + tool_config=tool_config, + time_cost=time_cost, + 
tool_parameters=tool_parameters, + ) + + return tool_trace_info + + def generate_name_trace(self, conversation_id, timer, **kwargs): + generate_conversation_name = kwargs.get("generate_conversation_name") + inputs = kwargs.get("inputs") + tenant_id = kwargs.get("tenant_id") + start_time = timer.get("start") + end_time = timer.get("end") + + metadata = { + "conversation_id": conversation_id, + "tenant_id": tenant_id, + } + + generate_name_trace_info = GenerateNameTraceInfo( + conversation_id=conversation_id, + inputs=inputs, + outputs=generate_conversation_name, + start_time=start_time, + end_time=end_time, + metadata=metadata, + tenant_id=tenant_id, + ) + + return generate_name_trace_info class TraceQueueManager: From 41e936d8ec55f88c5cf5f325593b7dfe35bb5b18 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 17:48:45 +0800 Subject: [PATCH 24/65] feat: add langfuse and langsmith trace class --- api/core/ops/entities/trace_entity.py | 6 +- api/core/ops/langfuse_trace.py | 49 +--- api/core/ops/langsmith_trace.py | 340 ++++++++------------------ api/core/ops/trace_queue_manager.py | 6 +- 4 files changed, 123 insertions(+), 278 deletions(-) diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index f2ca994d3b6417..375dc8c50de0b3 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -15,7 +15,7 @@ class WorkflowTraceInfo(BaseModel): workflow_run_inputs: dict[str, Any] workflow_run_outputs: dict[str, Any] workflow_run_version: str - error: str + error: Union[str, None] total_tokens: int file_list: list[str] query: str @@ -32,7 +32,7 @@ class MessageTraceInfo(BaseModel): inputs: Union[str, dict[str, Any], list, None] outputs: Union[str, dict[str, Any], list, None] file_list: list[str] - created_at: datetime + start_at: datetime end_time: datetime metadata: dict[str, Any] message_file_data: Any @@ -81,6 +81,7 @@ class DatasetRetrievalTraceInfo(BaseModel): start_time: datetime end_time: datetime metadata: dict[str, Any] + message_data: Any class ToolTraceInfo(BaseModel): @@ -99,6 +100,7 @@ class ToolTraceInfo(BaseModel): tool_config: dict[str, Any] time_cost: Union[int, float] tool_parameters: dict[str, Any] + file_url: Union[str, None, list] class GenerateNameTraceInfo(BaseModel): diff --git a/api/core/ops/langfuse_trace.py b/api/core/ops/langfuse_trace.py index b5420762503b3a..b4bac502d62394 100644 --- a/api/core/ops/langfuse_trace.py +++ b/api/core/ops/langfuse_trace.py @@ -2,12 +2,11 @@ import logging import os from datetime import datetime, timedelta -from typing import Any, Optional +from typing import Optional from langfuse import Langfuse from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token -from core.moderation.base import ModerationInputsResult from core.ops.base_trace_instance import BaseTraceInstance from core.ops.entities.langfuse_trace_entity import ( GenerationUsage, @@ -29,7 +28,6 @@ from core.ops.model import LangfuseConfig from core.ops.utils import filter_none_values from extensions.ext_database import db -from models.dataset import Document from models.model import MessageFile from models.workflow import WorkflowNodeExecution @@ -216,10 +214,7 @@ def message_trace( self.add_generation(langfuse_generation_data) - def moderation_trace( - self, trace_info: ModerationTraceInfo, message_id: str = None, moderation_result: ModerationInputsResult = None, - **kwargs - ): + def moderation_trace(self, trace_info: ModerationTraceInfo, **kwargs): span_data = 
LangfuseSpan( name="moderation", input=trace_info.inputs, @@ -237,9 +232,7 @@ def moderation_trace( self.add_span(langfuse_span_data=span_data) - def suggested_question_trace( - self, trace_info: SuggestedQuestionTraceInfo, message_id: str = None, suggested_question: str = None, **kwargs - ): + def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo, **kwargs): message_data = trace_info.message_data generation_usage = GenerationUsage( totalTokens=len(str(trace_info.suggested_question)), @@ -264,37 +257,20 @@ def suggested_question_trace( self.add_generation(langfuse_generation_data=generation_data) - def dataset_retrieval_trace( - self, trace_info: DatasetRetrievalTraceInfo, message_id: str = None, documents: list[Document] = None, **kwargs - ): + def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo, **kwargs): dataset_retrieval_span_data = LangfuseSpan( name="dataset_retrieval", input=trace_info.inputs, output={"documents": trace_info.documents}, trace_id=trace_info.message_id, - start_time=trace_info.start_time, - end_time=trace_info.end_time, + start_time=trace_info.start_time or trace_info.message_data.created_at, + end_time=trace_info.end_time or trace_info.message_data.updated_at, metadata=trace_info.metadata, ) self.add_span(langfuse_span_data=dataset_retrieval_span_data) - def tool_trace( - self, trace_info: ToolTraceInfo, message_id: str = None, tool_name: str = None, - tool_inputs: dict[str, Any] = None, - tool_outputs: str = None, - **kwargs - ): - metadata = { - "message_id": trace_info.message_id, - "tool_name": trace_info.tool_name, - "tool_inputs": trace_info.tool_inputs, - "tool_outputs": trace_info.tool_outputs, - "tool_config": trace_info.tool_config, - "time_cost": trace_info.time_cost, - "error": trace_info.error, - "tool_parameters": trace_info.tool_parameters, - } + def tool_trace(self, trace_info: ToolTraceInfo, **kwargs): tool_span_data = LangfuseSpan( name=trace_info.tool_name, input=trace_info.tool_inputs, @@ -302,21 +278,14 @@ def tool_trace( trace_id=trace_info.message_id, start_time=trace_info.start_time, end_time=trace_info.end_time, - metadata=metadata, + metadata=trace_info.metadata, level=LevelEnum.DEFAULT if trace_info.error == "" else LevelEnum.ERROR, status_message=trace_info.error, ) self.add_span(langfuse_span_data=tool_span_data) - def generate_name_trace( - self, - trace_info: GenerateNameTraceInfo, - conversation_id: str = None, - inputs: str = None, - generate_conversation_name: str = None, - **kwargs - ): + def generate_name_trace(self, trace_info: GenerateNameTraceInfo, **kwargs): name_generation_trace_data = LangfuseTrace( name="generate_name", input=trace_info.inputs, diff --git a/api/core/ops/langsmith_trace.py b/api/core/ops/langsmith_trace.py index d3e84411855f06..2b2733218c3164 100644 --- a/api/core/ops/langsmith_trace.py +++ b/api/core/ops/langsmith_trace.py @@ -2,20 +2,26 @@ import logging import os from datetime import datetime, timedelta -from typing import Any from langsmith import Client from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token -from core.moderation.base import ModerationInputsResult from core.ops.base_trace_instance import BaseTraceInstance from core.ops.entities.langsmith_trace_entity import LangSmithRunModel, LangSmithRunType, LangSmithRunUpdateModel +from core.ops.entities.trace_entity import ( + DatasetRetrievalTraceInfo, + GenerateNameTraceInfo, + MessageTraceInfo, + ModerationTraceInfo, + SuggestedQuestionTraceInfo, + ToolTraceInfo, + 
WorkflowTraceInfo,
+)
 from core.ops.model import LangSmithConfig
 from core.ops.utils import filter_none_values
 from extensions.ext_database import db
-from models.dataset import Document
-from models.model import Message, MessageAgentThought, MessageFile
-from models.workflow import WorkflowNodeExecution, WorkflowRun
+from models.model import MessageFile
+from models.workflow import WorkflowNodeExecution
 
 logger = logging.getLogger(__name__)
 
@@ -36,54 +42,37 @@ def __init__(
         )
         self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001")
 
-    def workflow_trace(self, workflow_run: WorkflowRun, **kwargs):
-        conversion_id = kwargs.get("conversation_id")
-        workflow_id = workflow_run.workflow_id
-        tenant_id = workflow_run.tenant_id
-        workflow_run_id = workflow_run.id
-        workflow_run_created_at = workflow_run.created_at
-        workflow_run_finished_at = workflow_run.finished_at
-        workflow_run_elapsed_time = workflow_run.elapsed_time
-        workflow_run_status = workflow_run.status
-        workflow_run_inputs = (
-            json.loads(workflow_run.inputs) if workflow_run.inputs else {}
-        )
-        workflow_run_outputs = (
-            json.loads(workflow_run.outputs) if workflow_run.outputs else {}
-        )
-        workflow_run_version = workflow_run.version
-        error = workflow_run.error if workflow_run.error else ""
-
-        total_tokens = workflow_run.total_tokens
-
-        file_list = workflow_run_inputs.get("sys.file") if workflow_run_inputs.get("sys.file") else []
-        query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or ""
-
-        metadata = {
-            "workflow_id": workflow_id,
-            "conversation_id": conversion_id,
-            "workflow_run_id": workflow_run_id,
-            "tenant_id": tenant_id,
-            "elapsed_time": workflow_run_elapsed_time,
-            "status": workflow_run_status,
-            "version": workflow_run_version,
-            "total_tokens": total_tokens,
-        }
-
+    def trace(self, trace_info, **kwargs):
+        if isinstance(trace_info, WorkflowTraceInfo):
+            self.workflow_trace(trace_info)
+        if isinstance(trace_info, MessageTraceInfo):
+            self.message_trace(trace_info)
+        if isinstance(trace_info, ModerationTraceInfo):
+            self.moderation_trace(trace_info)
+        if isinstance(trace_info, SuggestedQuestionTraceInfo):
+            self.suggested_question_trace(trace_info)
+        if isinstance(trace_info, DatasetRetrievalTraceInfo):
+            self.dataset_retrieval_trace(trace_info)
+        if isinstance(trace_info, ToolTraceInfo):
+            self.tool_trace(trace_info)
+        if isinstance(trace_info, GenerateNameTraceInfo):
+            self.generate_name_trace(trace_info)
+
+    def workflow_trace(self, trace_info: WorkflowTraceInfo):
         langsmith_run = LangSmithRunModel(
-            file_list=file_list,
-            total_tokens=total_tokens,
-            id=workflow_run_id,
-            name=f"workflow_run_{workflow_run_id}",
-            inputs=query,
+            file_list=trace_info.file_list,
+            total_tokens=trace_info.total_tokens,
+            id=trace_info.workflow_run_id,
+            name=f"workflow_run_{trace_info.workflow_run_id}",
+            inputs=trace_info.query,
             run_type=LangSmithRunType.tool,
-            start_time=workflow_run_created_at,
-            end_time=workflow_run_finished_at,
-            outputs=workflow_run_outputs,
+            start_time=trace_info.workflow_data.created_at,
+            end_time=trace_info.workflow_data.finished_at,
+            outputs=trace_info.workflow_run_outputs,
             extra={
-                "metadata": metadata,
+                "metadata": trace_info.metadata,
            },
-            error=error,
+            error=trace_info.error,
             tags=["workflow"],
         )
 
@@ -92,7 +81,7 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs):
         # through workflow_run_id get all_nodes_execution
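+        # Node executions become child runs below, linked back to this workflow run
+        # through parent_run_id so LangSmith renders the whole run as one nested trace.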
         workflow_nodes_executions = (
             db.session.query(WorkflowNodeExecution)
-            .filter(WorkflowNodeExecution.workflow_run_id == workflow_run_id)
+            .filter(WorkflowNodeExecution.workflow_run_id == trace_info.workflow_run_id)
             .order_by(WorkflowNodeExecution.index.desc())
             .all()
         )
@@ -125,6 +114,7 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs):
             metadata = json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {}
             metadata.update(
                 {
+                    "workflow_run_id": trace_info.workflow_run_id,
                     "node_execution_id": node_execution_id,
                     "tenant_id": tenant_id,
                     "app_id": app_id,
@@ -150,274 +140,154 @@ def workflow_trace(self, workflow_run: WorkflowRun, **kwargs):
                 start_time=created_at,
                 end_time=finished_at,
                 outputs=outputs,
-                file_list=file_list,
+                file_list=trace_info.file_list,
                 extra={
                     "metadata": metadata,
                 },
-                parent_run_id=workflow_run_id,
+                parent_run_id=trace_info.workflow_run_id,
                 tags=["node_execution"],
             )
 
             self.add_run(langsmith_run)
 
-    def message_trace(self, message_id: str, conversation_id: str, **kwargs):
-        message_data = kwargs.get("message_data")
-        conversation_mode = kwargs.get("conversation_mode")
-        message_tokens = message_data.message_tokens
-        answer_tokens = message_data.answer_tokens
-        total_tokens = message_tokens + answer_tokens
-        error = message_data.error if message_data.error else ""
-        inputs = message_data.message
-        file_list = inputs[0].get("files", [])
-        provider_response_latency = message_data.provider_response_latency
-        created_at = message_data.created_at
-        end_time = created_at + timedelta(seconds=provider_response_latency)
-
+    def message_trace(self, trace_info: MessageTraceInfo, **kwargs):
         # get message file data
-        message_file_data: MessageFile = kwargs.get("message_file_data")
+        file_list = trace_info.file_list
+        message_file_data: MessageFile = trace_info.message_file_data
         file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else ""
         file_list.append(file_url)
+        metadata = trace_info.metadata
+        message_data = trace_info.message_data
+        message_id = message_data.id
 
-        metadata = {
-            "conversation_id": conversation_id,
-            "ls_provider": message_data.model_provider,
-            "ls_model_name": message_data.model_id,
-            "status": message_data.status,
-            "from_end_user_id": message_data.from_account_id,
-            "from_account_id": message_data.from_account_id,
-            "agent_based": message_data.agent_based,
-            "workflow_run_id": message_data.workflow_run_id,
-            "from_source": message_data.from_source,
-        }
         message_run = LangSmithRunModel(
-            input_tokens=message_tokens,
-            output_tokens=answer_tokens,
-            total_tokens=total_tokens,
+            input_tokens=trace_info.message_tokens,
+            output_tokens=trace_info.answer_tokens,
+            total_tokens=trace_info.total_tokens,
             id=message_id,
             name=f"message_{message_id}",
-            inputs=inputs,
+            inputs=trace_info.inputs,
             run_type=LangSmithRunType.chain,
-            start_time=created_at,
-            end_time=end_time,
+            start_time=trace_info.start_at,
+            end_time=trace_info.end_time,
             outputs=message_data.answer,
             extra={
                 "metadata": metadata,
             },
-            tags=["message", str(conversation_mode)],
-            error=error,
+            tags=["message", str(trace_info.conversation_mode)],
+            error=trace_info.error,
             file_list=file_list,
         )
 
         self.add_run(message_run)
 
         # create llm run parented to message run
         llm_run = LangSmithRunModel(
-            input_tokens=message_tokens,
-            output_tokens=answer_tokens,
-            total_tokens=total_tokens,
+            input_tokens=trace_info.message_tokens,
+            output_tokens=trace_info.answer_tokens,
+            total_tokens=trace_info.total_tokens,
             name=f"llm_{message_id}",
-            inputs=inputs,
+            inputs=trace_info.inputs,
             run_type=LangSmithRunType.llm,
-            
start_time=created_at, - end_time=end_time, + start_time=trace_info.start_at, + end_time=trace_info.end_time, outputs=message_data.answer, extra={ "metadata": metadata, }, parent_run_id=message_id, - tags=["llm", str(conversation_mode)], - error=error, + tags=["llm", str(trace_info.conversation_mode)], + error=trace_info.error, file_list=file_list, ) self.add_run(llm_run) - def moderation_trace(self, message_id: str, moderation_result: ModerationInputsResult, **kwargs): - inputs = kwargs.get("inputs") - message_data = kwargs.get("message_data") - flagged = moderation_result.flagged - action = moderation_result.action - preset_response = moderation_result.preset_response - query = moderation_result.query - timer = kwargs.get("timer") - start_time = timer.get("start") - end_time = timer.get("end") - - metadata = { - "message_id": message_id, - "action": action, - "preset_response": preset_response, - "query": query, - } - + def moderation_trace(self, trace_info: ModerationTraceInfo, **kwargs): langsmith_run = LangSmithRunModel( name="moderation", - inputs=inputs, + inputs=trace_info.inputs, outputs={ - "action": action, - "flagged": flagged, - "preset_response": preset_response, - "inputs": inputs, + "action": trace_info.action, + "flagged": trace_info.flagged, + "preset_response": trace_info.preset_response, + "inputs": trace_info.inputs, }, run_type=LangSmithRunType.tool, extra={ - "metadata": metadata, + "metadata": trace_info.metadata, }, tags=["moderation"], - parent_run_id=message_id, - start_time=start_time or message_data.created_at, - end_time=end_time or message_data.updated_at, + parent_run_id=trace_info.message_id, + start_time=trace_info.start_time or trace_info.message_data.created_at, + end_time=trace_info.end_time or trace_info.message_data.updated_at, ) self.add_run(langsmith_run) - def suggested_question_trace(self, message_id: str, suggested_question: str, **kwargs): - message_data = kwargs.get("message_data") - timer = kwargs.get("timer") - start_time = timer.get("start") - end_time = timer.get("end") - inputs = message_data.query - - metadata = { - "message_id": message_id, - "ls_provider": message_data.model_provider, - "ls_model_name": message_data.model_id, - "status": message_data.status, - "from_end_user_id": message_data.from_account_id, - "from_account_id": message_data.from_account_id, - "agent_based": message_data.agent_based, - "workflow_run_id": message_data.workflow_run_id, - "from_source": message_data.from_source, - } - + def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo, **kwargs): + message_data = trace_info.message_data suggested_question_run = LangSmithRunModel( name="suggested_question", - inputs=inputs, - outputs=suggested_question, + inputs=trace_info.inputs, + outputs=trace_info.suggested_question, run_type=LangSmithRunType.tool, extra={ - "metadata": metadata, + "metadata": trace_info.metadata, }, tags=["suggested_question"], - parent_run_id=message_id, - start_time=start_time or message_data.created_at, - end_time=end_time or message_data.updated_at, + parent_run_id=trace_info.message_id, + start_time=trace_info.start_time or message_data.created_at, + end_time=trace_info.end_time or message_data.updated_at, ) self.add_run(suggested_question_run) - def dataset_retrieval_trace(self, message_id: str, documents: list[Document], **kwargs): - message_data = kwargs.get("message_data") - inputs = message_data.query if message_data.query else message_data.inputs - metadata = { - "message_id": message_id, - "documents": documents - 
} - timer = kwargs.get("timer") - start_time = timer.get("start") - end_time = timer.get("end") - + def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo, **kwargs): dataset_retrieval_run = LangSmithRunModel( name="dataset_retrieval", - inputs=inputs, - outputs={"documents": documents}, + inputs=trace_info.inputs, + outputs={"documents": trace_info.documents}, run_type=LangSmithRunType.retriever, extra={ - "metadata": metadata, + "metadata": trace_info.metadata, }, tags=["dataset_retrieval"], - parent_run_id=message_id, - start_time=start_time or message_data.created_at, - end_time=end_time or message_data.updated_at, + parent_run_id=trace_info.message_id, + start_time=trace_info.start_time or trace_info.message_data.created_at, + end_time=trace_info.end_time or trace_info.message_data.updated_at, ) self.add_run(dataset_retrieval_run) - def tool_trace(self, message_id: str, tool_name: str, tool_inputs: dict[str, Any], tool_outputs: str, **kwargs): - message_data: Message = kwargs.get("message_data") - created_time = message_data.created_at - end_time = message_data.updated_at - tool_config = {} - time_cost = 0 - error = "" - tool_parameters = {} - file_url = "" - - agent_thoughts: list[MessageAgentThought] = message_data.agent_thoughts - for agent_thought in agent_thoughts: - if tool_name in agent_thought.tools: - created_time = agent_thought.created_at - tool_meta_data = agent_thought.tool_meta.get(tool_name, {}) - tool_config = tool_meta_data.get('tool_config', {}) - time_cost = tool_meta_data.get('time_cost', 0) - end_time = created_time + timedelta(seconds=time_cost) - error = tool_meta_data.get('error', "") - tool_parameters = tool_meta_data.get('tool_parameters', {}) - - metadata = { - "message_id": message_id, - "tool_name": tool_name, - "tool_inputs": tool_inputs, - "tool_outputs": tool_outputs, - "tool_config": tool_config, - "time_cost": time_cost, - "error": error, - "tool_parameters": tool_parameters, - } - - # get message file data - message_file_data: MessageFile = kwargs.get("message_file_data") - if message_file_data: - message_file_id = message_file_data.id if message_file_data else None - type = message_file_data.type - created_by_role = message_file_data.created_by_role - created_user_id = message_file_data.created_by - file_url = f"{self.file_base_url}/{message_file_data.url}" - - metadata.update( - { - "message_file_id": message_file_id, - "created_by_role": created_by_role, - "created_user_id": created_user_id, - "type": type, - } - ) - + def tool_trace(self, trace_info: ToolTraceInfo, **kwargs): tool_run = LangSmithRunModel( - name=tool_name, - inputs=tool_inputs, - outputs=tool_outputs, + name=trace_info.tool_name, + inputs=trace_info.tool_inputs, + outputs=trace_info.tool_outputs, run_type=LangSmithRunType.tool, extra={ - "metadata": metadata, + "metadata": trace_info.metadata, }, - tags=["tool", tool_name], - parent_run_id=message_id, - start_time=created_time, - end_time=end_time, - file_list=[file_url], + tags=["tool", trace_info.tool_name], + parent_run_id=trace_info.message_id, + start_time=trace_info.start_time, + end_time=trace_info.end_time, + file_list=[trace_info.file_url], ) self.add_run(tool_run) - def generate_name_trace(self, conversation_id: str, inputs: str, generate_conversation_name: str, **kwargs): - timer = kwargs.get("timer") - start_time = timer.get("start") - end_time = timer.get("end") - - metadata = { - "conversation_id": conversation_id, - } - + def generate_name_trace(self, trace_info: GenerateNameTraceInfo, **kwargs): 
name_run = LangSmithRunModel( name="generate_name", - inputs=inputs, - outputs=generate_conversation_name, + inputs=trace_info.inputs, + outputs=trace_info.outputs, run_type=LangSmithRunType.tool, extra={ - "metadata": metadata, + "metadata": trace_info.metadata, }, tags=["generate_name"], - start_time=start_time or datetime.now(), - end_time=end_time or datetime.now(), + start_time=trace_info.start_time or datetime.now(), + end_time=trace_info.end_time or datetime.now(), ) self.add_run(name_run) diff --git a/api/core/ops/trace_queue_manager.py b/api/core/ops/trace_queue_manager.py index 23597768884564..9d3175cfa6b1b2 100644 --- a/api/core/ops/trace_queue_manager.py +++ b/api/core/ops/trace_queue_manager.py @@ -183,7 +183,7 @@ def message_trace(self, message_id): inputs=inputs, outputs=message_data.answer, file_list=message_data.message[0].get("files", []), - created_at=created_at, + start_at=created_at, end_time=created_at + timedelta(seconds=message_data.provider_response_latency), metadata=metadata, message_file_data=message_file_data, @@ -283,6 +283,7 @@ def dataset_retrieval_trace(self, message_id, timer, **kwargs): start_time=timer.get("start"), end_time=timer.get("end"), metadata=metadata, + message_data=message_data, ) return dataset_retrieval_trace_info @@ -321,12 +322,14 @@ def tool_trace(self, message_id, **kwargs): "tool_parameters": tool_parameters, } + file_url = "" message_file_data = db.session.query(MessageFile).filter_by(message_id=message_id).first() if message_file_data: message_file_id = message_file_data.id if message_file_data else None type = message_file_data.type created_by_role = message_file_data.created_by_role created_user_id = message_file_data.created_by + file_url = f"{self.file_base_url}/{message_file_data.url}" metadata.update( { @@ -353,6 +356,7 @@ def tool_trace(self, message_id, **kwargs): tool_config=tool_config, time_cost=time_cost, tool_parameters=tool_parameters, + file_url=file_url, ) return tool_trace_info From c1bc774fb00f17a82d980e20ff685cd6184fd712 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 18:00:08 +0800 Subject: [PATCH 25/65] fix: message trace start time error --- api/core/ops/langfuse_trace.py | 4 ++-- api/core/ops/langsmith_trace.py | 2 +- api/core/ops/trace_queue_manager.py | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/api/core/ops/langfuse_trace.py b/api/core/ops/langfuse_trace.py index b4bac502d62394..88c3ac3c554382 100644 --- a/api/core/ops/langfuse_trace.py +++ b/api/core/ops/langfuse_trace.py @@ -180,7 +180,7 @@ def message_trace( "total_tokens": trace_info.total_tokens, "error": trace_info.error, "provider_response_latency": message_data.provider_response_latency, - "created_at": trace_info.created_at, + "created_at": trace_info.start_at, }, output=trace_info.outputs, metadata=metadata, @@ -201,7 +201,7 @@ def message_trace( langfuse_generation_data = LangfuseGeneration( name=f"generation_{message_id}", trace_id=message_id, - start_time=trace_info.created_at, + start_time=trace_info.start_at, end_time=trace_info.end_time, model=message_data.model_id, input=trace_info.inputs, diff --git a/api/core/ops/langsmith_trace.py b/api/core/ops/langsmith_trace.py index 2b2733218c3164..968ef866ee5d23 100644 --- a/api/core/ops/langsmith_trace.py +++ b/api/core/ops/langsmith_trace.py @@ -168,7 +168,7 @@ def message_trace(self, trace_info: MessageTraceInfo, **kwargs): name=f"message_{message_id}", inputs=trace_info.inputs, run_type=LangSmithRunType.chain, - 
start_time=trace_info.created_at, + start_time=trace_info.start_at, end_time=trace_info.end_time, outputs=message_data.answer, extra={ diff --git a/api/core/ops/trace_queue_manager.py b/api/core/ops/trace_queue_manager.py index 9d3175cfa6b1b2..4a177c51b307de 100644 --- a/api/core/ops/trace_queue_manager.py +++ b/api/core/ops/trace_queue_manager.py @@ -79,7 +79,7 @@ def preprocess(self): self.message_id, self.timer, **self.kwargs ) elif self.trace_type == TraceTaskName.TOOL_TRACE: - return TraceTaskName.TOOL_TRACE, self.tool_trace(self.message_id, **self.kwargs) + return TraceTaskName.TOOL_TRACE, self.tool_trace(self.message_id, self.timer, **self.kwargs) elif self.trace_type == TraceTaskName.GENERATE_NAME_TRACE: return TraceTaskName.GENERATE_NAME_TRACE, self.generate_name_trace( self.conversation_id, self.timer, **self.kwargs @@ -288,7 +288,7 @@ def dataset_retrieval_trace(self, message_id, timer, **kwargs): return dataset_retrieval_trace_info - def tool_trace(self, message_id, **kwargs): + def tool_trace(self, message_id, timer, **kwargs): tool_name = kwargs.get('tool_name') tool_inputs = kwargs.get('tool_inputs') tool_outputs = kwargs.get('tool_outputs') @@ -344,8 +344,8 @@ def tool_trace(self, message_id, **kwargs): message_id=message_id, message_data=message_data, tool_name=tool_name, - start_time=created_time, - end_time=end_time, + start_time=timer.get("start") if timer else created_time, + end_time=timer.get("end") if timer else end_time, tool_inputs=tool_inputs, tool_outputs=tool_outputs, metadata=metadata, From 7ee9616db672f5927e4594c8ffd45ca2331fed1c Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 18:03:41 +0800 Subject: [PATCH 26/65] feat: update base_trace_instance.py --- api/core/ops/base_trace_instance.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/api/core/ops/base_trace_instance.py b/api/core/ops/base_trace_instance.py index 3ffba30f88b7da..eeaa02f87260f8 100644 --- a/api/core/ops/base_trace_instance.py +++ b/api/core/ops/base_trace_instance.py @@ -69,3 +69,32 @@ def generate_name_trace(self, **kwargs): Subclasses must implement specific tracing logic for name generation activities. """ return kwargs + + @abstractmethod + def api_check_trace(self, **kwargs): + """ + Abstract method to trace API check activities. + Subclasses must implement specific tracing logic for API check operations. + """ + return kwargs + + @abstractmethod + def obfuscate_config(self, **kwargs): + """ + Obfuscate configuration data. + """ + return kwargs + + @abstractmethod + def encrypt_config(self, **kwargs): + """ + Encrypt configuration data. + """ + return kwargs + + @abstractmethod + def decryption_config(self, **kwargs): + """ + Decrypt configuration data. 
+ """ + return kwargs From 54fc284923605ed84c44f1883f781bd563c80252 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 18:22:27 +0800 Subject: [PATCH 27/65] fix: workflow_trace llm error --- api/core/ops/langfuse_trace.py | 23 ----------------------- api/core/ops/langsmith_trace.py | 4 ++-- 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/api/core/ops/langfuse_trace.py b/api/core/ops/langfuse_trace.py index 88c3ac3c554382..7e34067aab57b5 100644 --- a/api/core/ops/langfuse_trace.py +++ b/api/core/ops/langfuse_trace.py @@ -118,29 +118,6 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo): } ) - process_data = json.loads(node_execution.process_data) if node_execution.process_data else {} - if process_data and process_data.get("model_mode") == "chat": - # add generation - node_total_tokens = json.loads(node_execution.execution_metadata).get("total_tokens") - generation_usage = GenerationUsage( - totalTokens=node_total_tokens, - ) - - langfuse_generation_data = LangfuseGeneration( - name=f"{node_name}_{node_execution_id}", - trace_id=trace_info.workflow_run_id, - start_time=created_at, - end_time=finished_at, - input=inputs, - output=outputs, - metadata=metadata, - level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR, - status_message=trace_info.error if trace_info.error else "", - usage=generation_usage, - ) - - self.add_generation(langfuse_generation_data) - # add span span_data = LangfuseSpan( name=f"{node_name}_{node_execution_id}", diff --git a/api/core/ops/langsmith_trace.py b/api/core/ops/langsmith_trace.py index 968ef866ee5d23..563842bacec098 100644 --- a/api/core/ops/langsmith_trace.py +++ b/api/core/ops/langsmith_trace.py @@ -66,8 +66,8 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo): name=f"workflow_run_{trace_info.workflow_run_id}", inputs=trace_info.query, run_type=LangSmithRunType.tool, - start_time=trace_info.workflow_data.workflow_run_created_at, - end_time=trace_info.workflow_data.workflow_run_finished_at, + start_time=trace_info.workflow_data.created_at, + end_time=trace_info.workflow_data.finished_at, outputs=trace_info.workflow_data.workflow_run_outputs, extra={ "metadata": trace_info.metadata, From 0c10f772643ffa496e6942902d5f4d609fba7a81 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 18:34:19 +0800 Subject: [PATCH 28/65] fix: api_check_trace error --- api/core/ops/base_trace_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/ops/base_trace_instance.py b/api/core/ops/base_trace_instance.py index eeaa02f87260f8..63975d8a0c37d0 100644 --- a/api/core/ops/base_trace_instance.py +++ b/api/core/ops/base_trace_instance.py @@ -71,7 +71,7 @@ def generate_name_trace(self, **kwargs): return kwargs @abstractmethod - def api_check_trace(self, **kwargs): + def api_check(self, **kwargs): """ Abstract method to trace API check activities. Subclasses must implement specific tracing logic for API check operations. 
From 4293361ded1f880f540c8d67b1f5adf8bdafeeae Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 19:11:32 +0800 Subject: [PATCH 29/65] fix: type definition error --- api/core/app/apps/workflow/app_generator.py | 3 +- .../apps/workflow/generate_task_pipeline.py | 6 +- api/core/ops/base_trace_instance.py | 83 +------------------ api/core/ops/entities/trace_entity.py | 77 ++++++----------- api/core/ops/langfuse_trace.py | 9 +- api/core/ops/langsmith_trace.py | 7 +- 6 files changed, 40 insertions(+), 145 deletions(-) diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index d2d4947e783e7d..abc1174951d7b3 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -283,10 +283,9 @@ def _handle_response(self, application_generate_entity: WorkflowAppGenerateEntit user=user, stream=stream ) - app_id = application_generate_entity.app_config.app_id try: - return generate_task_pipeline.process(app_id, workflow) + return generate_task_pipeline.process() except ValueError as e: if e.args[0] == "I/O operation on closed file.": # ignore this error raise GenerateTaskStoppedException() diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index 49a3cbbbad6f6d..f94e9e0ba4f88d 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -95,11 +95,7 @@ def __init__(self, application_generate_entity: WorkflowAppGenerateEntity, self._stream_generate_nodes = self._get_stream_generate_nodes() self._iteration_nested_relations = self._get_iteration_nested_relations(self._workflow.graph_dict) - def process( - self, - app_id: Optional[str] = None, - workflow: Optional[Workflow] = None, - ) -> Union[WorkflowAppBlockingResponse, Generator[WorkflowAppStreamResponse, None, None]]: + def process(self) -> Union[WorkflowAppBlockingResponse, Generator[WorkflowAppStreamResponse, None, None]]: """ Process generate task pipeline. :return: diff --git a/api/core/ops/base_trace_instance.py b/api/core/ops/base_trace_instance.py index 63975d8a0c37d0..3094c19f3a21f7 100644 --- a/api/core/ops/base_trace_instance.py +++ b/api/core/ops/base_trace_instance.py @@ -1,5 +1,7 @@ from abc import ABC, abstractmethod +from core.ops.entities.trace_entity import BaseTraceInfo + class BaseTraceInstance(ABC): """ @@ -15,86 +17,9 @@ def __init__(self): ... @abstractmethod - def trace(self, **kwargs): + def trace(self, trace_info: BaseTraceInfo): """ Abstract method to trace activities. Subclasses must implement specific tracing logic for activities. """ - return kwargs - - @abstractmethod - def message_trace(self, **kwargs): - """ - Abstract method to trace messaging activities. - Subclasses must implement specific tracing logic for messages. - """ - return kwargs - - @abstractmethod - def moderation_trace(self, **kwargs): - """ - Abstract method to trace moderation activities. - Subclasses must implement specific tracing logic for content moderation. - """ - return kwargs - - @abstractmethod - def suggested_question_trace(self, **kwargs): - """ - Abstract method to trace suggested questions in a conversation or system. - Subclasses must implement specific tracing logic for tracking suggested questions. - """ - return kwargs - - @abstractmethod - def dataset_retrieval_trace(self, **kwargs): - """ - Abstract method to trace data retrieval activities. 
- Subclasses must implement specific tracing logic for data retrieval operations. - """ - return kwargs - - @abstractmethod - def tool_trace(self, **kwargs): - """ - Abstract method to trace the usage of tools within the system. - Subclasses must implement specific tracing logic for tool interactions. - """ - return kwargs - - @abstractmethod - def generate_name_trace(self, **kwargs): - """ - Abstract method to trace the generation of names or identifiers within the system. - Subclasses must implement specific tracing logic for name generation activities. - """ - return kwargs - - @abstractmethod - def api_check(self, **kwargs): - """ - Abstract method to trace API check activities. - Subclasses must implement specific tracing logic for API check operations. - """ - return kwargs - - @abstractmethod - def obfuscate_config(self, **kwargs): - """ - Obfuscate configuration data. - """ - return kwargs - - @abstractmethod - def encrypt_config(self, **kwargs): - """ - Encrypt configuration data. - """ - return kwargs - - @abstractmethod - def decryption_config(self, **kwargs): - """ - Decrypt configuration data. - """ - return kwargs + ... diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index 375dc8c50de0b3..3dc3ac60a3169c 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -1,12 +1,22 @@ from datetime import datetime -from typing import Any, Union +from typing import Any, Optional, Union from pydantic import BaseModel -class WorkflowTraceInfo(BaseModel): +class BaseTraceInfo(BaseModel): + message_id: str + message_data: Any + inputs: Union[str, dict[str, Any], list, None] + outputs: Union[str, dict[str, Any], list, None] + start_time: datetime + end_time: datetime + metadata: dict[str, Any] + + +class WorkflowTraceInfo(BaseTraceInfo): workflow_data: Any - conversation_id: Union[str, None] + conversation_id: Optional[str] = None workflow_id: str tenant_id: str workflow_run_id: str @@ -15,55 +25,36 @@ class WorkflowTraceInfo(BaseModel): workflow_run_inputs: dict[str, Any] workflow_run_outputs: dict[str, Any] workflow_run_version: str - error: Union[str, None] + error: Optional[str] = None total_tokens: int file_list: list[str] query: str metadata: dict[str, Any] -class MessageTraceInfo(BaseModel): - message_data: Any +class MessageTraceInfo(BaseTraceInfo): conversation_model: str message_tokens: int answer_tokens: int total_tokens: int - error: str - inputs: Union[str, dict[str, Any], list, None] - outputs: Union[str, dict[str, Any], list, None] + error: Optional[str] = None file_list: list[str] - start_at: datetime - end_time: datetime - metadata: dict[str, Any] message_file_data: Any conversation_mode: str -class ModerationTraceInfo(BaseModel): - message_id: str - inputs: dict[str, Any] - message_data: Any +class ModerationTraceInfo(BaseTraceInfo): flagged: bool action: str preset_response: str query: str - start_time: datetime - end_time: datetime - metadata: dict[str, Any] # -class SuggestedQuestionTraceInfo(BaseModel): - message_id: str - message_data: Any - inputs: Union[str, dict[str, Any], list, None] - outputs: Union[str, dict[str, Any], list, None] - start_time: datetime - end_time: datetime - metadata: dict[str, Any] +class SuggestedQuestionTraceInfo(BaseTraceInfo): total_tokens: int - status: Union[str, None] - error: Union[str, None] + status: Optional[str] = None + error: Optional[str] = None from_account_id: str agent_based: bool from_source: str @@ -71,44 +62,26 @@ class 
SuggestedQuestionTraceInfo(BaseModel): model_id: str suggested_question: list[str] level: str - status_message: Union[str, None] + status_message: Optional[str] = None -class DatasetRetrievalTraceInfo(BaseModel): - message_id: str - inputs: Union[str, dict[str, Any], list, None] +class DatasetRetrievalTraceInfo(BaseTraceInfo): documents: Any - start_time: datetime - end_time: datetime - metadata: dict[str, Any] - message_data: Any -class ToolTraceInfo(BaseModel): - message_id: str - message_data: Any +class ToolTraceInfo(BaseTraceInfo): tool_name: str - start_time: datetime - end_time: datetime tool_inputs: dict[str, Any] tool_outputs: str metadata: dict[str, Any] message_file_data: Any - error: Union[str, None] - inputs: Union[str, dict[str, Any], list, None] - outputs: Union[str, dict[str, Any], list, None] + error: Optional[str] = None tool_config: dict[str, Any] time_cost: Union[int, float] tool_parameters: dict[str, Any] file_url: Union[str, None, list] -class GenerateNameTraceInfo(BaseModel): +class GenerateNameTraceInfo(BaseTraceInfo): conversation_id: str - inputs: Union[str, dict[str, Any], list, None] - outputs: Union[str, dict[str, Any], list, None] - start_time: datetime - end_time: datetime - metadata: dict[str, Any] tenant_id: str - diff --git a/api/core/ops/langfuse_trace.py b/api/core/ops/langfuse_trace.py index 7e34067aab57b5..4a44c8e685fb25 100644 --- a/api/core/ops/langfuse_trace.py +++ b/api/core/ops/langfuse_trace.py @@ -17,6 +17,7 @@ UnitEnum, ) from core.ops.entities.trace_entity import ( + BaseTraceInfo, DatasetRetrievalTraceInfo, GenerateNameTraceInfo, MessageTraceInfo, @@ -49,7 +50,7 @@ def __init__( ) self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") - def trace(self, trace_info, **kwargs): + def trace(self, trace_info: BaseTraceInfo): if isinstance(trace_info, WorkflowTraceInfo): self.workflow_trace(trace_info) if isinstance(trace_info, MessageTraceInfo): @@ -293,7 +294,7 @@ def add_trace(self, langfuse_trace_data: Optional[LangfuseTrace] = None): self.langfuse_client.trace(**format_trace_data) logger.debug("LangFuse Trace created successfully") except Exception as e: - raise f"LangFuse Failed to create trace: {str(e)}" + raise ValueError(f"LangFuse Failed to create trace: {str(e)}") def add_span(self, langfuse_span_data: Optional[LangfuseSpan] = None): format_span_data = ( @@ -303,7 +304,7 @@ def add_span(self, langfuse_span_data: Optional[LangfuseSpan] = None): self.langfuse_client.span(**format_span_data) logger.debug("LangFuse Span created successfully") except Exception as e: - raise f"LangFuse Failed to create span: {str(e)}" + raise ValueError(f"LangFuse Failed to create span: {str(e)}") def update_span(self, span, langfuse_span_data: Optional[LangfuseSpan] = None): format_span_data = ( @@ -324,7 +325,7 @@ def add_generation( self.langfuse_client.generation(**format_generation_data) logger.debug("LangFuse Generation created successfully") except Exception as e: - raise f"LangFuse Failed to create generation: {str(e)}" + raise ValueError(f"LangFuse Failed to create generation: {str(e)}") def update_generation( self, generation, langfuse_generation_data: Optional[LangfuseGeneration] = None diff --git a/api/core/ops/langsmith_trace.py b/api/core/ops/langsmith_trace.py index 563842bacec098..5e446e5d405d82 100644 --- a/api/core/ops/langsmith_trace.py +++ b/api/core/ops/langsmith_trace.py @@ -9,6 +9,7 @@ from core.ops.base_trace_instance import BaseTraceInstance from core.ops.entities.langsmith_trace_entity import LangSmithRunModel, 
LangSmithRunType, LangSmithRunUpdateModel from core.ops.entities.trace_entity import ( + BaseTraceInfo, DatasetRetrievalTraceInfo, GenerateNameTraceInfo, MessageTraceInfo, @@ -42,7 +43,7 @@ def __init__( ) self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") - def trace(self, trace_info, **kwargs): + def trace(self, trace_info: BaseTraceInfo): if isinstance(trace_info, WorkflowTraceInfo): self.workflow_trace(trace_info) if isinstance(trace_info, MessageTraceInfo): @@ -304,7 +305,7 @@ def add_run(self, run_data: LangSmithRunModel): self.langsmith_client.create_run(**data) logger.debug("LangSmith Run created successfully.") except Exception as e: - raise f"LangSmith Failed to create run: {str(e)}" + raise ValueError(f"LangSmith Failed to create run: {str(e)}") def update_run(self, update_run_data: LangSmithRunUpdateModel): data = update_run_data.model_dump() @@ -313,7 +314,7 @@ def update_run(self, update_run_data: LangSmithRunUpdateModel): self.langsmith_client.update_run(**data) logger.debug("LangSmith Run updated successfully.") except Exception as e: - raise f"LangSmith Failed to update run: {str(e)}" + raise ValueError(f"LangSmith Failed to update run: {str(e)}") def api_check(self): try: From c77a7383d0caf13f0605107613d85f3a63de53fe Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 19:14:17 +0800 Subject: [PATCH 30/65] fix: remove Chinese comment from migration --- .../c031d46af369_remove_app_model_config_trace_config_.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py b/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py index 64489b11f50136..20d9c5d1fb4524 100644 --- a/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py +++ b/api/migrations/versions/c031d46af369_remove_app_model_config_trace_config_.py @@ -28,7 +28,7 @@ def upgrade(): sa.Column('created_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.Column('updated_at', sa.DateTime(), server_default=sa.text('now()'), nullable=False), sa.Column('is_active', sa.Boolean(), server_default=sa.text('true'), nullable=False), - sa.PrimaryKeyConstraint('id', name='trace_app_config_pkey') # 修改了主键约束名称以避免冲突 + sa.PrimaryKeyConstraint('id', name='trace_app_config_pkey') ) with op.batch_alter_table('trace_app_config', schema=None) as batch_op: batch_op.create_index('trace_app_config_app_id_idx', ['app_id'], unique=False) From 1d652e69c615988ea5ca8bbd7e18fa9c54776845 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 19:21:52 +0800 Subject: [PATCH 31/65] feat: move trace modules into core/ops subpackages --- api/core/ops/entities/__init__.py | 0 api/core/ops/{model.py => entities/config_entity.py} | 0 api/core/ops/langfuse_trace/__init__.py | 0 api/core/ops/{ => langfuse_trace}/langfuse_trace.py | 2 +- api/core/ops/langsmith_trace/__init__.py | 0 api/core/ops/{ => langsmith_trace}/langsmith_trace.py | 2 +- api/services/ops_trace/ops_trace_service.py | 6 +++--- 7 files changed, 5 insertions(+), 5 deletions(-) create mode 100644 api/core/ops/entities/__init__.py rename api/core/ops/{model.py => entities/config_entity.py} (100%) create mode 100644 api/core/ops/langfuse_trace/__init__.py rename api/core/ops/{ => langfuse_trace}/langfuse_trace.py (99%) create mode 100644 api/core/ops/langsmith_trace/__init__.py rename api/core/ops/{ => langsmith_trace}/langsmith_trace.py (99%) diff --git a/api/core/ops/entities/__init__.py b/api/core/ops/entities/__init__.py
new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/api/core/ops/model.py b/api/core/ops/entities/config_entity.py similarity index 100% rename from api/core/ops/model.py rename to api/core/ops/entities/config_entity.py diff --git a/api/core/ops/langfuse_trace/__init__.py b/api/core/ops/langfuse_trace/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/api/core/ops/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py similarity index 99% rename from api/core/ops/langfuse_trace.py rename to api/core/ops/langfuse_trace/langfuse_trace.py index 4a44c8e685fb25..e966e220f92ec4 100644 --- a/api/core/ops/langfuse_trace.py +++ b/api/core/ops/langfuse_trace/langfuse_trace.py @@ -8,6 +8,7 @@ from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.ops.base_trace_instance import BaseTraceInstance +from core.ops.entities.config_entity import LangfuseConfig from core.ops.entities.langfuse_trace_entity import ( GenerationUsage, LangfuseGeneration, @@ -26,7 +27,6 @@ ToolTraceInfo, WorkflowTraceInfo, ) -from core.ops.model import LangfuseConfig from core.ops.utils import filter_none_values from extensions.ext_database import db from models.model import MessageFile diff --git a/api/core/ops/langsmith_trace/__init__.py b/api/core/ops/langsmith_trace/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/api/core/ops/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py similarity index 99% rename from api/core/ops/langsmith_trace.py rename to api/core/ops/langsmith_trace/langsmith_trace.py index 5e446e5d405d82..3e173ea22985ed 100644 --- a/api/core/ops/langsmith_trace.py +++ b/api/core/ops/langsmith_trace/langsmith_trace.py @@ -7,6 +7,7 @@ from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.ops.base_trace_instance import BaseTraceInstance +from core.ops.entities.config_entity import LangSmithConfig from core.ops.entities.langsmith_trace_entity import LangSmithRunModel, LangSmithRunType, LangSmithRunUpdateModel from core.ops.entities.trace_entity import ( BaseTraceInfo, @@ -18,7 +19,6 @@ ToolTraceInfo, WorkflowTraceInfo, ) -from core.ops.model import LangSmithConfig from core.ops.utils import filter_none_values from extensions.ext_database import db from models.model import MessageFile diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index deebb0e1911f32..220652e7a75199 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -2,9 +2,9 @@ from typing import Union from uuid import UUID -from core.ops.langfuse_trace import LangFuseDataTrace -from core.ops.langsmith_trace import LangSmithDataTrace -from core.ops.model import LangfuseConfig, LangSmithConfig, TracingProviderEnum +from core.ops.entities.config_entity import LangfuseConfig, LangSmithConfig, TracingProviderEnum +from core.ops.langfuse_trace.langfuse_trace import LangFuseDataTrace +from core.ops.langsmith_trace.langsmith_trace import LangSmithDataTrace from extensions.ext_database import db from models.model import App, AppModelConfig, Conversation, Message, TraceAppConfig From ad7fbc79123bdef4f4edfa39bd89ecf61ad15620 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 19:28:03 +0800 Subject: [PATCH 32/65] fix: rename start_at to start_time and make base trace fields optional --- api/core/ops/entities/trace_entity.py | 6 +++--- api/core/ops/langfuse_trace/langfuse_trace.py | 4 ++--
api/core/ops/langsmith_trace/langsmith_trace.py | 4 ++-- api/core/ops/trace_queue_manager.py | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index 3dc3ac60a3169c..17eba251dc8551 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -5,12 +5,12 @@ class BaseTraceInfo(BaseModel): - message_id: str + message_id: Optional[str] = None message_data: Any inputs: Union[str, dict[str, Any], list, None] outputs: Union[str, dict[str, Any], list, None] - start_time: datetime - end_time: datetime + start_time: Optional[datetime] = None + end_time: Optional[datetime] = None metadata: dict[str, Any] diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py index e966e220f92ec4..ba37441c8aa2cd 100644 --- a/api/core/ops/langfuse_trace/langfuse_trace.py +++ b/api/core/ops/langfuse_trace/langfuse_trace.py @@ -158,7 +158,7 @@ def message_trace( "total_tokens": trace_info.total_tokens, "error": trace_info.error, "provider_response_latency": message_data.provider_response_latency, - "created_at": trace_info.start_at, + "created_at": trace_info.start_time, }, output=trace_info.outputs, metadata=metadata, @@ -179,7 +179,7 @@ def message_trace( langfuse_generation_data = LangfuseGeneration( name=f"generation_{message_id}", trace_id=message_id, - start_time=trace_info.start_at, + start_time=trace_info.start_time, end_time=trace_info.end_time, model=message_data.model_id, input=trace_info.inputs, diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py index 3e173ea22985ed..a2db80e9c248c8 100644 --- a/api/core/ops/langsmith_trace/langsmith_trace.py +++ b/api/core/ops/langsmith_trace/langsmith_trace.py @@ -169,7 +169,7 @@ def message_trace(self, trace_info: MessageTraceInfo, **kwargs): name=f"message_{message_id}", inputs=trace_info.inputs, run_type=LangSmithRunType.chain, - start_time=trace_info.start_at, + start_time=trace_info.start_time, end_time=trace_info.end_time, outputs=message_data.answer, extra={ @@ -189,7 +189,7 @@ def message_trace(self, trace_info: MessageTraceInfo, **kwargs): name=f"llm_{message_id}", inputs=trace_info.inputs, run_type=LangSmithRunType.llm, - start_time=trace_info.start_at, + start_time=trace_info.start_time, end_time=trace_info.end_time, outputs=message_data.answer, extra={ diff --git a/api/core/ops/trace_queue_manager.py b/api/core/ops/trace_queue_manager.py index 4a177c51b307de..444b983b34d303 100644 --- a/api/core/ops/trace_queue_manager.py +++ b/api/core/ops/trace_queue_manager.py @@ -183,7 +183,7 @@ def message_trace(self, message_id): inputs=inputs, outputs=message_data.answer, file_list=message_data.message[0].get("files", []), - start_at=created_at, + start_time=created_at, end_time=created_at + timedelta(seconds=message_data.provider_response_latency), metadata=metadata, message_file_data=message_file_data, From d2ffc48e842d84f4f4ce27e5c4a2c698a548d259 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 20:48:56 +0800 Subject: [PATCH 33/65] feat: trace manager generated in app generator --- api/core/agent/cot_agent_runner.py | 7 ++++- api/core/agent/fc_agent_runner.py | 4 ++- .../app/apps/advanced_chat/app_generator.py | 5 +++- .../advanced_chat/generate_task_pipeline.py | 14 +++++++--- api/core/app/apps/agent_chat/app_generator.py | 5 +++- api/core/app/apps/base_app_runner.py | 3 ++- 
api/core/app/apps/chat/app_generator.py | 3 +++ api/core/app/apps/completion/app_generator.py | 6 +++-- .../app/apps/message_based_app_generator.py | 3 +-- api/core/app/apps/workflow/app_generator.py | 5 +++- .../apps/workflow/generate_task_pipeline.py | 14 +++++++--- api/core/app/entities/app_invoke_entities.py | 8 +++++- .../easy_ui_based_generate_task_pipeline.py | 16 +++++++----- .../task_pipeline/workflow_cycle_manage.py | 17 +++++++----- .../agent_tool_callback_handler.py | 5 ++-- api/core/moderation/input_moderation.py | 7 ++--- api/core/ops/base_trace_instance.py | 2 +- api/core/ops/entities/trace_entity.py | 7 +++-- .../ops/langfuse_trace/entities/__init__.py | 0 .../entities/langfuse_trace_entity.py | 0 api/core/ops/langfuse_trace/langfuse_trace.py | 26 +++++++++---------- .../ops/langsmith_trace/entities/__init__.py | 0 .../entities/langsmith_trace_entity.py | 0 .../ops/langsmith_trace/langsmith_trace.py | 22 +++++++++------- api/core/ops/trace_queue_manager.py | 20 +++++++------- api/core/rag/retrieval/dataset_retrieval.py | 4 +-- api/core/tools/tool_engine.py | 5 +++- 27 files changed, 133 insertions(+), 75 deletions(-) create mode 100644 api/core/ops/langfuse_trace/entities/__init__.py rename api/core/ops/{ => langfuse_trace}/entities/langfuse_trace_entity.py (100%) create mode 100644 api/core/ops/langsmith_trace/entities/__init__.py rename api/core/ops/{ => langsmith_trace}/entities/langsmith_trace_entity.py (100%) diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py index d4b3a7f06a0989..c00a5e34a93411 100644 --- a/api/core/agent/cot_agent_runner.py +++ b/api/core/agent/cot_agent_runner.py @@ -16,6 +16,7 @@ UserPromptMessage, ) from core.ops.base_trace_instance import BaseTraceInstance +from core.ops.trace_queue_manager import TraceQueueManager from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform from core.tools.entities.tool_entities import ToolInvokeMeta from core.tools.tool.tool import Tool @@ -46,6 +47,7 @@ def run( # get tracing instance tracing_instance = app_generate_entity.tracing_instance + trace_manager = app_generate_entity.trace_manager # check model mode if 'Observation' not in app_generate_entity.model_conf.stop: @@ -218,6 +220,7 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): tool_instances=tool_instances, message_file_ids=message_file_ids, tracing_instance=tracing_instance, + trace_manager=trace_manager, ) scratchpad.observation = tool_invoke_response scratchpad.agent_response = tool_invoke_response @@ -287,7 +290,8 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): def _handle_invoke_action(self, action: AgentScratchpadUnit.Action, tool_instances: dict[str, Tool], message_file_ids: list[str], - tracing_instance: Optional[BaseTraceInstance] = None + tracing_instance: Optional[BaseTraceInstance] = None, + trace_manager: Optional[TraceQueueManager] = None ) -> tuple[str, ToolInvokeMeta]: """ handle invoke action @@ -320,6 +324,7 @@ def _handle_invoke_action(self, action: AgentScratchpadUnit.Action, invoke_from=self.application_generate_entity.invoke_from, agent_tool_callback=self.agent_callback, tracing_instance=tracing_instance, + trace_manager=trace_manager, ) # publish files diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py index 6b2997655f1c1b..b1265007956676 100644 --- a/api/core/agent/fc_agent_runner.py +++ b/api/core/agent/fc_agent_runner.py @@ -52,6 +52,7 @@ def run(self, # get tracing 
instance tracing_instance = app_generate_entity.tracing_instance + trace_manager = app_generate_entity.trace_manager def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): if not final_llm_usage_dict['usage']: @@ -246,7 +247,8 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): message=self.message, invoke_from=self.application_generate_entity.invoke_from, agent_tool_callback=self.agent_callback, - tracing_instance=tracing_instance + tracing_instance=tracing_instance, + trace_manager=trace_manager, ) # publish files for message_file, save_as in message_files: diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index 8060bf036f3943..a9ef69ce380131 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -20,6 +20,7 @@ from core.app.entities.task_entities import ChatbotAppBlockingResponse, ChatbotAppStreamResponse from core.file.message_file_parser import MessageFileParser from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError +from core.ops.trace_queue_manager import TraceQueueManager from extensions.ext_database import db from models.account import Account from models.model import App, Conversation, EndUser, Message @@ -91,6 +92,7 @@ def generate( tracing_instance = OpsTraceService.get_ops_trace_instance( app_id=app_model.id ) + trace_manager = TraceQueueManager() # init application generate entity application_generate_entity = AdvancedChatAppGenerateEntity( @@ -104,7 +106,8 @@ def generate( stream=stream, invoke_from=invoke_from, extras=extras, - tracing_instance=tracing_instance + tracing_instance=tracing_instance, + trace_manager=trace_manager ) return self._generate( diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index f4c97033d47c1d..421cd8667f97c1 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -42,6 +42,8 @@ from core.file.file_obj import FileVar from core.model_runtime.entities.llm_entities import LLMUsage from core.model_runtime.utils.encoders import jsonable_encoder +from core.ops.base_trace_instance import BaseTraceInstance +from core.ops.trace_queue_manager import TraceQueueManager from core.workflow.entities.node_entities import NodeType, SystemVariable from core.workflow.nodes.answer.answer_node import AnswerNode from core.workflow.nodes.answer.entities import TextGenerateRouteChunk, VarGenerateRouteChunk @@ -131,7 +133,10 @@ def process( self._application_generate_entity.query ) - generator = self._process_stream_response(self._application_generate_entity.tracing_instance) + generator = self._process_stream_response( + tracing_instance=self._application_generate_entity.tracing_instance, + trace_manager=self._application_generate_entity.trace_manager + ) if self._stream: return self._to_stream_response(generator) else: @@ -182,7 +187,9 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) stream_response=stream_response ) - def _process_stream_response(self, tracing_instance) -> Generator[StreamResponse, None, None]: + def _process_stream_response( + self, tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None + ) -> Generator[StreamResponse, None, None]: """ Process stream response. 
:return: @@ -255,7 +262,8 @@ def _process_stream_response(self, tracing_instance) -> Generator[StreamResponse self._handle_iteration_operation(event) elif isinstance(event, QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent): workflow_run = self._handle_workflow_finished( - event, tracing_instance=tracing_instance, conversation_id=self._conversation.id + event, tracing_instance=tracing_instance, conversation_id=self._conversation.id, + trace_manager=trace_manager ) if workflow_run: yield self._workflow_finish_to_stream_response( diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index 3e93fad59cf893..05f3c24897baf1 100644 --- a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -19,6 +19,7 @@ from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity, InvokeFrom from core.file.message_file_parser import MessageFileParser from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError +from core.ops.trace_queue_manager import TraceQueueManager from extensions.ext_database import db from models.account import Account from models.model import App, EndUser @@ -113,6 +114,7 @@ def generate(self, app_model: App, tracing_instance = OpsTraceService.get_ops_trace_instance( app_id=app_model.id, ) + trace_manager = TraceQueueManager() # init application generate entity application_generate_entity = AgentChatAppGenerateEntity( @@ -128,7 +130,8 @@ def generate(self, app_model: App, invoke_from=invoke_from, extras=extras, call_depth=0, - tracing_instance=tracing_instance + tracing_instance=tracing_instance, + trace_manager=trace_manager ) # init generate records diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py index b0e55bbf605a8d..e81cfb3a2f0bd3 100644 --- a/api/core/app/apps/base_app_runner.py +++ b/api/core/app/apps/base_app_runner.py @@ -364,7 +364,8 @@ def moderation_for_inputs( inputs=inputs, query=query if query else '', message_id=message_id, - tracing_instance=app_generate_entity.tracing_instance + tracing_instance=app_generate_entity.tracing_instance, + trace_manager=app_generate_entity.trace_manager ) def check_hosting_moderation(self, application_generate_entity: EasyUIBasedAppGenerateEntity, diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index 093e54f3701623..24c0d861699411 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -19,6 +19,7 @@ from core.app.entities.app_invoke_entities import ChatAppGenerateEntity, InvokeFrom from core.file.message_file_parser import MessageFileParser from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError +from core.ops.trace_queue_manager import TraceQueueManager from extensions.ext_database import db from models.account import Account from models.model import App, EndUser @@ -107,6 +108,7 @@ def generate( tracing_instance = OpsTraceService.get_ops_trace_instance( app_id=app_model.id, ) + trace_manager = TraceQueueManager() # init application generate entity application_generate_entity = ChatAppGenerateEntity( @@ -122,6 +124,7 @@ def generate( invoke_from=invoke_from, extras=extras, tracing_instance=tracing_instance, + trace_manager=trace_manager ) # init generate records diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index da914766f0eb41..609519ca77e8e2 100644 --- 
a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -19,6 +19,7 @@ from core.app.entities.app_invoke_entities import CompletionAppGenerateEntity, InvokeFrom from core.file.message_file_parser import MessageFileParser from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError +from core.ops.trace_queue_manager import TraceQueueManager from extensions.ext_database import db from models.account import Account from models.model import App, EndUser, Message @@ -99,6 +100,7 @@ def generate(self, app_model: App, tracing_instance = OpsTraceService.get_ops_trace_instance( app_id=app_model.id ) + trace_manager = TraceQueueManager() # init application generate entity application_generate_entity = CompletionAppGenerateEntity( @@ -112,7 +114,8 @@ def generate(self, app_model: App, stream=stream, invoke_from=invoke_from, extras=extras, - tracing_instance=tracing_instance + tracing_instance=tracing_instance, + trace_manager=trace_manager ) # init generate records @@ -165,7 +168,6 @@ def _generate_worker(self, flask_app: Flask, :param flask_app: Flask app :param application_generate_entity: application generate entity :param queue_manager: queue manager - :param conversation_id: conversation ID :param message_id: message ID :return: """ diff --git a/api/core/app/apps/message_based_app_generator.py b/api/core/app/apps/message_based_app_generator.py index fbfd2353011bca..c5cd6864020b33 100644 --- a/api/core/app/apps/message_based_app_generator.py +++ b/api/core/app/apps/message_based_app_generator.py @@ -73,8 +73,7 @@ def _handle_response( ) try: - tracing_instance = application_generate_entity.tracing_instance - return generate_task_pipeline.process(tracing_instance) + return generate_task_pipeline.process() except ValueError as e: if e.args[0] == "I/O operation on closed file.": # ignore this error raise GenerateTaskStoppedException() diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index abc1174951d7b3..b4991e454c147c 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -20,6 +20,7 @@ from core.app.entities.task_entities import WorkflowAppBlockingResponse, WorkflowAppStreamResponse from core.file.message_file_parser import MessageFileParser from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError +from core.ops.trace_queue_manager import TraceQueueManager from extensions.ext_database import db from models.account import Account from models.model import App, EndUser @@ -77,6 +78,7 @@ def generate( tracing_instance = OpsTraceService.get_ops_trace_instance( app_id=app_model.id, ) + trace_manager = TraceQueueManager() # init application generate entity application_generate_entity = WorkflowAppGenerateEntity( @@ -88,7 +90,8 @@ def generate( stream=stream, invoke_from=invoke_from, call_depth=call_depth, - tracing_instance=tracing_instance + tracing_instance=tracing_instance, + trace_manager=trace_manager ) return self._generate( diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index f94e9e0ba4f88d..f6e6e9a0fae84e 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -36,6 +36,8 @@ ) from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline from core.app.task_pipeline.workflow_cycle_manage import WorkflowCycleManage +from 
core.ops.base_trace_instance import BaseTraceInstance +from core.ops.trace_queue_manager import TraceQueueManager from core.workflow.entities.node_entities import NodeType, SystemVariable from core.workflow.nodes.end.end_node import EndNode from extensions.ext_database import db @@ -104,7 +106,10 @@ def process(self) -> Union[WorkflowAppBlockingResponse, Generator[WorkflowAppStr db.session.refresh(self._user) db.session.close() - generator = self._process_stream_response(self._application_generate_entity.tracing_instance) + generator = self._process_stream_response( + tracing_instance=self._application_generate_entity.tracing_instance, + trace_manager=self._application_generate_entity.trace_manager + ) if self._stream: return self._to_stream_response(generator) else: @@ -160,7 +165,8 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) def _process_stream_response( self, - tracing_instance: Optional[Any] = None + tracing_instance: Optional[BaseTraceInstance] = None, + trace_manager: Optional[TraceQueueManager] = None ) -> Generator[StreamResponse, None, None]: """ Process stream response. @@ -218,7 +224,9 @@ def _process_stream_response( yield self._handle_iteration_to_stream_response(self._application_generate_entity.task_id, event) self._handle_iteration_operation(event) elif isinstance(event, QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent): - workflow_run = self._handle_workflow_finished(event, tracing_instance=tracing_instance) + workflow_run = self._handle_workflow_finished( + event, tracing_instance=tracing_instance, trace_manager=trace_manager + ) # save workflow app log self._save_workflow_app_log(workflow_run) diff --git a/api/core/app/entities/app_invoke_entities.py b/api/core/app/entities/app_invoke_entities.py index 77905e296c652e..44eb999c5c54e7 100644 --- a/api/core/app/entities/app_invoke_entities.py +++ b/api/core/app/entities/app_invoke_entities.py @@ -7,6 +7,8 @@ from core.entities.provider_configuration import ProviderModelBundle from core.file.file_obj import FileVar from core.model_runtime.entities.model_entities import AIModelEntity +from core.ops.base_trace_instance import BaseTraceInstance +from core.ops.trace_queue_manager import TraceQueueManager class InvokeFrom(Enum): @@ -90,7 +92,11 @@ class AppGenerateEntity(BaseModel): extras: dict[str, Any] = {} # tracing instance - tracing_instance: Optional[Any] = None + tracing_instance: Optional[BaseTraceInstance] = None + trace_manager: Optional[TraceQueueManager] = None + + class Config: + arbitrary_types_allowed = True class EasyUIBasedAppGenerateEntity(AppGenerateEntity): diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index 9b9a4fc56b06fa..36155a69f05928 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -2,7 +2,7 @@ import logging import time from collections.abc import Generator -from typing import Any, Optional, Union, cast +from typing import Optional, Union, cast from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom from core.app.entities.app_invoke_entities import ( @@ -44,6 +44,7 @@ ) from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.utils.encoders import jsonable_encoder +from core.ops.base_trace_instance import BaseTraceInstance from 
core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from core.prompt.utils.prompt_message_util import PromptMessageUtil from core.prompt.utils.prompt_template_parser import PromptTemplateParser @@ -103,7 +104,6 @@ def __init__(self, application_generate_entity: Union[ def process( self, - tracing_instance: Optional[Any] = None ) -> Union[ ChatbotAppBlockingResponse, CompletionAppBlockingResponse, @@ -124,7 +124,10 @@ def process( self._application_generate_entity.query ) - generator = self._process_stream_response(tracing_instance) + generator = self._process_stream_response( + tracing_instance=self._application_generate_entity.tracing_instance, + trace_manager=self._application_generate_entity.trace_manager + ) if self._stream: return self._to_stream_response(generator) else: @@ -202,7 +205,7 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) ) def _process_stream_response( - self, tracing_instance: Optional[Any] = None + self, tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None ) -> Generator[StreamResponse, None, None]: """ Process stream response. @@ -230,7 +233,7 @@ def _process_stream_response( yield self._message_replace_to_stream_response(answer=output_moderation_answer) # Save message - self._save_message(tracing_instance) + self._save_message(tracing_instance, trace_manager) yield self._message_end_to_stream_response() elif isinstance(event, QueueRetrieverResourcesEvent): @@ -276,7 +279,7 @@ def _process_stream_response( self._conversation_name_generate_thread.join() def _save_message( - self, tracing_instance: Optional[Any] = None, + self, tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None ) -> None: """ Save message. 
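The hunk below removes the last per-call TraceQueueManager() construction from this pipeline; after this patch the manager is built once per generation in the app generator, carried on the generate entity, and shared by the moderation, tool, message, and workflow trace sites. A condensed sketch of the pattern, with simplified stand-ins for the real classes in core.ops.trace_queue_manager:

    # Condensed sketch; the real TraceQueueManager/TraceTask in
    # core.ops.trace_queue_manager carry more state than shown here.
    from typing import Any, Optional


    class TraceTask:
        def __init__(self, instance: Any, name: str, **kwargs: Any):
            self.instance, self.name, self.kwargs = instance, name, kwargs


    class TraceQueueManager:
        def __init__(self) -> None:
            self.tasks: list[TraceTask] = []

        def add_trace_task(self, task: TraceTask) -> None:
            self.tasks.append(task)


    class AppGenerateEntity:
        """Carries the pair created once per generate() call."""

        def __init__(self, tracing_instance: Any = None,
                     trace_manager: Optional[TraceQueueManager] = None):
            self.tracing_instance = tracing_instance
            self.trace_manager = trace_manager


    def save_message(entity: AppGenerateEntity, message_id: str) -> None:
        # pipelines no longer build their own manager; they enqueue onto the
        # shared one carried by the entity, guarded by the tracing instance
        if entity.tracing_instance and entity.trace_manager:
            entity.trace_manager.add_trace_task(
                TraceTask(entity.tracing_instance, "MESSAGE_TRACE", message_id=message_id)
            )

Creating the manager next to the tracing instance in the app generator leaves exactly one place per generation that decides how queued trace tasks are flushed.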
@@ -309,7 +312,6 @@ def _save_message( db.session.commit() if tracing_instance: - trace_manager = TraceQueueManager() trace_manager.add_trace_task( TraceTask( tracing_instance, diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index ac589a01b35291..bb8bad8153150f 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_cycle_manage.py @@ -1,7 +1,7 @@ import json import time from datetime import datetime, timezone -from typing import Any, Optional, Union, cast +from typing import Optional, Union, cast from core.app.entities.app_invoke_entities import InvokeFrom from core.app.entities.queue_entities import ( @@ -103,7 +103,8 @@ def _workflow_run_success( total_steps: int, outputs: Optional[str] = None, conversation_id: Optional[str] = None, - tracing_instance: Optional[BaseTraceInstance] = None + tracing_instance: Optional[BaseTraceInstance] = None, + trace_manager: Optional[TraceQueueManager] = None ) -> WorkflowRun: """ Workflow run success @@ -128,7 +129,6 @@ def _workflow_run_success( db.session.close() if tracing_instance: - trace_manager = TraceQueueManager() trace_manager.add_trace_task( TraceTask( tracing_instance, @@ -148,7 +148,8 @@ def _workflow_run_failed( status: WorkflowRunStatus, error: str, conversation_id: Optional[str] = None, - tracing_instance: Optional[Any] = None + tracing_instance: Optional[BaseTraceInstance] = None, + trace_manager: Optional[TraceQueueManager] = None ) -> WorkflowRun: """ Workflow run failed @@ -172,7 +173,6 @@ def _workflow_run_failed( db.session.close() if tracing_instance: - trace_manager = TraceQueueManager() trace_manager.add_trace_task( TraceTask( tracing_instance, @@ -533,7 +533,8 @@ def _handle_node_finished(self, event: QueueNodeSucceededEvent | QueueNodeFailed def _handle_workflow_finished( self, event: QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent, tracing_instance: Optional[BaseTraceInstance] = None, - conversation_id: Optional[str] = None + conversation_id: Optional[str] = None, + trace_manager: Optional[TraceQueueManager] = None ) -> Optional[WorkflowRun]: workflow_run = db.session.query(WorkflowRun).filter( WorkflowRun.id == self._task_state.workflow_run_id).first() @@ -551,6 +552,8 @@ def _handle_workflow_finished( status=WorkflowRunStatus.STOPPED, error='Workflow stopped.', conversation_id=conversation_id, + tracing_instance=tracing_instance, + trace_manager=trace_manager ) latest_node_execution_info = self._task_state.latest_node_execution_info @@ -574,6 +577,7 @@ def _handle_workflow_finished( error=event.error, conversation_id=conversation_id, tracing_instance=tracing_instance, + trace_manager=trace_manager ) else: if self._task_state.latest_node_execution_info: @@ -591,6 +595,7 @@ def _handle_workflow_finished( outputs=outputs, conversation_id=conversation_id, tracing_instance=tracing_instance, + trace_manager=trace_manager ) self._task_state.workflow_run_id = workflow_run.id diff --git a/api/core/callback_handler/agent_tool_callback_handler.py b/api/core/callback_handler/agent_tool_callback_handler.py index 28a5fb8ec73e49..b075e315dbf0b3 100644 --- a/api/core/callback_handler/agent_tool_callback_handler.py +++ b/api/core/callback_handler/agent_tool_callback_handler.py @@ -3,6 +3,7 @@ from pydantic import BaseModel +from core.ops.base_trace_instance import BaseTraceInstance from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName _TEXT_COLOR_MAPPING = { @@ 
-55,7 +56,8 @@ def on_tool_end( tool_outputs: str, message_id: Optional[str] = None, timer: Optional[Any] = None, - tracing_instance: Optional[Any] = None, + tracing_instance: Optional[BaseTraceInstance] = None, + trace_manager: Optional[TraceQueueManager] = None ) -> None: """If not the final action, print out observation.""" print_text("\n[on_tool_end]\n", color=self.color) @@ -65,7 +67,6 @@ def on_tool_end( print_text("\n") if tracing_instance: - trace_manager = TraceQueueManager() trace_manager.add_trace_task( TraceTask( tracing_instance, diff --git a/api/core/moderation/input_moderation.py b/api/core/moderation/input_moderation.py index 7689e6b18b9e96..3817c0e24e5282 100644 --- a/api/core/moderation/input_moderation.py +++ b/api/core/moderation/input_moderation.py @@ -1,9 +1,10 @@ import logging -from typing import Any, Optional +from typing import Optional from core.app.app_config.entities import AppConfig from core.moderation.base import ModerationAction, ModerationException from core.moderation.factory import ModerationFactory +from core.ops.base_trace_instance import BaseTraceInstance from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from core.ops.utils import measure_time @@ -18,7 +19,8 @@ def check( inputs: dict, query: str, message_id: str, - tracing_instance: Optional[Any] = None + tracing_instance: Optional[BaseTraceInstance] = None, + trace_manager: Optional[TraceQueueManager] = None ) -> tuple[bool, dict, str]: """ Process sensitive_word_avoidance. @@ -48,7 +50,6 @@ def check( moderation_result = moderation_factory.moderation_for_inputs(inputs, query) if tracing_instance: - trace_manager = TraceQueueManager() trace_manager.add_trace_task( TraceTask( tracing_instance, diff --git a/api/core/ops/base_trace_instance.py b/api/core/ops/base_trace_instance.py index 3094c19f3a21f7..7d7eab5e3051aa 100644 --- a/api/core/ops/base_trace_instance.py +++ b/api/core/ops/base_trace_instance.py @@ -22,4 +22,4 @@ def trace(self, trace_info: BaseTraceInfo): Abstract method to trace activities. Subclasses must implement specific tracing logic for activities. """ - ... + ... 
\ No newline at end of file diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index 17eba251dc8551..456b87e7fbed89 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -6,9 +6,9 @@ class BaseTraceInfo(BaseModel): message_id: Optional[str] = None - message_data: Any - inputs: Union[str, dict[str, Any], list, None] - outputs: Union[str, dict[str, Any], list, None] + message_data: Optional[Any] = None + inputs: Optional[Union[str, dict[str, Any], list]] = None + outputs: Optional[Union[str, dict[str, Any], list]] = None start_time: Optional[datetime] = None end_time: Optional[datetime] = None metadata: dict[str, Any] @@ -50,7 +50,6 @@ class ModerationTraceInfo(BaseTraceInfo): query: str -# class SuggestedQuestionTraceInfo(BaseTraceInfo): total_tokens: int status: Optional[str] = None diff --git a/api/core/ops/langfuse_trace/entities/__init__.py b/api/core/ops/langfuse_trace/entities/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/api/core/ops/entities/langfuse_trace_entity.py b/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py similarity index 100% rename from api/core/ops/entities/langfuse_trace_entity.py rename to api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py index ba37441c8aa2cd..5bada62d5f1ca8 100644 --- a/api/core/ops/langfuse_trace/langfuse_trace.py +++ b/api/core/ops/langfuse_trace/langfuse_trace.py @@ -9,14 +9,6 @@ from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.ops.base_trace_instance import BaseTraceInstance from core.ops.entities.config_entity import LangfuseConfig -from core.ops.entities.langfuse_trace_entity import ( - GenerationUsage, - LangfuseGeneration, - LangfuseSpan, - LangfuseTrace, - LevelEnum, - UnitEnum, -) from core.ops.entities.trace_entity import ( BaseTraceInfo, DatasetRetrievalTraceInfo, @@ -27,6 +19,14 @@ ToolTraceInfo, WorkflowTraceInfo, ) +from core.ops.langfuse_trace.entities.langfuse_trace_entity import ( + GenerationUsage, + LangfuseGeneration, + LangfuseSpan, + LangfuseTrace, + LevelEnum, + UnitEnum, +) from core.ops.utils import filter_none_values from extensions.ext_database import db from models.model import MessageFile @@ -192,7 +192,7 @@ def message_trace( self.add_generation(langfuse_generation_data) - def moderation_trace(self, trace_info: ModerationTraceInfo, **kwargs): + def moderation_trace(self, trace_info: ModerationTraceInfo): span_data = LangfuseSpan( name="moderation", input=trace_info.inputs, @@ -210,7 +210,7 @@ def moderation_trace(self, trace_info: ModerationTraceInfo, **kwargs): self.add_span(langfuse_span_data=span_data) - def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo, **kwargs): + def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo): message_data = trace_info.message_data generation_usage = GenerationUsage( totalTokens=len(str(trace_info.suggested_question)), @@ -235,7 +235,7 @@ def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo, **kwa self.add_generation(langfuse_generation_data=generation_data) - def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo, **kwargs): + def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo): dataset_retrieval_span_data = LangfuseSpan( name="dataset_retrieval", input=trace_info.inputs, @@ -248,7 +248,7 @@ def 
dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo, **kwarg self.add_span(langfuse_span_data=dataset_retrieval_span_data) - def tool_trace(self, trace_info: ToolTraceInfo, **kwargs): + def tool_trace(self, trace_info: ToolTraceInfo): tool_span_data = LangfuseSpan( name=trace_info.tool_name, input=trace_info.tool_inputs, @@ -263,7 +263,7 @@ def tool_trace(self, trace_info: ToolTraceInfo, **kwargs): self.add_span(langfuse_span_data=tool_span_data) - def generate_name_trace(self, trace_info: GenerateNameTraceInfo, **kwargs): + def generate_name_trace(self, trace_info: GenerateNameTraceInfo): name_generation_trace_data = LangfuseTrace( name="generate_name", input=trace_info.inputs, diff --git a/api/core/ops/langsmith_trace/entities/__init__.py b/api/core/ops/langsmith_trace/entities/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/api/core/ops/entities/langsmith_trace_entity.py b/api/core/ops/langsmith_trace/entities/langsmith_trace_entity.py similarity index 100% rename from api/core/ops/entities/langsmith_trace_entity.py rename to api/core/ops/langsmith_trace/entities/langsmith_trace_entity.py diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py index a2db80e9c248c8..b3ec7bf657fe38 100644 --- a/api/core/ops/langsmith_trace/langsmith_trace.py +++ b/api/core/ops/langsmith_trace/langsmith_trace.py @@ -8,7 +8,6 @@ from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.ops.base_trace_instance import BaseTraceInstance from core.ops.entities.config_entity import LangSmithConfig -from core.ops.entities.langsmith_trace_entity import LangSmithRunModel, LangSmithRunType, LangSmithRunUpdateModel from core.ops.entities.trace_entity import ( BaseTraceInfo, DatasetRetrievalTraceInfo, @@ -19,6 +18,11 @@ ToolTraceInfo, WorkflowTraceInfo, ) +from core.ops.langsmith_trace.entities.langsmith_trace_entity import ( + LangSmithRunModel, + LangSmithRunType, + LangSmithRunUpdateModel, +) from core.ops.utils import filter_none_values from extensions.ext_database import db from models.model import MessageFile @@ -65,11 +69,11 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo): total_tokens=trace_info.total_tokens, id=trace_info.workflow_run_id, name=f"workflow_run_{trace_info.workflow_run_id}", - inputs=trace_info.query, + inputs=trace_info.workflow_run_inputs, run_type=LangSmithRunType.tool, start_time=trace_info.workflow_data.created_at, end_time=trace_info.workflow_data.finished_at, - outputs=trace_info.workflow_data.workflow_run_outputs, + outputs=trace_info.workflow_run_outputs, extra={ "metadata": trace_info.metadata, }, @@ -151,7 +155,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo): self.add_run(langsmith_run) - def message_trace(self, trace_info: MessageTraceInfo, **kwargs): + def message_trace(self, trace_info: MessageTraceInfo): # get message file data file_list = trace_info.file_list message_file_data: MessageFile = trace_info.message_file_data @@ -202,7 +206,7 @@ def message_trace(self, trace_info: MessageTraceInfo, **kwargs): ) self.add_run(llm_run) - def moderation_trace(self, trace_info: ModerationTraceInfo, **kwargs): + def moderation_trace(self, trace_info: ModerationTraceInfo): langsmith_run = LangSmithRunModel( name="moderation", inputs=trace_info.inputs, @@ -224,7 +228,7 @@ def moderation_trace(self, trace_info: ModerationTraceInfo, **kwargs): self.add_run(langsmith_run) - def suggested_question_trace(self, trace_info: 
SuggestedQuestionTraceInfo, **kwargs): + def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo): message_data = trace_info.message_data suggested_question_run = LangSmithRunModel( name="suggested_question", @@ -242,7 +246,7 @@ def suggested_question_trace(self, trace_info: SuggestedQuestionTraceInfo, **kwa self.add_run(suggested_question_run) - def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo, **kwargs): + def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo): dataset_retrieval_run = LangSmithRunModel( name="dataset_retrieval", inputs=trace_info.inputs, @@ -259,7 +263,7 @@ def dataset_retrieval_trace(self, trace_info: DatasetRetrievalTraceInfo, **kwarg self.add_run(dataset_retrieval_run) - def tool_trace(self, trace_info: ToolTraceInfo, **kwargs): + def tool_trace(self, trace_info: ToolTraceInfo): tool_run = LangSmithRunModel( name=trace_info.tool_name, inputs=trace_info.tool_inputs, @@ -277,7 +281,7 @@ def tool_trace(self, trace_info: ToolTraceInfo, **kwargs): self.add_run(tool_run) - def generate_name_trace(self, trace_info: GenerateNameTraceInfo, **kwargs): + def generate_name_trace(self, trace_info: GenerateNameTraceInfo): name_run = LangSmithRunModel( name="generate_name", inputs=trace_info.inputs, diff --git a/api/core/ops/trace_queue_manager.py b/api/core/ops/trace_queue_manager.py index 444b983b34d303..49b4959f6d03a6 100644 --- a/api/core/ops/trace_queue_manager.py +++ b/api/core/ops/trace_queue_manager.py @@ -6,6 +6,8 @@ from enum import Enum from typing import Any +from flask import Flask, current_app + from core.ops.entities.trace_entity import ( DatasetRetrievalTraceInfo, GenerateNameTraceInfo, @@ -53,10 +55,6 @@ def __init__( self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") def execute(self): - # method_name, processed_kwargs = self.preprocess() - # method = self.trace_instance.trace - # method(**processed_kwargs) - method_name, trace_info = self.preprocess() method = self.trace_instance.trace method(trace_info) @@ -388,21 +386,23 @@ def generate_name_trace(self, conversation_id, timer, **kwargs): class TraceQueueManager: def __init__(self): - from app import app - self.app = app self.queue = queue.Queue() self.is_running = True - self.thread = threading.Thread(target=self.process_queue) + self.thread = threading.Thread( + target=self.process_queue, kwargs={ + 'flask_app': current_app._get_current_object() + } + ) self.thread.start() def stop(self): self.is_running = False - def process_queue(self): - with self.app.app_context(): + def process_queue(self, flask_app: Flask): + with flask_app.app_context(): while self.is_running: try: - task = self.queue.get(timeout=1) + task = self.queue.get(timeout=60) task.execute() self.queue.task_done() except queue.Empty: diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index 8836f711dd0f0a..c8d940098b4104 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -12,7 +12,7 @@ from core.model_runtime.entities.message_entities import PromptMessageTool from core.model_runtime.entities.model_entities import ModelFeature, ModelType from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.trace_queue_manager import TraceTask, TraceTaskName from core.ops.utils import measure_time from core.rag.datasource.retrieval_service 
import RetrievalService
 from core.rag.models.document import Document
@@ -358,9 +358,9 @@ def _on_retrival_end(
 
         # get tracing instance
         tracing_instance = self.application_generate_entity.tracing_instance if self.application_generate_entity else None
+        trace_manager = self.application_generate_entity.trace_manager if self.application_generate_entity else None
 
         if tracing_instance:
-            trace_manager = TraceQueueManager()
             trace_manager.add_trace_task(
                 TraceTask(
                     tracing_instance,
diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py
index 534f8855333486..e9ac72839171cd 100644
--- a/api/core/tools/tool_engine.py
+++ b/api/core/tools/tool_engine.py
@@ -10,6 +10,7 @@
 from core.callback_handler.workflow_tool_callback_handler import DifyWorkflowCallbackHandler
 from core.file.file_obj import FileTransferMethod
 from core.ops.base_trace_instance import BaseTraceInstance
+from core.ops.trace_queue_manager import TraceQueueManager
 from core.tools.entities.tool_entities import ToolInvokeMessage, ToolInvokeMessageBinary, ToolInvokeMeta, ToolParameter
 from core.tools.errors import (
     ToolEngineInvokeError,
@@ -36,7 +37,8 @@ def agent_invoke(
         tool: Tool, tool_parameters: Union[str, dict],
         user_id: str, tenant_id: str, message: Message, invoke_from: InvokeFrom,
         agent_tool_callback: DifyAgentCallbackHandler,
-        tracing_instance: Optional[BaseTraceInstance] = None
+        tracing_instance: Optional[BaseTraceInstance] = None,
+        trace_manager: Optional[TraceQueueManager] = None
     ) -> tuple[str, list[tuple[MessageFile, bool]], ToolInvokeMeta]:
         """
         Agent invokes the tool with the given arguments.
@@ -90,6 +92,7 @@ def agent_invoke(
                 tool_outputs=plain_text,
                 message_id=message.id,
                 tracing_instance=tracing_instance,
+                trace_manager=trace_manager
             )
 
             # transform tool invoke message to get LLM friendly message

From f815e7e21bbce2205e5fe64e175f1e6559c0351c Mon Sep 17 00:00:00 2001
From: Joe <1264204425@qq.com>
Date: Mon, 24 Jun 2024 21:20:18 +0800
Subject: [PATCH 34/65] feat: increase workflow_node_executions created_at
 time precision

The server default for workflow_node_executions.created_at moves from
CURRENT_TIMESTAMP(0) to CURRENT_TIMESTAMP(3), keeping millisecond
precision on node execution timestamps.
---
 api/core/ops/langfuse_trace/langfuse_trace.py |  2 +-
 ...workflow_node_executions_time_precision.py | 33 +++++++++++++++++++
 api/models/workflow.py                        |  2 +-
 3 files changed, 35 insertions(+), 2 deletions(-)
 create mode 100644 api/migrations/versions/fef88df743eb.update_workflow_node_executions_time_precision.py

diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py
index 5bada62d5f1ca8..4b5394449fb99a 100644
--- a/api/core/ops/langfuse_trace/langfuse_trace.py
+++ b/api/core/ops/langfuse_trace/langfuse_trace.py
@@ -361,4 +361,4 @@ def encrypt_config(cls, tenant_id, config: LangfuseConfig):
     def decrypt_config(cls, tenant_id, config: LangfuseConfig):
         decrypt_public_key = decrypt_token(tenant_id, config.public_key)
         decrypt_secret_key = decrypt_token(tenant_id, config.secret_key)
-        return LangfuseConfig(public_key=decrypt_public_key, secret_key=decrypt_secret_key, host=config.host)
+        return LangfuseConfig(public_key=decrypt_public_key, secret_key=decrypt_secret_key, host=config.host)
\ No newline at end of file
diff --git a/api/migrations/versions/fef88df743eb.update_workflow_node_executions_time_precision.py b/api/migrations/versions/fef88df743eb.update_workflow_node_executions_time_precision.py
new file mode 100644
index 00000000000000..40f52437211fb3
--- /dev/null
+++ b/api/migrations/versions/fef88df743eb.update_workflow_node_executions_time_precision.py
@@ -0,0 +1,33 @@
+"""update workflow_node_executions time precision
+
+Revision ID: fef88df743eb
+Revises: 2a3aebbbf4bb
+Create Date: 2023-11-03 13:08:23.246396
+
+"""
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision = 'fef88df743eb'
+down_revision = '2a3aebbbf4bb'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # raise the created_at server default precision from CURRENT_TIMESTAMP(0)
+    # (whole seconds) to CURRENT_TIMESTAMP(3) (milliseconds)
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table('workflow_node_executions') as batch_op:
+        batch_op.alter_column('created_at', server_default=None)
+        batch_op.alter_column('created_at', type_=sa.DateTime(), nullable=False, server_default=sa.text('CURRENT_TIMESTAMP(3)'))
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table('workflow_node_executions') as batch_op:
+        batch_op.alter_column('created_at', server_default=None)
+        batch_op.alter_column('created_at', type_=sa.DateTime(), nullable=False, server_default=sa.text('CURRENT_TIMESTAMP(0)'))
+    # ### end Alembic commands ###
diff --git a/api/models/workflow.py b/api/models/workflow.py
index d9bc7848787925..85714c6fb1e87a 100644
--- a/api/models/workflow.py
+++ b/api/models/workflow.py
@@ -443,7 +443,7 @@ class WorkflowNodeExecution(db.Model):
     error = db.Column(db.Text)
     elapsed_time = db.Column(db.Float, nullable=False, server_default=db.text('0'))
     execution_metadata = db.Column(db.Text)
-    created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)'))
+    created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(3)'))
     created_by_role = db.Column(db.String(255), nullable=False)
     created_by = db.Column(StringUUID, nullable=False)
     finished_at = db.Column(db.DateTime)

From ad45808d7b850d31350f0fdc29667a5ba90bff2e Mon Sep 17 00:00:00 2001
From: Joe <1264204425@qq.com>
Date: Mon, 24 Jun 2024 21:43:46 +0800
Subject: [PATCH 35/65] fix: handle obfuscated keys when updating trace config

When the client echoes back obfuscated ("*") keys, keep the stored
encrypted values instead of encrypting the masked strings.
---
 api/core/ops/langfuse_trace/langfuse_trace.py | 22 +++++++++++++++-
 .../ops/langsmith_trace/langsmith_trace.py    |  6 ++++-
 api/services/ops_trace/ops_trace_service.py   | 26 +++++++++++--------
 3 files changed, 41 insertions(+), 13 deletions(-)

diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py
index 4b5394449fb99a..2a3526b3ee5ed7 100644
--- a/api/core/ops/langfuse_trace/langfuse_trace.py
+++ b/api/core/ops/langfuse_trace/langfuse_trace.py
@@ -352,7 +352,27 @@ def obfuscate_config(cls, config: LangfuseConfig):
         return LangfuseConfig(public_key=public_key, secret_key=secret_key, host=config.host)
 
     @classmethod
-    def encrypt_config(cls, tenant_id, config: LangfuseConfig):
+    def encrypt_config(cls, tenant_id, config: LangfuseConfig, current_trace_config: dict = None):
+        if "*" in config.public_key and "*" in config.secret_key:
+            return LangfuseConfig(
+                public_key=current_trace_config.get("public_key"),
+                secret_key=current_trace_config.get("secret_key"),
+                host=config.host
+            )
+        if "*" in config.public_key:
+            decrypt_secret_key = encrypt_token(tenant_id, config.secret_key)
+            return LangfuseConfig(
+                public_key=current_trace_config.get("public_key"),
+                secret_key=decrypt_secret_key,
+                host=config.host
+            )
+        if "*" in config.secret_key:
+            decrypt_public_key = encrypt_token(tenant_id, config.public_key)
+            return LangfuseConfig(
+
public_key=decrypt_public_key, + secret_key=current_trace_config.get("secret_key"), + host=config.host + ) decrypt_public_key = encrypt_token(tenant_id, config.public_key) decrypt_secret_key = encrypt_token(tenant_id, config.secret_key) return LangfuseConfig(public_key=decrypt_public_key, secret_key=decrypt_secret_key, host=config.host) diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py index b3ec7bf657fe38..259077ee5c9f21 100644 --- a/api/core/ops/langsmith_trace/langsmith_trace.py +++ b/api/core/ops/langsmith_trace/langsmith_trace.py @@ -336,7 +336,11 @@ def obfuscate_config(cls, config: LangSmithConfig): return LangSmithConfig(api_key=api_key, project=config.project, endpoint=config.endpoint) @classmethod - def encrypt_config(cls, tenant_id, config: LangSmithConfig): + def encrypt_config(cls, tenant_id, config: LangSmithConfig, current_trace_config=None): + if "*" in config.api_key: + return LangSmithConfig( + api_key=current_trace_config.get("api_key"), project=config.project, endpoint=config.endpoint + ) api_key = encrypt_token(tenant_id, config.api_key) return LangSmithConfig(api_key=api_key, project=config.project, endpoint=config.endpoint) diff --git a/api/services/ops_trace/ops_trace_service.py b/api/services/ops_trace/ops_trace_service.py index 220652e7a75199..0db2bae4e26529 100644 --- a/api/services/ops_trace/ops_trace_service.py +++ b/api/services/ops_trace/ops_trace_service.py @@ -85,25 +85,26 @@ def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c raise ValueError(f"Invalid tracing provider: {tracing_provider}") # api check - if not cls.check_trace_config_is_effective(tracing_config, tracing_provider): - raise ValueError("Invalid Credentials") + # if not cls.check_trace_config_is_effective(tracing_config, tracing_provider): + # raise ValueError("Invalid Credentials") # check if trace config already exists - trace_config = db.session.query(TraceAppConfig).filter( + current_trace_config = db.session.query(TraceAppConfig).filter( TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider ).first() - if not trace_config: + if not current_trace_config: return None # get tenant id tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id - tracing_config = cls.encrypt_tracing_config(tenant_id, tracing_provider, tracing_config) - - trace_config.tracing_config = tracing_config + tracing_config = cls.encrypt_tracing_config( + tenant_id, tracing_provider, tracing_config, current_trace_config.tracing_config + ) + current_trace_config.tracing_config = tracing_config db.session.commit() - return trace_config.to_dict() + return current_trace_config.to_dict() @classmethod def delete_tracing_app_config(cls, app_id: str, tracing_provider: str): @@ -126,20 +127,23 @@ def delete_tracing_app_config(cls, app_id: str, tracing_provider: str): return True @classmethod - def encrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_config: dict): + def encrypt_tracing_config( + cls, tenant_id: str, tracing_provider: str, tracing_config: dict, current_trace_config=None + ): """ Encrypt tracing config :param tenant_id: tenant id :param tracing_provider: tracing provider :param tracing_config: tracing config + :param current_trace_config: current trace config :return: """ if tracing_provider == TracingProviderEnum.LANGFUSE.value: tracing_config = LangfuseConfig(**tracing_config) - tracing_config = LangFuseDataTrace.encrypt_config(tenant_id, tracing_config) 
+ tracing_config = LangFuseDataTrace.encrypt_config(tenant_id, tracing_config, current_trace_config) elif tracing_provider == TracingProviderEnum.LANGSMITH.value: tracing_config = LangSmithConfig(**tracing_config) - tracing_config = LangSmithDataTrace.encrypt_config(tenant_id, tracing_config) + tracing_config = LangSmithDataTrace.encrypt_config(tenant_id, tracing_config, current_trace_config) return tracing_config.model_dump() From c787fac0d3a6d2f1712c3286254416b9128ab97b Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 22:27:31 +0800 Subject: [PATCH 36/65] fix: remove superfluous tracing instance --- .../app/apps/advanced_chat/app_generator.py | 2 +- .../advanced_chat/generate_task_pipeline.py | 5 +--- api/core/app/apps/agent_chat/app_generator.py | 24 +++++++------------ api/core/app/apps/completion/app_generator.py | 1 - api/core/app/apps/workflow/app_generator.py | 4 +--- api/core/tools/tool/workflow_tool.py | 5 ++-- 6 files changed, 14 insertions(+), 27 deletions(-) diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index a9ef69ce380131..1143e6255e27ab 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -338,7 +338,7 @@ def _handle_advanced_chat_response(self, application_generate_entity: AdvancedCh ) try: - return generate_task_pipeline.process(workflow) + return generate_task_pipeline.process() except ValueError as e: if e.args[0] == "I/O operation on closed file.": # ignore this error raise GenerateTaskStoppedException() diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 421cd8667f97c1..17dc05f2d7f8f8 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -115,10 +115,7 @@ def __init__( self._stream_generate_routes = self._get_stream_generate_routes() self._conversation_name_generate_thread = None - def process( - self, - workflow: Optional[Workflow] = None - ) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]: + def process(self) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]: """ Process generate task pipeline. 
:return: diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index 05f3c24897baf1..ffa65f5fb395dc 100644 --- a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -3,7 +3,7 @@ import threading import uuid from collections.abc import Generator -from typing import Any, Optional, Union +from typing import Any, Union from flask import Flask, current_app from pydantic import ValidationError @@ -71,12 +71,7 @@ def generate(self, app_model: App, app_model=app_model, conversation=conversation ) - - # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id, - ) - + # validate override model config override_model_config_dict = None if args.get('model_config'): @@ -177,13 +172,13 @@ def generate(self, app_model: App, invoke_from=invoke_from ) - def _generate_worker(self, flask_app: Flask, - application_generate_entity: AgentChatAppGenerateEntity, - queue_manager: AppQueueManager, - conversation_id: str, - message_id: str, - tracing_instance: Optional[Any] = None - ) -> None: + def _generate_worker( + self, flask_app: Flask, + application_generate_entity: AgentChatAppGenerateEntity, + queue_manager: AppQueueManager, + conversation_id: str, + message_id: str, + ) -> None: """ Generate worker in a new thread. :param flask_app: Flask app @@ -191,7 +186,6 @@ def _generate_worker(self, flask_app: Flask, :param queue_manager: queue manager :param conversation_id: conversation ID :param message_id: message ID - :param tracing_instance: tracing instance :return: """ with flask_app.app_context(): diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index 609519ca77e8e2..d9528eb41bca82 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -315,7 +315,6 @@ def generate_more_like_this(self, app_model: App, message=message, user=user, stream=stream, - tracing_instance=tracing_instance, ) return CompletionAppGenerateResponseConverter.convert( diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index b4991e454c147c..30f0ec8db76970 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -3,7 +3,7 @@ import threading import uuid from collections.abc import Generator -from typing import Any, Optional, Union +from typing import Union from flask import Flask, current_app from pydantic import ValidationError @@ -39,7 +39,6 @@ def generate( invoke_from: InvokeFrom, stream: bool = True, call_depth: int = 0, - tracing_instance: Optional[Any] = None ) -> Union[dict, Generator[dict, None, None]]: """ Generate App response. 
@@ -51,7 +50,6 @@ def generate(
         :param invoke_from: invoke from source
         :param stream: is stream
         :param call_depth: call depth
-        :param tracing_instance: ops tracing instance
         """
         inputs = args['inputs']
 
diff --git a/api/core/tools/tool/workflow_tool.py b/api/core/tools/tool/workflow_tool.py
index 2c362d80c47831..071081303c3b2a 100644
--- a/api/core/tools/tool/workflow_tool.py
+++ b/api/core/tools/tool/workflow_tool.py
@@ -1,7 +1,7 @@
 import json
 import logging
 from copy import deepcopy
-from typing import Any, Optional, Union
+from typing import Any, Union
 
 from core.file.file_obj import FileTransferMethod, FileVar
 from core.tools.entities.tool_entities import ToolInvokeMessage, ToolParameter, ToolProviderType
@@ -33,7 +33,7 @@ def tool_provider_type(self) -> ToolProviderType:
         return ToolProviderType.WORKFLOW
 
     def _invoke(
-        self, user_id: str, tool_parameters: dict[str, Any], tracing_instance: Optional[Any] = None
+        self, user_id: str, tool_parameters: dict[str, Any]
     ) -> Union[ToolInvokeMessage, list[ToolInvokeMessage]]:
         """
         invoke the tool
@@ -57,7 +57,6 @@ def _invoke(
             invoke_from=self.runtime.invoke_from,
             stream=False,
             call_depth=self.workflow_call_depth + 1,
-            tracing_instance=tracing_instance,
         )
 
         data = result.get('data', {})

From 53c033e76adfa45adda13c1565108550727356f7 Mon Sep 17 00:00:00 2001
From: Joe <1264204425@qq.com>
Date: Mon, 24 Jun 2024 22:41:55 +0800
Subject: [PATCH 37/65] feat: resolve the trace instance through the trace
 queue manager

TraceQueueManager now resolves the ops trace instance itself from the
given app, conversation or message id, so callers only pass the trace
manager instead of threading tracing_instance through every call site.
---
 api/core/agent/cot_agent_runner.py            |  4 ---
 api/core/agent/fc_agent_runner.py             |  2 --
 .../app/apps/advanced_chat/app_generator.py   |  8 +----
 .../advanced_chat/generate_task_pipeline.py   |  7 ++--
 api/core/app/apps/agent_chat/app_generator.py |  9 ++---
 api/core/app/apps/base_app_runner.py          |  1 -
 api/core/app/apps/chat/app_generator.py       |  7 +---
 api/core/app/apps/completion/app_generator.py |  6 +---
 api/core/app/apps/workflow/app_generator.py   |  7 +---
 .../apps/workflow/generate_task_pipeline.py   |  5 +--
 api/core/app/entities/app_invoke_entities.py  |  2 --
 .../easy_ui_based_generate_task_pipeline.py   | 22 +++++-------
 .../task_pipeline/workflow_cycle_manage.py    | 35 +++++++------------
 .../agent_tool_callback_handler.py            | 22 +++++-------
 api/core/llm_generator/llm_generator.py       | 26 ++++++--------
 api/core/moderation/input_moderation.py       | 22 +++++-------
 api/core/ops/trace_queue_manager.py           | 23 ++++++------
 api/core/rag/retrieval/dataset_retrieval.py   | 17 ++++-----
 api/core/tools/tool_engine.py                 |  3 --
 api/services/message_service.py               | 23 +++++-------
 20 files changed, 86 insertions(+), 165 deletions(-)

diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py
index c00a5e34a93411..54a90b098ae276 100644
--- a/api/core/agent/cot_agent_runner.py
+++ b/api/core/agent/cot_agent_runner.py
@@ -15,7 +15,6 @@
     ToolPromptMessage,
     UserPromptMessage,
 )
-from core.ops.base_trace_instance import BaseTraceInstance
 from core.ops.trace_queue_manager import TraceQueueManager
 from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform
 from core.tools.entities.tool_entities import ToolInvokeMeta
@@ -219,7 +218,6 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
                     action=scratchpad.action,
                     tool_instances=tool_instances,
                     message_file_ids=message_file_ids,
-                    tracing_instance=tracing_instance,
                     trace_manager=trace_manager,
                 )
                 scratchpad.observation = tool_invoke_response
@@ -290,7 +288,6 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage):
 
     def _handle_invoke_action(self,
action: AgentScratchpadUnit.Action, tool_instances: dict[str, Tool], message_file_ids: list[str], - tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None ) -> tuple[str, ToolInvokeMeta]: """ @@ -323,7 +320,6 @@ def _handle_invoke_action(self, action: AgentScratchpadUnit.Action, message=self.message, invoke_from=self.application_generate_entity.invoke_from, agent_tool_callback=self.agent_callback, - tracing_instance=tracing_instance, trace_manager=trace_manager, ) diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py index b1265007956676..73b78a7b1b9979 100644 --- a/api/core/agent/fc_agent_runner.py +++ b/api/core/agent/fc_agent_runner.py @@ -51,7 +51,6 @@ def run(self, final_answer = '' # get tracing instance - tracing_instance = app_generate_entity.tracing_instance trace_manager = app_generate_entity.trace_manager def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): @@ -247,7 +246,6 @@ def increase_usage(final_llm_usage_dict: dict[str, LLMUsage], usage: LLMUsage): message=self.message, invoke_from=self.application_generate_entity.invoke_from, agent_tool_callback=self.agent_callback, - tracing_instance=tracing_instance, trace_manager=trace_manager, ) # publish files diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index 1143e6255e27ab..f9b62808e001e5 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -25,7 +25,6 @@ from models.account import Account from models.model import App, Conversation, EndUser, Message from models.workflow import Workflow -from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -48,7 +47,6 @@ def generate( :param args: request args :param invoke_from: invoke from source :param stream: is stream - :param tracing_instance: tracing instance """ if not args.get('query'): raise ValueError('query is required') @@ -89,10 +87,7 @@ def generate( ) # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id - ) - trace_manager = TraceQueueManager() + trace_manager = TraceQueueManager(app_id=app_model.id) # init application generate entity application_generate_entity = AdvancedChatAppGenerateEntity( @@ -106,7 +101,6 @@ def generate( stream=stream, invoke_from=invoke_from, extras=extras, - tracing_instance=tracing_instance, trace_manager=trace_manager ) diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 17dc05f2d7f8f8..8fad5126a42b88 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -42,7 +42,6 @@ from core.file.file_obj import FileVar from core.model_runtime.entities.llm_entities import LLMUsage from core.model_runtime.utils.encoders import jsonable_encoder -from core.ops.base_trace_instance import BaseTraceInstance from core.ops.trace_queue_manager import TraceQueueManager from core.workflow.entities.node_entities import NodeType, SystemVariable from core.workflow.nodes.answer.answer_node import AnswerNode @@ -131,7 +130,6 @@ def process(self) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStrea ) generator = self._process_stream_response( - tracing_instance=self._application_generate_entity.tracing_instance, 
trace_manager=self._application_generate_entity.trace_manager ) if self._stream: @@ -185,7 +183,7 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) ) def _process_stream_response( - self, tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None + self, trace_manager: Optional[TraceQueueManager] = None ) -> Generator[StreamResponse, None, None]: """ Process stream response. @@ -259,8 +257,7 @@ def _process_stream_response( self._handle_iteration_operation(event) elif isinstance(event, QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent): workflow_run = self._handle_workflow_finished( - event, tracing_instance=tracing_instance, conversation_id=self._conversation.id, - trace_manager=trace_manager + event, conversation_id=self._conversation.id, trace_manager=trace_manager ) if workflow_run: yield self._workflow_finish_to_stream_response( diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index ffa65f5fb395dc..6335040c01992c 100644 --- a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -23,7 +23,6 @@ from extensions.ext_database import db from models.account import Account from models.model import App, EndUser -from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -71,7 +70,7 @@ def generate(self, app_model: App, app_model=app_model, conversation=conversation ) - + # validate override model config override_model_config_dict = None if args.get('model_config'): @@ -106,10 +105,7 @@ def generate(self, app_model: App, ) # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id, - ) - trace_manager = TraceQueueManager() + trace_manager = TraceQueueManager(app_model.id) # init application generate entity application_generate_entity = AgentChatAppGenerateEntity( @@ -125,7 +121,6 @@ def generate(self, app_model: App, invoke_from=invoke_from, extras=extras, call_depth=0, - tracing_instance=tracing_instance, trace_manager=trace_manager ) diff --git a/api/core/app/apps/base_app_runner.py b/api/core/app/apps/base_app_runner.py index e81cfb3a2f0bd3..58c7d04b8348f8 100644 --- a/api/core/app/apps/base_app_runner.py +++ b/api/core/app/apps/base_app_runner.py @@ -364,7 +364,6 @@ def moderation_for_inputs( inputs=inputs, query=query if query else '', message_id=message_id, - tracing_instance=app_generate_entity.tracing_instance, trace_manager=app_generate_entity.trace_manager ) diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index 24c0d861699411..08db97ff6c6e9e 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -23,7 +23,6 @@ from extensions.ext_database import db from models.account import Account from models.model import App, EndUser -from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -105,10 +104,7 @@ def generate( ) # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id, - ) - trace_manager = TraceQueueManager() + trace_manager = TraceQueueManager(app_model.id) # init application generate entity application_generate_entity = ChatAppGenerateEntity( @@ -123,7 +119,6 @@ def generate( stream=stream, invoke_from=invoke_from, extras=extras, - tracing_instance=tracing_instance, trace_manager=trace_manager ) diff 
--git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index d9528eb41bca82..45f16ee2b7daef 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -97,10 +97,7 @@ def generate(self, app_model: App, ) # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id - ) - trace_manager = TraceQueueManager() + trace_manager = TraceQueueManager(app_model.id) # init application generate entity application_generate_entity = CompletionAppGenerateEntity( @@ -114,7 +111,6 @@ def generate(self, app_model: App, stream=stream, invoke_from=invoke_from, extras=extras, - tracing_instance=tracing_instance, trace_manager=trace_manager ) diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index 30f0ec8db76970..c57243d7dcb1b9 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -25,7 +25,6 @@ from models.account import Account from models.model import App, EndUser from models.workflow import Workflow -from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -73,10 +72,7 @@ def generate( ) # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id, - ) - trace_manager = TraceQueueManager() + trace_manager = TraceQueueManager(app_model.id) # init application generate entity application_generate_entity = WorkflowAppGenerateEntity( @@ -88,7 +84,6 @@ def generate( stream=stream, invoke_from=invoke_from, call_depth=call_depth, - tracing_instance=tracing_instance, trace_manager=trace_manager ) diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index f6e6e9a0fae84e..a1160ed4f21234 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -36,7 +36,6 @@ ) from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline from core.app.task_pipeline.workflow_cycle_manage import WorkflowCycleManage -from core.ops.base_trace_instance import BaseTraceInstance from core.ops.trace_queue_manager import TraceQueueManager from core.workflow.entities.node_entities import NodeType, SystemVariable from core.workflow.nodes.end.end_node import EndNode @@ -107,7 +106,6 @@ def process(self) -> Union[WorkflowAppBlockingResponse, Generator[WorkflowAppStr db.session.close() generator = self._process_stream_response( - tracing_instance=self._application_generate_entity.tracing_instance, trace_manager=self._application_generate_entity.trace_manager ) if self._stream: @@ -165,7 +163,6 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) def _process_stream_response( self, - tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None ) -> Generator[StreamResponse, None, None]: """ @@ -225,7 +222,7 @@ def _process_stream_response( self._handle_iteration_operation(event) elif isinstance(event, QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent): workflow_run = self._handle_workflow_finished( - event, tracing_instance=tracing_instance, trace_manager=trace_manager + event, trace_manager=trace_manager ) # save workflow app log diff --git a/api/core/app/entities/app_invoke_entities.py b/api/core/app/entities/app_invoke_entities.py index 
44eb999c5c54e7..5896714c996f0e 100644 --- a/api/core/app/entities/app_invoke_entities.py +++ b/api/core/app/entities/app_invoke_entities.py @@ -7,7 +7,6 @@ from core.entities.provider_configuration import ProviderModelBundle from core.file.file_obj import FileVar from core.model_runtime.entities.model_entities import AIModelEntity -from core.ops.base_trace_instance import BaseTraceInstance from core.ops.trace_queue_manager import TraceQueueManager @@ -92,7 +91,6 @@ class AppGenerateEntity(BaseModel): extras: dict[str, Any] = {} # tracing instance - tracing_instance: Optional[BaseTraceInstance] = None trace_manager: Optional[TraceQueueManager] = None class Config: diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index 36155a69f05928..65c639bbd7116a 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -44,7 +44,6 @@ ) from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.utils.encoders import jsonable_encoder -from core.ops.base_trace_instance import BaseTraceInstance from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from core.prompt.utils.prompt_message_util import PromptMessageUtil from core.prompt.utils.prompt_template_parser import PromptTemplateParser @@ -125,7 +124,6 @@ def process( ) generator = self._process_stream_response( - tracing_instance=self._application_generate_entity.tracing_instance, trace_manager=self._application_generate_entity.trace_manager ) if self._stream: @@ -205,7 +203,7 @@ def _to_stream_response(self, generator: Generator[StreamResponse, None, None]) ) def _process_stream_response( - self, tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None + self, trace_manager: Optional[TraceQueueManager] = None ) -> Generator[StreamResponse, None, None]: """ Process stream response. @@ -233,7 +231,7 @@ def _process_stream_response( yield self._message_replace_to_stream_response(answer=output_moderation_answer) # Save message - self._save_message(tracing_instance, trace_manager) + self._save_message(trace_manager) yield self._message_end_to_stream_response() elif isinstance(event, QueueRetrieverResourcesEvent): @@ -279,7 +277,7 @@ def _process_stream_response( self._conversation_name_generate_thread.join() def _save_message( - self, tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None + self, trace_manager: Optional[TraceQueueManager] = None ) -> None: """ Save message. 
@@ -311,15 +309,13 @@ def _save_message( db.session.commit() - if tracing_instance: - trace_manager.add_trace_task( - TraceTask( - tracing_instance, - TraceTaskName.MESSAGE_TRACE, - conversation_id=self._conversation.id, - message_id=self._message.id - ) + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.MESSAGE_TRACE, + conversation_id=self._conversation.id, + message_id=self._message.id ) + ) message_was_created.send( self._message, diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index bb8bad8153150f..49f407c11ed791 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_cycle_manage.py @@ -22,7 +22,6 @@ from core.app.task_pipeline.workflow_iteration_cycle_manage import WorkflowIterationCycleManage from core.file.file_obj import FileVar from core.model_runtime.utils.encoders import jsonable_encoder -from core.ops.base_trace_instance import BaseTraceInstance from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from core.tools.tool_manager import ToolManager from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeType @@ -103,7 +102,6 @@ def _workflow_run_success( total_steps: int, outputs: Optional[str] = None, conversation_id: Optional[str] = None, - tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None ) -> WorkflowRun: """ @@ -128,15 +126,13 @@ def _workflow_run_success( db.session.refresh(workflow_run) db.session.close() - if tracing_instance: - trace_manager.add_trace_task( - TraceTask( - tracing_instance, - TraceTaskName.WORKFLOW_TRACE, - workflow_run=workflow_run, - conversation_id=conversation_id, - ) + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.WORKFLOW_TRACE, + workflow_run=workflow_run, + conversation_id=conversation_id, ) + ) return workflow_run @@ -148,7 +144,6 @@ def _workflow_run_failed( status: WorkflowRunStatus, error: str, conversation_id: Optional[str] = None, - tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None ) -> WorkflowRun: """ @@ -172,15 +167,13 @@ def _workflow_run_failed( db.session.refresh(workflow_run) db.session.close() - if tracing_instance: - trace_manager.add_trace_task( - TraceTask( - tracing_instance, - TraceTaskName.WORKFLOW_TRACE, - workflow_run=workflow_run, - conversation_id=conversation_id, - ) + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.WORKFLOW_TRACE, + workflow_run=workflow_run, + conversation_id=conversation_id, ) + ) return workflow_run @@ -532,7 +525,6 @@ def _handle_node_finished(self, event: QueueNodeSucceededEvent | QueueNodeFailed def _handle_workflow_finished( self, event: QueueStopEvent | QueueWorkflowSucceededEvent | QueueWorkflowFailedEvent, - tracing_instance: Optional[BaseTraceInstance] = None, conversation_id: Optional[str] = None, trace_manager: Optional[TraceQueueManager] = None ) -> Optional[WorkflowRun]: @@ -552,7 +544,6 @@ def _handle_workflow_finished( status=WorkflowRunStatus.STOPPED, error='Workflow stopped.', conversation_id=conversation_id, - tracing_instance=tracing_instance, trace_manager=trace_manager ) @@ -576,7 +567,6 @@ def _handle_workflow_finished( status=WorkflowRunStatus.FAILED, error=event.error, conversation_id=conversation_id, - tracing_instance=tracing_instance, trace_manager=trace_manager ) else: @@ -594,7 +584,6 @@ def _handle_workflow_finished( total_steps=self._task_state.total_steps, 
outputs=outputs, conversation_id=conversation_id, - tracing_instance=tracing_instance, trace_manager=trace_manager ) diff --git a/api/core/callback_handler/agent_tool_callback_handler.py b/api/core/callback_handler/agent_tool_callback_handler.py index b075e315dbf0b3..3782c34cd58709 100644 --- a/api/core/callback_handler/agent_tool_callback_handler.py +++ b/api/core/callback_handler/agent_tool_callback_handler.py @@ -3,7 +3,6 @@ from pydantic import BaseModel -from core.ops.base_trace_instance import BaseTraceInstance from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName _TEXT_COLOR_MAPPING = { @@ -56,7 +55,6 @@ def on_tool_end( tool_outputs: str, message_id: Optional[str] = None, timer: Optional[Any] = None, - tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None ) -> None: """If not the final action, print out observation.""" @@ -66,18 +64,16 @@ def on_tool_end( print_text("Outputs: " + str(tool_outputs)[:1000] + "\n", color=self.color) print_text("\n") - if tracing_instance: - trace_manager.add_trace_task( - TraceTask( - tracing_instance, - TraceTaskName.TOOL_TRACE, - message_id=message_id, - tool_name=tool_name, - tool_inputs=tool_inputs, - tool_outputs=tool_outputs, - timer=timer, - ) + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.TOOL_TRACE, + message_id=message_id, + tool_name=tool_name, + tool_inputs=tool_inputs, + tool_outputs=tool_outputs, + timer=timer, ) + ) def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 94da53dce93d8a..65682064cef518 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -13,7 +13,6 @@ from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from core.ops.utils import measure_time from core.prompt.utils.prompt_template_parser import PromptTemplateParser -from services.ops_trace.ops_trace_service import OpsTraceService class LLMGenerator: @@ -54,22 +53,17 @@ def generate_conversation_name(cls, tenant_id: str, query, conversation_id: Opti name = name[:75] + '...' 
# get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - conversation_id=conversation_id - ) - if tracing_instance: - trace_manager = TraceQueueManager() - trace_manager.add_trace_task( - TraceTask( - tracing_instance, - TraceTaskName.GENERATE_NAME_TRACE, - conversation_id=conversation_id, - generate_conversation_name=name, - inputs=prompt, - timer=timer, - tenant_id=tenant_id, - ) + trace_manager = TraceQueueManager(conversation_id=conversation_id) + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.GENERATE_NAME_TRACE, + conversation_id=conversation_id, + generate_conversation_name=name, + inputs=prompt, + timer=timer, + tenant_id=tenant_id, ) + ) return name diff --git a/api/core/moderation/input_moderation.py b/api/core/moderation/input_moderation.py index 3817c0e24e5282..85fbfaef068fff 100644 --- a/api/core/moderation/input_moderation.py +++ b/api/core/moderation/input_moderation.py @@ -4,7 +4,6 @@ from core.app.app_config.entities import AppConfig from core.moderation.base import ModerationAction, ModerationException from core.moderation.factory import ModerationFactory -from core.ops.base_trace_instance import BaseTraceInstance from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName from core.ops.utils import measure_time @@ -19,7 +18,6 @@ def check( inputs: dict, query: str, message_id: str, - tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None ) -> tuple[bool, dict, str]: """ @@ -30,7 +28,7 @@ def check( :param inputs: inputs :param query: query :param message_id: message id - :param tracing_instance: tracing instance + :param trace_manager: trace manager :return: """ if not app_config.sensitive_word_avoidance: @@ -49,17 +47,15 @@ def check( with measure_time() as timer: moderation_result = moderation_factory.moderation_for_inputs(inputs, query) - if tracing_instance: - trace_manager.add_trace_task( - TraceTask( - tracing_instance, - TraceTaskName.MODERATION_TRACE, - message_id=message_id, - moderation_result=moderation_result, - inputs=inputs, - timer=timer - ) + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.MODERATION_TRACE, + message_id=message_id, + moderation_result=moderation_result, + inputs=inputs, + timer=timer ) + ) if not moderation_result.flagged: return False, inputs, query diff --git a/api/core/ops/trace_queue_manager.py b/api/core/ops/trace_queue_manager.py index 49b4959f6d03a6..9749d5798e1d47 100644 --- a/api/core/ops/trace_queue_manager.py +++ b/api/core/ops/trace_queue_manager.py @@ -8,6 +8,7 @@ from flask import Flask, current_app +from core.ops.base_trace_instance import BaseTraceInstance from core.ops.entities.trace_entity import ( DatasetRetrievalTraceInfo, GenerateNameTraceInfo, @@ -21,6 +22,7 @@ from extensions.ext_database import db from models.model import Conversation, MessageAgentThought, MessageFile from models.workflow import WorkflowRun +from services.ops_trace.ops_trace_service import OpsTraceService class TraceTaskName(str, Enum): @@ -37,7 +39,6 @@ class TraceTaskName(str, Enum): class TraceTask: def __init__( self, - trace_instance: Any, trace_type: Any, message_id: str = None, workflow_run: WorkflowRun = None, @@ -45,7 +46,6 @@ def __init__( timer: Any = None, **kwargs ): - self.trace_instance = trace_instance self.trace_type = trace_type self.message_id = message_id self.workflow_run = workflow_run @@ -54,10 +54,11 @@ def __init__( self.kwargs = kwargs self.file_base_url = os.getenv("FILES_URL", 
"http://127.0.0.1:5001") - def execute(self): + def execute(self, trace_instance: BaseTraceInstance): method_name, trace_info = self.preprocess() - method = self.trace_instance.trace - method(trace_info) + if trace_instance: + method = trace_instance.trace + method(trace_info) def preprocess(self): if self.trace_type == TraceTaskName.CONVERSATION_TRACE: @@ -385,12 +386,14 @@ def generate_name_trace(self, conversation_id, timer, **kwargs): class TraceQueueManager: - def __init__(self): + def __init__(self, app_id=None, conversation_id=None, message_id=None): + tracing_instance = OpsTraceService.get_ops_trace_instance(app_id, conversation_id, message_id) self.queue = queue.Queue() self.is_running = True self.thread = threading.Thread( target=self.process_queue, kwargs={ - 'flask_app': current_app._get_current_object() + 'flask_app': current_app._get_current_object(), + 'trace_instance': tracing_instance } ) self.thread.start() @@ -398,15 +401,15 @@ def __init__(self): def stop(self): self.is_running = False - def process_queue(self, flask_app: Flask): + def process_queue(self, flask_app: Flask, trace_instance: BaseTraceInstance): with flask_app.app_context(): while self.is_running: try: task = self.queue.get(timeout=60) - task.execute() + task.execute(trace_instance) self.queue.task_done() except queue.Empty: self.stop() - def add_trace_task(self, trace_task): + def add_trace_task(self, trace_task: TraceTask): self.queue.put(trace_task) diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index c8d940098b4104..b976f50eb5535c 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -357,19 +357,16 @@ def _on_retrival_end( db.session.commit() # get tracing instance - tracing_instance = self.application_generate_entity.tracing_instance if self.application_generate_entity else None trace_manager = self.application_generate_entity.trace_manager if self.application_generate_entity else None - if tracing_instance: - trace_manager.add_trace_task( - TraceTask( - tracing_instance, - TraceTaskName.DATASET_RETRIEVAL_TRACE, - message_id=message_id, - documents=documents, - timer=timer - ) + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.DATASET_RETRIEVAL_TRACE, + message_id=message_id, + documents=documents, + timer=timer ) + ) def _on_query(self, query: str, dataset_ids: list[str], app_id: str, user_from: str, user_id: str) -> None: """ diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py index e9ac72839171cd..34aca8117d97f8 100644 --- a/api/core/tools/tool_engine.py +++ b/api/core/tools/tool_engine.py @@ -9,7 +9,6 @@ from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler from core.callback_handler.workflow_tool_callback_handler import DifyWorkflowCallbackHandler from core.file.file_obj import FileTransferMethod -from core.ops.base_trace_instance import BaseTraceInstance from core.ops.trace_queue_manager import TraceQueueManager from core.tools.entities.tool_entities import ToolInvokeMessage, ToolInvokeMessageBinary, ToolInvokeMeta, ToolParameter from core.tools.errors import ( @@ -37,7 +36,6 @@ def agent_invoke( tool: Tool, tool_parameters: Union[str, dict], user_id: str, tenant_id: str, message: Message, invoke_from: InvokeFrom, agent_tool_callback: DifyAgentCallbackHandler, - tracing_instance: Optional[BaseTraceInstance] = None, trace_manager: Optional[TraceQueueManager] = None ) -> tuple[str, list[tuple[MessageFile, bool]], 
ToolInvokeMeta]: """ @@ -91,7 +89,6 @@ def agent_invoke( tool_inputs=tool_parameters, tool_outputs=plain_text, message_id=message.id, - tracing_instance=tracing_instance, trace_manager=trace_manager ) diff --git a/api/services/message_service.py b/api/services/message_service.py index aba334d5c25840..15329bbc30de8d 100644 --- a/api/services/message_service.py +++ b/api/services/message_service.py @@ -21,7 +21,6 @@ MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError, ) -from services.ops_trace.ops_trace_service import OpsTraceService from services.workflow_service import WorkflowService @@ -272,20 +271,14 @@ def get_suggested_questions_after_answer(cls, app_model: App, user: Optional[Uni ) # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - message_id=message_id - ) - - if tracing_instance: - trace_manager = TraceQueueManager() - trace_manager.add_trace_task( - TraceTask( - tracing_instance, - TraceTaskName.SUGGESTED_QUESTION_TRACE, - message_id=message_id, - suggested_question=questions, - timer=timer - ) + trace_manager = TraceQueueManager(message_id=message_id) + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.SUGGESTED_QUESTION_TRACE, + message_id=message_id, + suggested_question=questions, + timer=timer ) + ) return questions From afbd77898360f98cbdc365eaadd253f57924d9b1 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 22:58:45 +0800 Subject: [PATCH 38/65] chore: get rid of useless stuff --- api/core/agent/cot_agent_runner.py | 2 -- api/core/app/apps/agent_chat/app_generator.py | 1 - api/core/app/apps/agent_chat/app_runner.py | 1 - api/core/app/apps/chat/app_generator.py | 1 - api/core/app/apps/completion/app_generator.py | 6 ------ api/core/app/task_pipeline/workflow_cycle_manage.py | 1 - 6 files changed, 12 deletions(-) diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py index 54a90b098ae276..86577c0167f44c 100644 --- a/api/core/agent/cot_agent_runner.py +++ b/api/core/agent/cot_agent_runner.py @@ -44,8 +44,6 @@ def run( self._repack_app_generate_entity(app_generate_entity) self._init_react_state(query) - # get tracing instance - tracing_instance = app_generate_entity.tracing_instance trace_manager = app_generate_entity.trace_manager # check model mode diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index 6335040c01992c..e8f0de846236bd 100644 --- a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -147,7 +147,6 @@ def generate(self, app_model: App, 'queue_manager': queue_manager, 'conversation_id': conversation.id, 'message_id': message.id, - 'tracing_instance': tracing_instance, }) worker_thread.start() diff --git a/api/core/app/apps/agent_chat/app_runner.py b/api/core/app/apps/agent_chat/app_runner.py index 1444a71257e720..6aa615a48de887 100644 --- a/api/core/app/apps/agent_chat/app_runner.py +++ b/api/core/app/apps/agent_chat/app_runner.py @@ -41,7 +41,6 @@ def run( :param queue_manager: application queue manager :param conversation: conversation :param message: message - :param tracing_instance: tracing instance :return: """ app_config = application_generate_entity.app_config diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index 08db97ff6c6e9e..1cb2f4d9a36c07 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -43,7 +43,6 @@ def generate( :param args: 
request args :param invoke_from: invoke from source :param stream: is stream - :param tracing_instance: tracing instance """ if not args.get('query'): raise ValueError('query is required') diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index 45f16ee2b7daef..466a66eac90add 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -25,7 +25,6 @@ from models.model import App, EndUser, Message from services.errors.app import MoreLikeThisDisabledError from services.errors.message import MessageNotExistsError -from services.ops_trace.ops_trace_service import OpsTraceService logger = logging.getLogger(__name__) @@ -278,11 +277,6 @@ def generate_more_like_this(self, app_model: App, message ) = self._init_generate_records(application_generate_entity) - # get tracing instance - tracing_instance = OpsTraceService.get_ops_trace_instance( - app_id=app_model.id - ) - # init queue manager queue_manager = MessageBasedAppQueueManager( task_id=application_generate_entity.task_id, diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index 49f407c11ed791..7a5e5a01ae10e5 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_cycle_manage.py @@ -112,7 +112,6 @@ def _workflow_run_success( :param total_steps: total steps :param outputs: outputs :param conversation_id: conversation id - :param tracing_instance: tracing instance :return: """ workflow_run.status = WorkflowRunStatus.SUCCEEDED.value From e6294b1853d0d0b31bee4d81741bf9c5380ec145 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 23:02:41 +0800 Subject: [PATCH 39/65] chore: change ops_trace_service location --- api/controllers/console/app/app.py | 2 +- api/controllers/console/app/ops_trace.py | 2 +- api/{services/ops_trace => core/ops}/ops_trace_service.py | 0 api/core/ops/trace_queue_manager.py | 2 +- api/services/ops_trace/__init__.py | 0 5 files changed, 3 insertions(+), 3 deletions(-) rename api/{services/ops_trace => core/ops}/ops_trace_service.py (100%) delete mode 100644 api/services/ops_trace/__init__.py diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index 2f49222450490f..bc00175939a540 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -8,6 +8,7 @@ from controllers.console.app.wraps import get_app_model from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check +from core.ops.ops_trace_service import OpsTraceService from fields.app_fields import ( app_detail_fields, app_detail_fields_with_site, @@ -15,7 +16,6 @@ ) from libs.login import login_required from services.app_service import AppService -from services.ops_trace.ops_trace_service import OpsTraceService ALLOW_CREATE_APP_MODES = ['chat', 'agent-chat', 'advanced-chat', 'workflow', 'completion'] diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py index 2a0681a9e60c26..eb5266d7dee69f 100644 --- a/api/controllers/console/app/ops_trace.py +++ b/api/controllers/console/app/ops_trace.py @@ -4,8 +4,8 @@ from controllers.console.app.error import TracingConfigCheckError, TracingConfigIsExist, TracingConfigNotExist from controllers.console.setup import setup_required from controllers.console.wraps import 
account_initialization_required +from core.ops.ops_trace_service import OpsTraceService from libs.login import login_required -from services.ops_trace.ops_trace_service import OpsTraceService class TraceAppConfigApi(Resource): diff --git a/api/services/ops_trace/ops_trace_service.py b/api/core/ops/ops_trace_service.py similarity index 100% rename from api/services/ops_trace/ops_trace_service.py rename to api/core/ops/ops_trace_service.py diff --git a/api/core/ops/trace_queue_manager.py b/api/core/ops/trace_queue_manager.py index 9749d5798e1d47..a3e55bf534155f 100644 --- a/api/core/ops/trace_queue_manager.py +++ b/api/core/ops/trace_queue_manager.py @@ -18,11 +18,11 @@ ToolTraceInfo, WorkflowTraceInfo, ) +from core.ops.ops_trace_service import OpsTraceService from core.ops.utils import get_message_data from extensions.ext_database import db from models.model import Conversation, MessageAgentThought, MessageFile from models.workflow import WorkflowRun -from services.ops_trace.ops_trace_service import OpsTraceService class TraceTaskName(str, Enum): diff --git a/api/services/ops_trace/__init__.py b/api/services/ops_trace/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 From 7701aafea711d953722aba134ba4bfd66485709c Mon Sep 17 00:00:00 2001 From: takatost Date: Mon, 24 Jun 2024 23:39:01 +0800 Subject: [PATCH 40/65] update WorkflowNodeExecution init created_at --- .../task_pipeline/workflow_cycle_manage.py | 3 +- .../workflow_iteration_cycle_manage.py | 4 ++- ...workflow_node_executions_time_precision.py | 33 ------------------- api/models/workflow.py | 2 +- 4 files changed, 6 insertions(+), 36 deletions(-) delete mode 100644 api/migrations/versions/fef88df743eb.update_workflow_node_executions_time_precision.py diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index 7a5e5a01ae10e5..60169cb4367e0f 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_cycle_manage.py @@ -206,7 +206,8 @@ def _init_node_execution_from_workflow_run(self, workflow_run: WorkflowRun, title=node_title, status=WorkflowNodeExecutionStatus.RUNNING.value, created_by_role=workflow_run.created_by_role, - created_by=workflow_run.created_by + created_by=workflow_run.created_by, + created_at=datetime.now(timezone.utc).replace(tzinfo=None) ) db.session.add(workflow_node_execution) diff --git a/api/core/app/task_pipeline/workflow_iteration_cycle_manage.py b/api/core/app/task_pipeline/workflow_iteration_cycle_manage.py index 69af81d02691f8..aff187071417c7 100644 --- a/api/core/app/task_pipeline/workflow_iteration_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_iteration_cycle_manage.py @@ -1,6 +1,7 @@ import json import time from collections.abc import Generator +from datetime import datetime, timezone from typing import Optional, Union from core.app.entities.queue_entities import ( @@ -131,7 +132,8 @@ def _init_iteration_execution_from_workflow_run(self, 'started_run_index': node_run_index + 1, 'current_index': 0, 'steps_boundary': [], - }) + }), + created_at=datetime.now(timezone.utc).replace(tzinfo=None) ) db.session.add(workflow_node_execution) diff --git a/api/migrations/versions/fef88df743eb.update_workflow_node_executions_time_precision.py b/api/migrations/versions/fef88df743eb.update_workflow_node_executions_time_precision.py deleted file mode 100644 index 40f52437211fb3..00000000000000 --- 
a/api/migrations/versions/fef88df743eb.update_workflow_node_executions_time_precision.py +++ /dev/null @@ -1,33 +0,0 @@ -"""add-dataset-retrival-model - -Revision ID: fca025d3b60f -Revises: b3a09c049e8e -Create Date: 2023-11-03 13:08:23.246396 - -""" -import sqlalchemy as sa -from alembic import op - -# revision identifiers, used by Alembic. -revision = 'fef88df743eb' -down_revision = '2a3aebbbf4bb' -branch_labels = None -depends_on = None - - -def upgrade(): - # created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) - # created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(3)')) - # ### commands auto generated by Alembic - please adjust! ### - with op.batch_alter_table('workflow_node_executions') as batch_op: - batch_op.alter_column('created_at', server_default=None) - batch_op.alter_column('created_at', type_=sa.DateTime(), nullable=False, server_default=sa.text('CURRENT_TIMESTAMP(3)')) - # ### end Alembic commands ### - - -def downgrade(): - # ### commands auto generated by Alembic - please adjust! ### - with op.batch_alter_table('workflow_node_executions') as batch_op: - batch_op.alter_column('created_at', server_default=None) - batch_op.alter_column('created_at', type_=sa.DateTime(), nullable=False, server_default=sa.text('CURRENT_TIMESTAMP(0)')) - # ### end Alembic commands ### diff --git a/api/models/workflow.py b/api/models/workflow.py index 85714c6fb1e87a..d9bc7848787925 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -443,7 +443,7 @@ class WorkflowNodeExecution(db.Model): error = db.Column(db.Text) elapsed_time = db.Column(db.Float, nullable=False, server_default=db.text('0')) execution_metadata = db.Column(db.Text) - created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(3)')) + created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) created_by_role = db.Column(db.String(255), nullable=False) created_by = db.Column(StringUUID, nullable=False) finished_at = db.Column(db.DateTime) From bb2ad524d673bfc373640095b03756f8066553e1 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Mon, 24 Jun 2024 23:40:38 +0800 Subject: [PATCH 41/65] feat: update poetry.lock --- api/poetry.lock | 45 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/api/poetry.lock b/api/poetry.lock index f96aa5db0078c1..d4060c0977c0d7 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -5117,6 +5117,49 @@ files = [ {file = "opentelemetry_util_http-0.46b0.tar.gz", hash = "sha256:03b6e222642f9c7eae58d9132343e045b50aca9761fcb53709bd2b663571fdf6"}, ] +[[package]] +name = "oracledb" +version = "2.2.1" +description = "Python interface to Oracle Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "oracledb-2.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3dacef7c4dd3fca94728f05336076e063450bb57ea569e8dd67fae960aaf537e"}, + {file = "oracledb-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd8fdc93a65ae2e1c934a0e3e64cb01997ba004c48a986a37583f670dd344802"}, + {file = "oracledb-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531600569febef29806f058d0f0900127356caccba47785d7ec0fca4714af132"}, + {file = "oracledb-2.2.1-cp310-cp310-win32.whl", hash = "sha256:9bbd2c33a97a91d92178d6c4ffa8676b0da80b9fd1329a5e6a09e01b8b2472b5"}, + {file = 
"oracledb-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:708edcaddfefa1f58a75f72df2ea0d39980ae126db85ea59a4c83eab40b5f61e"}, + {file = "oracledb-2.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fb6d9a4d7400398b22edb9431334f9add884dec9877fd9c4ae531e1ccc6ee1fd"}, + {file = "oracledb-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07757c240afbb4f28112a6affc2c5e4e34b8a92e5bb9af81a40fba398da2b028"}, + {file = "oracledb-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63daec72f853c47179e98493e9b732909d96d495bdceb521c5973a3940d28142"}, + {file = "oracledb-2.2.1-cp311-cp311-win32.whl", hash = "sha256:fec5318d1e0ada7e4674574cb6c8d1665398e8b9c02982279107212f05df1660"}, + {file = "oracledb-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:5134dccb5a11bc755abf02fd49be6dc8141dfcae4b650b55d40509323d00b5c2"}, + {file = "oracledb-2.2.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ac5716bc9a48247fdf563f5f4ec097f5c9f074a60fd130cdfe16699208ca29b5"}, + {file = "oracledb-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c150bddb882b7c73fb462aa2d698744da76c363e404570ed11d05b65811d96c3"}, + {file = "oracledb-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193e1888411bc21187ade4b16b76820bd1e8f216e25602f6cd0a97d45723c1dc"}, + {file = "oracledb-2.2.1-cp312-cp312-win32.whl", hash = "sha256:44a960f8bbb0711af222e0a9690e037b6a2a382e0559ae8eeb9cfafe26c7a3bc"}, + {file = "oracledb-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:470136add32f0d0084225c793f12a52b61b52c3dc00c9cd388ec6a3db3a7643e"}, + {file = "oracledb-2.2.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:506f0027a2c4b6e33b8aabaebd00e4e31cc85134aa82fd855f4817917cfc9d5e"}, + {file = "oracledb-2.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5b8b46e6579eaca3b1436fa57bd666ad041d7f4dd3f9237f21d132cc8b52c04"}, + {file = "oracledb-2.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a47019561c5cd76d1f19b3a528a98285dca9d915dd8559555f3074424ee9438"}, + {file = "oracledb-2.2.1-cp37-cp37m-win32.whl", hash = "sha256:4b433ea6465de03315bf7c121ad9272b4eef0ecaf235d1743b06557ee587bf6e"}, + {file = "oracledb-2.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6af95303446966c808f3a6c1c33cb0343e9bf8ec57841cc804de0eb1bfa337b5"}, + {file = "oracledb-2.2.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:7df0bebc28488655fbf64b9222d9a14e5ecd13254b426ef75da7adc80cbc18d9"}, + {file = "oracledb-2.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37564661ba93f5714969400fc8a57552e5ca4244d8ecc7044d29b4af4cf9a660"}, + {file = "oracledb-2.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9077cbbe7a2bad13e20af4276a1ef782029fc5601e9470b4b60f4bbb4144655b"}, + {file = "oracledb-2.2.1-cp38-cp38-win32.whl", hash = "sha256:406c1bacf8a12e993ffe148797a0eb98e62deac073195d5cfa076e78eea85c64"}, + {file = "oracledb-2.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:c1894be5800049c64cdba63f19b94bcb94c42e70f8a53d1dd2dfaa2882fa2096"}, + {file = "oracledb-2.2.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:78e64fa607b28f4de6ff4c6177ef10b8beae0b7fd43a76e78b2215defc1b73c6"}, + {file = "oracledb-2.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7d4999820f23bb5b28097885c8d18b6d6dce47a53aa59be66bf1c865c872b17"}, + {file = 
"oracledb-2.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0048148630b60fe42e598591be152bd863ef339dff1c3785b121313b94856223"}, + {file = "oracledb-2.2.1-cp39-cp39-win32.whl", hash = "sha256:49a16ccc64c52a83c9db40095d01b0f2ee7f8a20cb105c82ffc2f57151553cfd"}, + {file = "oracledb-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:9e76d46d8260e33442cac259278885adf90080f7d2117eaeb4b230504827860b"}, + {file = "oracledb-2.2.1.tar.gz", hash = "sha256:8464c6f0295f3318daf6c2c72c83c2dcbc37e13f8fd44e3e39ff8665f442d6b6"}, +] + +[package.dependencies] +cryptography = ">=3.2.1" + [[package]] name = "orjson" version = "3.10.4" @@ -9038,4 +9081,4 @@ testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "a767085a82605b4db8ba860c355eca48a8793d4a012f84631cf822fa146c2d55" +content-hash = "59a9d41baa5454de6c9032c8d9ca81d79e5a7137c654b8765034aebb8ec29793" From 3d1b27aada563b2a9e7ee9035f3deae2c86b3242 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 02:26:11 +0800 Subject: [PATCH 42/65] feat: add provider_config_map --- api/core/ops/base_trace_instance.py | 5 +- api/core/ops/entities/config_entity.py | 35 +++- api/core/ops/langfuse_trace/langfuse_trace.py | 53 +----- .../ops/langsmith_trace/langsmith_trace.py | 34 +--- api/core/ops/ops_trace_service.py | 157 ++++++++++-------- 5 files changed, 133 insertions(+), 151 deletions(-) diff --git a/api/core/ops/base_trace_instance.py b/api/core/ops/base_trace_instance.py index 7d7eab5e3051aa..c7af8e296339c8 100644 --- a/api/core/ops/base_trace_instance.py +++ b/api/core/ops/base_trace_instance.py @@ -1,5 +1,6 @@ from abc import ABC, abstractmethod +from core.ops.entities.config_entity import BaseTracingConfig from core.ops.entities.trace_entity import BaseTraceInfo @@ -9,12 +10,12 @@ class BaseTraceInstance(ABC): """ @abstractmethod - def __init__(self): + def __init__(self, trace_config: BaseTracingConfig): """ Abstract initializer for the trace instance. Distribute trace tasks by matching entities """ - ... + self.trace_config = trace_config @abstractmethod def trace(self, trace_info: BaseTraceInfo): diff --git a/api/core/ops/entities/config_entity.py b/api/core/ops/entities/config_entity.py index 51dca08137e773..566bbf51ac945c 100644 --- a/api/core/ops/entities/config_entity.py +++ b/api/core/ops/entities/config_entity.py @@ -1,6 +1,6 @@ from enum import Enum -from pydantic import BaseModel +from pydantic import BaseModel, ValidationInfo, field_validator class TracingProviderEnum(Enum): @@ -8,19 +8,44 @@ class TracingProviderEnum(Enum): LANGSMITH = 'langsmith' -class LangfuseConfig(BaseModel): +class BaseTracingConfig(BaseModel): + """ + Base model class for tracing + """ + ... + + +class LangfuseConfig(BaseTracingConfig): """ Model class for Langfuse tracing config. """ public_key: str secret_key: str - host: str + host: str = 'https://api.langfuse.com' + @field_validator("host") + def set_value(cls, v, info: ValidationInfo): + if v is None or v == "": + v = 'https://api.langfuse.com' + if not v.startswith('https://'): + raise ValueError('host must start with https://') -class LangSmithConfig(BaseModel): + return v + + +class LangSmithConfig(BaseTracingConfig): """ Model class for Langsmith tracing config. 
""" api_key: str project: str - endpoint: str + endpoint: str = 'https://api.smith.langchain.com' + + @field_validator("endpoint") + def set_value(cls, v, info: ValidationInfo): + if v is None or v == "": + v = 'https://api.smith.langchain.com' + if not v.startswith('https://'): + raise ValueError('endpoint must start with https://') + + return v diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py index 2a3526b3ee5ed7..63416a11759a23 100644 --- a/api/core/ops/langfuse_trace/langfuse_trace.py +++ b/api/core/ops/langfuse_trace/langfuse_trace.py @@ -6,7 +6,6 @@ from langfuse import Langfuse -from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.ops.base_trace_instance import BaseTraceInstance from core.ops.entities.config_entity import LangfuseConfig from core.ops.entities.trace_entity import ( @@ -38,15 +37,13 @@ class LangFuseDataTrace(BaseTraceInstance): def __init__( self, - langfuse_client_public_key: str = None, - langfuse_client_secret_key: str = None, - langfuse_client_host: str = "https://cloud.langfuse.com", + langfuse_config: LangfuseConfig, ): - super().__init__() + super().__init__(langfuse_config) self.langfuse_client = Langfuse( - public_key=langfuse_client_public_key, - secret_key=langfuse_client_secret_key, - host=langfuse_client_host, + public_key=langfuse_config.public_key, + secret_key=langfuse_config.secret_key, + host=langfuse_config.host, ) self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") @@ -343,42 +340,4 @@ def api_check(self): return self.langfuse_client.auth_check() except Exception as e: logger.debug(f"LangFuse API check failed: {str(e)}") - return False - - @classmethod - def obfuscate_config(cls, config: LangfuseConfig): - public_key = obfuscated_token(config.public_key) - secret_key = obfuscated_token(config.secret_key) - return LangfuseConfig(public_key=public_key, secret_key=secret_key, host=config.host) - - @classmethod - def encrypt_config(cls, tenant_id, config: LangfuseConfig, current_trace_config: dict = None): - if "*" in config.public_key and "*" in config.secret_key: - return LangfuseConfig( - public_key=current_trace_config.get("public_key"), - secret_key=current_trace_config.get("secret_key"), - host=config.host - ) - if "*" in config.public_key: - decrypt_secret_key = encrypt_token(tenant_id, config.secret_key) - return LangfuseConfig( - public_key=current_trace_config.get("public_key"), - secret_key=decrypt_secret_key, - host=config.host - ) - if "*" in config.secret_key: - decrypt_public_key = encrypt_token(tenant_id, config.public_key) - return LangfuseConfig( - public_key=decrypt_public_key, - secret_key=current_trace_config.get("secret_key"), - host=config.host - ) - decrypt_public_key = encrypt_token(tenant_id, config.public_key) - decrypt_secret_key = encrypt_token(tenant_id, config.secret_key) - return LangfuseConfig(public_key=decrypt_public_key, secret_key=decrypt_secret_key, host=config.host) - - @classmethod - def decrypt_config(cls, tenant_id, config: LangfuseConfig): - decrypt_public_key = decrypt_token(tenant_id, config.public_key) - decrypt_secret_key = decrypt_token(tenant_id, config.secret_key) - return LangfuseConfig(public_key=decrypt_public_key, secret_key=decrypt_secret_key, host=config.host) \ No newline at end of file + raise ValueError(f"LangFuse API check failed: {str(e)}") diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py index 
259077ee5c9f21..98cd9f47ec3033 100644 --- a/api/core/ops/langsmith_trace/langsmith_trace.py +++ b/api/core/ops/langsmith_trace/langsmith_trace.py @@ -5,7 +5,6 @@ from langsmith import Client -from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.ops.base_trace_instance import BaseTraceInstance from core.ops.entities.config_entity import LangSmithConfig from core.ops.entities.trace_entity import ( @@ -34,16 +33,14 @@ class LangSmithDataTrace(BaseTraceInstance): def __init__( self, - langsmith_key: str = None, - project_name: str = None, - endpoint: str = "https://api.smith.langchain.com" + langsmith_config: LangSmithConfig, ): - super().__init__() - self.langsmith_key = langsmith_key - self.project_name = project_name + super().__init__(langsmith_config) + self.langsmith_key = langsmith_config.api_key + self.project_name = langsmith_config.project self.project_id = None self.langsmith_client = Client( - api_key=langsmith_key, api_url=endpoint + api_key=langsmith_config.api_key, api_url=langsmith_config.endpoint ) self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") @@ -328,23 +325,4 @@ def api_check(self): return True except Exception as e: logger.debug(f"LangSmith API check failed: {str(e)}") - return False - - @classmethod - def obfuscate_config(cls, config: LangSmithConfig): - api_key = obfuscated_token(config.api_key) - return LangSmithConfig(api_key=api_key, project=config.project, endpoint=config.endpoint) - - @classmethod - def encrypt_config(cls, tenant_id, config: LangSmithConfig, current_trace_config=None): - if "*" in config.api_key: - return LangSmithConfig( - api_key=current_trace_config.get("api_key"), project=config.project, endpoint=config.endpoint - ) - api_key = encrypt_token(tenant_id, config.api_key) - return LangSmithConfig(api_key=api_key, project=config.project, endpoint=config.endpoint) - - @classmethod - def decrypt_config(cls, tenant_id, config: LangSmithConfig): - api_key = decrypt_token(tenant_id, config.api_key) - return LangSmithConfig(api_key=api_key, project=config.project, endpoint=config.endpoint) + raise ValueError(f"LangSmith API check failed: {str(e)}") diff --git a/api/core/ops/ops_trace_service.py b/api/core/ops/ops_trace_service.py index 0db2bae4e26529..ea7474d47a28d7 100644 --- a/api/core/ops/ops_trace_service.py +++ b/api/core/ops/ops_trace_service.py @@ -2,12 +2,32 @@ from typing import Union from uuid import UUID -from core.ops.entities.config_entity import LangfuseConfig, LangSmithConfig, TracingProviderEnum +from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token +from core.ops.entities.config_entity import ( + LangfuseConfig, + LangSmithConfig, + TracingProviderEnum, +) from core.ops.langfuse_trace.langfuse_trace import LangFuseDataTrace from core.ops.langsmith_trace.langsmith_trace import LangSmithDataTrace from extensions.ext_database import db from models.model import App, AppModelConfig, Conversation, Message, TraceAppConfig +provider_config_map = { + TracingProviderEnum.LANGFUSE.value: { + 'config_class': LangfuseConfig, + 'secret_keys': ('public_key', 'secret_key'), + 'other_keys': ('host',), + 'trace_instance': LangFuseDataTrace + }, + TracingProviderEnum.LANGSMITH.value: { + 'config_class': LangSmithConfig, + 'secret_keys': ('api_key',), + 'other_keys': ('project', 'endpoint'), + 'trace_instance': LangSmithDataTrace + } +} + class OpsTraceService: @classmethod @@ -47,6 +67,13 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c 
TracingProviderEnum.LANGSMITH.value] and tracing_provider != "": return {"error": f"Invalid tracing provider: {tracing_provider}"} + config_class, other_keys = provider_config_map[tracing_provider]['config_class'], \ + provider_config_map[tracing_provider]['other_keys'] + default_config_instance = config_class(**tracing_config) + for key in other_keys: + if key in tracing_config and tracing_config[key] == "": + tracing_config[key] = getattr(default_config_instance, key, None) + # api check if not cls.check_trace_config_is_effective(tracing_config, tracing_provider): return {"error": "Invalid Credentials"} @@ -84,10 +111,6 @@ def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value]: raise ValueError(f"Invalid tracing provider: {tracing_provider}") - # api check - # if not cls.check_trace_config_is_effective(tracing_config, tracing_provider): - # raise ValueError("Invalid Credentials") - # check if trace config already exists current_trace_config = db.session.query(TraceAppConfig).filter( TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider @@ -101,6 +124,13 @@ def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c tracing_config = cls.encrypt_tracing_config( tenant_id, tracing_provider, tracing_config, current_trace_config.tracing_config ) + + # api check + # decrypt_token + decrypt_tracing_config = cls.decrypt_tracing_config(tenant_id, tracing_provider, tracing_config) + if not cls.check_trace_config_is_effective(decrypt_tracing_config, tracing_provider): + raise ValueError("Invalid Credentials") + current_trace_config.tracing_config = tracing_config db.session.commit() @@ -131,21 +161,34 @@ def encrypt_tracing_config( cls, tenant_id: str, tracing_provider: str, tracing_config: dict, current_trace_config=None ): """ - Encrypt tracing config + Encrypt tracing config. 
:param tenant_id: tenant id :param tracing_provider: tracing provider - :param tracing_config: tracing config - :param current_trace_config: current trace config - :return: + :param tracing_config: tracing config dictionary to be encrypted + :param current_trace_config: current tracing configuration for keeping existing values + :return: encrypted tracing configuration """ - if tracing_provider == TracingProviderEnum.LANGFUSE.value: - tracing_config = LangfuseConfig(**tracing_config) - tracing_config = LangFuseDataTrace.encrypt_config(tenant_id, tracing_config, current_trace_config) - elif tracing_provider == TracingProviderEnum.LANGSMITH.value: - tracing_config = LangSmithConfig(**tracing_config) - tracing_config = LangSmithDataTrace.encrypt_config(tenant_id, tracing_config, current_trace_config) - - return tracing_config.model_dump() + # Get the configuration class and the keys that require encryption + config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \ + provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys'] + + new_config = {} + # Encrypt necessary keys + for key in secret_keys: + if key in tracing_config: + if '*' in tracing_config[key]: + # If the key contains '*', retain the original value from the current config + new_config[key] = current_trace_config.get(key, tracing_config[key]) + else: + # Otherwise, encrypt the key + new_config[key] = encrypt_token(tenant_id, tracing_config[key]) + + for key in other_keys: + new_config[key] = tracing_config.get(key, "") + + # Create a new instance of the config class with the new configuration + encrypted_config = config_class(**new_config) + return encrypted_config.model_dump() @classmethod def decrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_config: dict): @@ -156,14 +199,17 @@ def decrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_c :param tracing_config: tracing config :return: """ - if tracing_provider == TracingProviderEnum.LANGFUSE.value: - tracing_config = LangfuseConfig(**tracing_config) - tracing_config = LangFuseDataTrace.decrypt_config(tenant_id, tracing_config) - elif tracing_provider == TracingProviderEnum.LANGSMITH.value: - tracing_config = LangSmithConfig(**tracing_config) - tracing_config = LangSmithDataTrace.decrypt_config(tenant_id, tracing_config) + config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \ + provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys'] + new_config = {} + for key in secret_keys: + if key in tracing_config: + new_config[key] = decrypt_token(tenant_id, tracing_config[key]) - return tracing_config.model_dump() + for key in other_keys: + new_config[key] = tracing_config.get(key, "") + + return config_class(**new_config).model_dump() @classmethod def obfuscated_decrypt_token(cls, tracing_provider: str, decrypt_tracing_config:dict): @@ -173,14 +219,17 @@ def obfuscated_decrypt_token(cls, tracing_provider: str, decrypt_tracing_config: :param decrypt_tracing_config: tracing config :return: """ - obfuscate_config = None - if tracing_provider == TracingProviderEnum.LANGFUSE.value: - decrypt_tracing_config = LangfuseConfig(**decrypt_tracing_config) - obfuscate_config = LangFuseDataTrace.obfuscate_config(decrypt_tracing_config) - elif tracing_provider == TracingProviderEnum.LANGSMITH.value: - decrypt_tracing_config = LangSmithConfig(**decrypt_tracing_config) - obfuscate_config 
= LangSmithDataTrace.obfuscate_config(decrypt_tracing_config) - return obfuscate_config.model_dump() + config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \ + provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys'] + new_config = {} + for key in secret_keys: + if key in decrypt_tracing_config: + new_config[key] = obfuscated_token(decrypt_tracing_config[key]) + + for key in other_keys: + new_config[key] = decrypt_tracing_config.get(key, "") + + return config_class(**new_config).model_dump() @classmethod def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): @@ -246,24 +295,8 @@ def get_ops_trace_instance( # decrypt_token decrypt_trace_config = cls.get_decrypted_tracing_config(app_id, tracing_provider) if app_ops_trace_config.get('enabled'): - if tracing_provider == TracingProviderEnum.LANGFUSE.value: - langfuse_client_public_key = decrypt_trace_config.get('public_key') - langfuse_client_secret_key = decrypt_trace_config.get('secret_key') - langfuse_host = decrypt_trace_config.get('host') - tracing_instance = LangFuseDataTrace( - langfuse_client_public_key, - langfuse_client_secret_key, - langfuse_host, - ) - elif tracing_provider == TracingProviderEnum.LANGSMITH.value: - langsmith_api_key = decrypt_trace_config.get('api_key') - langsmith_project = decrypt_trace_config.get('project') - langsmith_endpoint = decrypt_trace_config.get('endpoint') - tracing_instance = LangSmithDataTrace( - langsmith_api_key, - langsmith_project, - langsmith_endpoint, - ) + config_class = provider_config_map[tracing_provider]['config_class'] + tracing_instance = config_class(**decrypt_trace_config) return tracing_instance @@ -297,8 +330,8 @@ def update_app_tracing_config(cls, app_id: str, enabled: bool, tracing_provider: # auth check if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value, None, ""]: raise ValueError(f"Invalid tracing provider: {tracing_provider}") - app_config: App = db.session.query(App).filter(App.id == app_id).first() + app_config: App = db.session.query(App).filter(App.id == app_id).first() app_config.tracing = json.dumps( { "enabled": enabled, @@ -331,21 +364,7 @@ def check_trace_config_is_effective(tracing_config: dict, tracing_provider: str) :param tracing_provider: tracing provider :return: """ - if tracing_provider == TracingProviderEnum.LANGFUSE.value: - tracing_config = LangfuseConfig(**tracing_config) - langfuse_trace_instance = LangFuseDataTrace( - tracing_config.public_key, - tracing_config.secret_key, - tracing_config.host, - ) - return langfuse_trace_instance.api_check() - elif tracing_provider == TracingProviderEnum.LANGSMITH.value: - tracing_config = LangSmithConfig(**tracing_config) - langsmith_trace_instance = LangSmithDataTrace( - tracing_config.api_key, - tracing_config.project, - tracing_config.endpoint, - ) - return langsmith_trace_instance.api_check() - else: - raise ValueError(f"Unsupported tracing provider: {tracing_provider}") + config_type, trace_instance = provider_config_map[tracing_provider]['config_class'], \ + provider_config_map[tracing_provider]['trace_instance'] + tracing_config = config_type(**tracing_config) + return trace_instance(tracing_config).api_check() From 7bf8faaab01b4ca29d21221929b05e85aaef90a8 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 12:51:35 +0800 Subject: [PATCH 43/65] fix: trace queue manager error --- api/core/ops/ops_trace_service.py | 6 +++--- 
api/core/ops/trace_queue_manager.py | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/api/core/ops/ops_trace_service.py b/api/core/ops/ops_trace_service.py index ea7474d47a28d7..85e3f9cf9a3af8 100644 --- a/api/core/ops/ops_trace_service.py +++ b/api/core/ops/ops_trace_service.py @@ -295,9 +295,9 @@ def get_ops_trace_instance( # decrypt_token decrypt_trace_config = cls.get_decrypted_tracing_config(app_id, tracing_provider) if app_ops_trace_config.get('enabled'): - config_class = provider_config_map[tracing_provider]['config_class'] - tracing_instance = config_class(**decrypt_trace_config) - + trace_instance, config_class = provider_config_map[tracing_provider]['trace_instance'], \ + provider_config_map[tracing_provider]['config_class'] + tracing_instance = trace_instance(config_class(**decrypt_trace_config)) return tracing_instance return None diff --git a/api/core/ops/trace_queue_manager.py b/api/core/ops/trace_queue_manager.py index a3e55bf534155f..4337e242c15649 100644 --- a/api/core/ops/trace_queue_manager.py +++ b/api/core/ops/trace_queue_manager.py @@ -4,7 +4,7 @@ import threading from datetime import timedelta from enum import Enum -from typing import Any +from typing import Any, Optional from flask import Flask, current_app @@ -40,10 +40,10 @@ class TraceTask: def __init__( self, trace_type: Any, - message_id: str = None, - workflow_run: WorkflowRun = None, - conversation_id: str = None, - timer: Any = None, + message_id: Optional[str] = None, + workflow_run: Optional[WorkflowRun] = None, + conversation_id: Optional[str] = None, + timer: Optional[Any] = None, **kwargs ): self.trace_type = trace_type From 084112eff6b3a7128dc89981d1b775c0e26b04c0 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 13:13:37 +0800 Subject: [PATCH 44/65] fix: workflow on_tool_end trace_manager error --- api/core/agent/cot_agent_runner.py | 7 ------- .../agent_tool_callback_handler.py | 19 ++++++++++--------- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py index 86577c0167f44c..62e44d729d876b 100644 --- a/api/core/agent/cot_agent_runner.py +++ b/api/core/agent/cot_agent_runner.py @@ -395,13 +395,6 @@ def _organize_historic_prompt_messages(self, current_session_messages: list[Prom scratchpads: list[AgentScratchpadUnit] = [] current_scratchpad: AgentScratchpadUnit = None - self.history_prompt_messages = AgentHistoryPromptTransform( - model_config=self.model_config, - prompt_messages=current_session_messages or [], - history_messages=self.history_prompt_messages, - memory=self.memory - ).get_prompt() - for message in self.history_prompt_messages: if isinstance(message, AssistantPromptMessage): if not current_scratchpad: diff --git a/api/core/callback_handler/agent_tool_callback_handler.py b/api/core/callback_handler/agent_tool_callback_handler.py index 3782c34cd58709..383ee92ee83694 100644 --- a/api/core/callback_handler/agent_tool_callback_handler.py +++ b/api/core/callback_handler/agent_tool_callback_handler.py @@ -64,16 +64,17 @@ def on_tool_end( print_text("Outputs: " + str(tool_outputs)[:1000] + "\n", color=self.color) print_text("\n") - trace_manager.add_trace_task( - TraceTask( - TraceTaskName.TOOL_TRACE, - message_id=message_id, - tool_name=tool_name, - tool_inputs=tool_inputs, - tool_outputs=tool_outputs, - timer=timer, + if trace_manager: + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.TOOL_TRACE, + message_id=message_id, + tool_name=tool_name, + 
tool_inputs=tool_inputs, + tool_outputs=tool_outputs, + timer=timer, + ) ) - ) def on_tool_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any From 202872e1d4799ac4c07ea6e8d68f7070ce9e37d3 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 13:23:50 +0800 Subject: [PATCH 45/65] chore: change the trace structure chore: change the trace structure --- api/controllers/console/app/app.py | 6 +- api/controllers/console/app/ops_trace.py | 10 +- api/core/agent/cot_agent_runner.py | 2 +- .../app/apps/advanced_chat/app_generator.py | 2 +- .../advanced_chat/generate_task_pipeline.py | 2 +- api/core/app/apps/agent_chat/app_generator.py | 2 +- api/core/app/apps/chat/app_generator.py | 2 +- api/core/app/apps/completion/app_generator.py | 2 +- api/core/app/apps/workflow/app_generator.py | 2 +- .../apps/workflow/generate_task_pipeline.py | 2 +- api/core/app/entities/app_invoke_entities.py | 2 +- .../easy_ui_based_generate_task_pipeline.py | 2 +- .../task_pipeline/workflow_cycle_manage.py | 2 +- .../agent_tool_callback_handler.py | 2 +- api/core/llm_generator/llm_generator.py | 2 +- api/core/moderation/input_moderation.py | 2 +- ..._queue_manager.py => ops_trace_manager.py} | 246 +++++++++++- api/core/ops/ops_trace_service.py | 370 ------------------ api/core/rag/retrieval/dataset_retrieval.py | 2 +- api/core/tools/tool_engine.py | 2 +- api/services/message_service.py | 2 +- api/services/ops_service.py | 132 +++++++ 22 files changed, 399 insertions(+), 399 deletions(-) rename api/core/ops/{trace_queue_manager.py => ops_trace_manager.py} (63%) delete mode 100644 api/core/ops/ops_trace_service.py create mode 100644 api/services/ops_service.py diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index bc00175939a540..1a38bcba7e48e3 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -8,7 +8,7 @@ from controllers.console.app.wraps import get_app_model from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check -from core.ops.ops_trace_service import OpsTraceService +from core.ops.ops_trace_manager import OpsTraceManager from fields.app_fields import ( app_detail_fields, app_detail_fields_with_site, @@ -288,7 +288,7 @@ class AppTraceApi(Resource): @account_initialization_required def get(self, app_id): """Get app trace""" - app_trace_config = OpsTraceService.get_app_tracing_config( + app_trace_config = OpsTraceManager.get_app_tracing_config( app_id=app_id ) @@ -306,7 +306,7 @@ def post(self, app_id): parser.add_argument('tracing_provider', type=str, required=True, location='json') args = parser.parse_args() - OpsTraceService.update_app_tracing_config( + OpsTraceManager.update_app_tracing_config( app_id=app_id, enabled=args['enabled'], tracing_provider=args['tracing_provider'], diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py index eb5266d7dee69f..60a9d4163c25e2 100644 --- a/api/controllers/console/app/ops_trace.py +++ b/api/controllers/console/app/ops_trace.py @@ -4,8 +4,8 @@ from controllers.console.app.error import TracingConfigCheckError, TracingConfigIsExist, TracingConfigNotExist from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required -from core.ops.ops_trace_service import OpsTraceService from libs.login import login_required +from services.ops_service import OpsTraceManager 
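# Usage sketch for the endpoint handled below. Assumptions (not taken from this
# patch): the console blueprint is mounted at /console/api, the route binds a
# <uuid:app_id> segment, and requests carry a console bearer token; adjust for
# your deployment. The request/response fields match the parsers in this file.
#
#     import requests
#
#     resp = requests.post(
#         "http://localhost:5001/console/api/apps/<app-uuid>/trace-config",
#         headers={"Authorization": "Bearer <console-token>"},
#         json={
#             "tracing_provider": "langfuse",
#             "tracing_config": {
#                 "public_key": "pk-lf-...",
#                 "secret_key": "sk-lf-...",
#                 "host": "https://cloud.langfuse.com",
#             },
#         },
#     )
#     resp.raise_for_status()
#     print(resp.json())  # {"result": "success"} when the config is created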
class TraceAppConfigApi(Resource): @@ -22,7 +22,7 @@ def get(self, app_id): args = parser.parse_args() try: - trace_config = OpsTraceService.get_tracing_app_config( + trace_config = OpsTraceManager.get_tracing_app_config( app_id=app_id, tracing_provider=args['tracing_provider'] ) if not trace_config: @@ -42,7 +42,7 @@ def post(self, app_id): args = parser.parse_args() try: - result = OpsTraceService.create_tracing_app_config( + result = OpsTraceManager.create_tracing_app_config( app_id=app_id, tracing_provider=args['tracing_provider'], tracing_config=args['tracing_config'] @@ -66,7 +66,7 @@ def patch(self, app_id): args = parser.parse_args() try: - result = OpsTraceService.update_tracing_app_config( + result = OpsTraceManager.update_tracing_app_config( app_id=app_id, tracing_provider=args['tracing_provider'], tracing_config=args['tracing_config'] @@ -87,7 +87,7 @@ def delete(self, app_id): args = parser.parse_args() try: - result = OpsTraceService.delete_tracing_app_config( + result = OpsTraceManager.delete_tracing_app_config( app_id=app_id, tracing_provider=args['tracing_provider'] ) diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py index 62e44d729d876b..3e883a4f7710f9 100644 --- a/api/core/agent/cot_agent_runner.py +++ b/api/core/agent/cot_agent_runner.py @@ -15,7 +15,7 @@ ToolPromptMessage, UserPromptMessage, ) -from core.ops.trace_queue_manager import TraceQueueManager +from core.ops.ops_trace_manager import TraceQueueManager from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform from core.tools.entities.tool_entities import ToolInvokeMeta from core.tools.tool.tool import Tool diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index ee9a423bd74708..2fcc3255408ac0 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -20,7 +20,7 @@ from core.app.entities.task_entities import ChatbotAppBlockingResponse, ChatbotAppStreamResponse from core.file.message_file_parser import MessageFileParser from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError -from core.ops.trace_queue_manager import TraceQueueManager +from core.ops.ops_trace_manager import TraceQueueManager from extensions.ext_database import db from models.account import Account from models.model import App, Conversation, EndUser, Message diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 8fad5126a42b88..5ca0fe21911c3f 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -42,7 +42,7 @@ from core.file.file_obj import FileVar from core.model_runtime.entities.llm_entities import LLMUsage from core.model_runtime.utils.encoders import jsonable_encoder -from core.ops.trace_queue_manager import TraceQueueManager +from core.ops.ops_trace_manager import TraceQueueManager from core.workflow.entities.node_entities import NodeType, SystemVariable from core.workflow.nodes.answer.answer_node import AnswerNode from core.workflow.nodes.answer.entities import TextGenerateRouteChunk, VarGenerateRouteChunk diff --git a/api/core/app/apps/agent_chat/app_generator.py b/api/core/app/apps/agent_chat/app_generator.py index 39589be1c4adc0..a9beeb3a5cd2c0 100644 --- a/api/core/app/apps/agent_chat/app_generator.py +++ b/api/core/app/apps/agent_chat/app_generator.py @@ -19,7 +19,7 @@ 
from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity, InvokeFrom from core.file.message_file_parser import MessageFileParser from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError -from core.ops.trace_queue_manager import TraceQueueManager +from core.ops.ops_trace_manager import TraceQueueManager from extensions.ext_database import db from models.account import Account from models.model import App, EndUser diff --git a/api/core/app/apps/chat/app_generator.py b/api/core/app/apps/chat/app_generator.py index 0a120986cd42b1..94e862cb878a47 100644 --- a/api/core/app/apps/chat/app_generator.py +++ b/api/core/app/apps/chat/app_generator.py @@ -19,7 +19,7 @@ from core.app.entities.app_invoke_entities import ChatAppGenerateEntity, InvokeFrom from core.file.message_file_parser import MessageFileParser from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError -from core.ops.trace_queue_manager import TraceQueueManager +from core.ops.ops_trace_manager import TraceQueueManager from extensions.ext_database import db from models.account import Account from models.model import App, EndUser diff --git a/api/core/app/apps/completion/app_generator.py b/api/core/app/apps/completion/app_generator.py index 466a66eac90add..c4e1caf65a9679 100644 --- a/api/core/app/apps/completion/app_generator.py +++ b/api/core/app/apps/completion/app_generator.py @@ -19,7 +19,7 @@ from core.app.entities.app_invoke_entities import CompletionAppGenerateEntity, InvokeFrom from core.file.message_file_parser import MessageFileParser from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError -from core.ops.trace_queue_manager import TraceQueueManager +from core.ops.ops_trace_manager import TraceQueueManager from extensions.ext_database import db from models.account import Account from models.model import App, EndUser, Message diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index c57243d7dcb1b9..3eb0bcf3dafe7b 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -20,7 +20,7 @@ from core.app.entities.task_entities import WorkflowAppBlockingResponse, WorkflowAppStreamResponse from core.file.message_file_parser import MessageFileParser from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError -from core.ops.trace_queue_manager import TraceQueueManager +from core.ops.ops_trace_manager import TraceQueueManager from extensions.ext_database import db from models.account import Account from models.model import App, EndUser diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index a1160ed4f21234..f4bd396f46b3ae 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -36,7 +36,7 @@ ) from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline from core.app.task_pipeline.workflow_cycle_manage import WorkflowCycleManage -from core.ops.trace_queue_manager import TraceQueueManager +from core.ops.ops_trace_manager import TraceQueueManager from core.workflow.entities.node_entities import NodeType, SystemVariable from core.workflow.nodes.end.end_node import EndNode from extensions.ext_database import db diff --git a/api/core/app/entities/app_invoke_entities.py b/api/core/app/entities/app_invoke_entities.py index 5896714c996f0e..1d2ad4a3735063 100644 
--- a/api/core/app/entities/app_invoke_entities.py +++ b/api/core/app/entities/app_invoke_entities.py @@ -7,7 +7,7 @@ from core.entities.provider_configuration import ProviderModelBundle from core.file.file_obj import FileVar from core.model_runtime.entities.model_entities import AIModelEntity -from core.ops.trace_queue_manager import TraceQueueManager +from core.ops.ops_trace_manager import TraceQueueManager class InvokeFrom(Enum): diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index 65c639bbd7116a..cc7f27c7d74f67 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -44,7 +44,7 @@ ) from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.model_runtime.utils.encoders import jsonable_encoder -from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName from core.prompt.utils.prompt_message_util import PromptMessageUtil from core.prompt.utils.prompt_template_parser import PromptTemplateParser from events.message_event import message_was_created diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index 60169cb4367e0f..2f937b14420d8e 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_cycle_manage.py @@ -22,7 +22,7 @@ from core.app.task_pipeline.workflow_iteration_cycle_manage import WorkflowIterationCycleManage from core.file.file_obj import FileVar from core.model_runtime.utils.encoders import jsonable_encoder -from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName from core.tools.tool_manager import ToolManager from core.workflow.entities.node_entities import NodeRunMetadataKey, NodeType from core.workflow.nodes.tool.entities import ToolNodeData diff --git a/api/core/callback_handler/agent_tool_callback_handler.py b/api/core/callback_handler/agent_tool_callback_handler.py index 383ee92ee83694..f973b7e1cec511 100644 --- a/api/core/callback_handler/agent_tool_callback_handler.py +++ b/api/core/callback_handler/agent_tool_callback_handler.py @@ -3,7 +3,7 @@ from pydantic import BaseModel -from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName _TEXT_COLOR_MAPPING = { "blue": "36;1", diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 65682064cef518..e705541d412509 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -10,7 +10,7 @@ from core.model_runtime.entities.message_entities import SystemPromptMessage, UserPromptMessage from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError -from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName from core.ops.utils import measure_time from core.prompt.utils.prompt_template_parser import PromptTemplateParser diff --git a/api/core/moderation/input_moderation.py 
b/api/core/moderation/input_moderation.py index 85fbfaef068fff..e2fda4ace4aa8f 100644 --- a/api/core/moderation/input_moderation.py +++ b/api/core/moderation/input_moderation.py @@ -4,7 +4,7 @@ from core.app.app_config.entities import AppConfig from core.moderation.base import ModerationAction, ModerationException from core.moderation.factory import ModerationFactory -from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName from core.ops.utils import measure_time logger = logging.getLogger(__name__) diff --git a/api/core/ops/trace_queue_manager.py b/api/core/ops/ops_trace_manager.py similarity index 63% rename from api/core/ops/trace_queue_manager.py rename to api/core/ops/ops_trace_manager.py index 4337e242c15649..b52da63fb9db15 100644 --- a/api/core/ops/trace_queue_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -4,11 +4,18 @@ import threading from datetime import timedelta from enum import Enum -from typing import Any, Optional +from typing import Any, Optional, Union +from uuid import UUID from flask import Flask, current_app +from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token from core.ops.base_trace_instance import BaseTraceInstance +from core.ops.entities.config_entity import ( + LangfuseConfig, + LangSmithConfig, + TracingProviderEnum, +) from core.ops.entities.trace_entity import ( DatasetRetrievalTraceInfo, GenerateNameTraceInfo, @@ -18,12 +25,243 @@ ToolTraceInfo, WorkflowTraceInfo, ) -from core.ops.ops_trace_service import OpsTraceService +from core.ops.langfuse_trace.langfuse_trace import LangFuseDataTrace +from core.ops.langsmith_trace.langsmith_trace import LangSmithDataTrace from core.ops.utils import get_message_data from extensions.ext_database import db -from models.model import Conversation, MessageAgentThought, MessageFile +from models.model import App, AppModelConfig, Conversation, Message, MessageAgentThought, MessageFile, TraceAppConfig from models.workflow import WorkflowRun +provider_config_map = { + TracingProviderEnum.LANGFUSE.value: { + 'config_class': LangfuseConfig, + 'secret_keys': ('public_key', 'secret_key'), + 'other_keys': ('host',), + 'trace_instance': LangFuseDataTrace + }, + TracingProviderEnum.LANGSMITH.value: { + 'config_class': LangSmithConfig, + 'secret_keys': ('api_key',), + 'other_keys': ('project', 'endpoint'), + 'trace_instance': LangSmithDataTrace + } +} + + +class OpsTraceManager: + @classmethod + def encrypt_tracing_config( + cls, tenant_id: str, tracing_provider: str, tracing_config: dict, current_trace_config=None + ): + """ + Encrypt tracing config. 
+ :param tenant_id: tenant id + :param tracing_provider: tracing provider + :param tracing_config: tracing config dictionary to be encrypted + :param current_trace_config: current tracing configuration for keeping existing values + :return: encrypted tracing configuration + """ + # Get the configuration class and the keys that require encryption + config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \ + provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys'] + + new_config = {} + # Encrypt necessary keys + for key in secret_keys: + if key in tracing_config: + if '*' in tracing_config[key]: + # If the key contains '*', retain the original value from the current config + new_config[key] = current_trace_config.get(key, tracing_config[key]) + else: + # Otherwise, encrypt the key + new_config[key] = encrypt_token(tenant_id, tracing_config[key]) + + for key in other_keys: + new_config[key] = tracing_config.get(key, "") + + # Create a new instance of the config class with the new configuration + encrypted_config = config_class(**new_config) + return encrypted_config.model_dump() + + @classmethod + def decrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_config: dict): + """ + Decrypt tracing config + :param tenant_id: tenant id + :param tracing_provider: tracing provider + :param tracing_config: tracing config + :return: + """ + config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \ + provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys'] + new_config = {} + for key in secret_keys: + if key in tracing_config: + new_config[key] = decrypt_token(tenant_id, tracing_config[key]) + + for key in other_keys: + new_config[key] = tracing_config.get(key, "") + + return config_class(**new_config).model_dump() + + @classmethod + def obfuscated_decrypt_token(cls, tracing_provider: str, decrypt_tracing_config:dict): + """ + Decrypt tracing config + :param tracing_provider: tracing provider + :param decrypt_tracing_config: tracing config + :return: + """ + config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \ + provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys'] + new_config = {} + for key in secret_keys: + if key in decrypt_tracing_config: + new_config[key] = obfuscated_token(decrypt_tracing_config[key]) + + for key in other_keys: + new_config[key] = decrypt_tracing_config.get(key, "") + + return config_class(**new_config).model_dump() + + @classmethod + def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): + """ + Get decrypted tracing config + :param app_id: app id + :param tracing_provider: tracing provider + :return: + """ + trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( + TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider + ).first() + + if not trace_config_data: + return None + + # decrypt_token + tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id + decrypt_tracing_config = cls.decrypt_tracing_config( + tenant_id, tracing_provider, trace_config_data.tracing_config + ) + + return decrypt_tracing_config + + @classmethod + def get_ops_trace_instance( + cls, + app_id: Union[UUID, str] = None, + message_id: str = None, + conversation_id: str = None + ): + """ + Get ops trace through model config + 
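The '*' check above is what lets the console echo obfuscated secrets back on save without destroying them. A sketch of an update round-trip, assuming 'langfuse' is TracingProviderEnum.LANGFUSE.value and the bracketed strings stand in for real ciphertext:

submitted = {
    'public_key': 'pk-lf-new-key',        # re-entered by the user, gets re-encrypted
    'secret_key': 'sk-lf-****-****',      # obfuscated echo, contains '*', kept as-is
    'host': 'https://cloud.langfuse.com',
}
current = {'public_key': '<old ciphertext>', 'secret_key': '<old ciphertext>'}

stored = OpsTraceManager.encrypt_tracing_config(
    tenant_id='tenant-uuid',
    tracing_provider='langfuse',
    tracing_config=submitted,
    current_trace_config=current,
)
# stored['secret_key'] is current['secret_key'], untouched;
# stored['public_key'] is encrypt_token('tenant-uuid', 'pk-lf-new-key')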
:param app_id: app_id + :param message_id: message_id + :param conversation_id: conversation_id + :return: + """ + if conversation_id: + conversation_data: Conversation = db.session.query(Conversation).filter( + Conversation.id == conversation_id + ).first() + app_id = conversation_data.app_id + + if message_id: + record: Message = db.session.query(Message).filter(Message.id == message_id).first() + app_id = record.app_id + + if isinstance(app_id, UUID): + app_id = str(app_id) + + tracing_instance = None + app: App = db.session.query(App).filter( + App.id == app_id + ).first() + app_ops_trace_config = json.loads(app.tracing) if app.tracing else None + + if app_ops_trace_config is not None: + tracing_provider = app_ops_trace_config.get('tracing_provider') + else: + return None + + # decrypt_token + decrypt_trace_config = cls.get_decrypted_tracing_config(app_id, tracing_provider) + if app_ops_trace_config.get('enabled'): + trace_instance, config_class = provider_config_map[tracing_provider]['trace_instance'], \ + provider_config_map[tracing_provider]['config_class'] + tracing_instance = trace_instance(config_class(**decrypt_trace_config)) + return tracing_instance + + return None + + @classmethod + def get_app_config_through_message_id(cls, message_id: str): + app_model_config = None + message_data = db.session.query(Message).filter(Message.id == message_id).first() + conversation_id = message_data.conversation_id + conversation_data = db.session.query(Conversation).filter(Conversation.id == conversation_id).first() + + if conversation_data.app_model_config_id: + app_model_config = db.session.query(AppModelConfig).filter( + AppModelConfig.id == conversation_data.app_model_config_id + ).first() + elif conversation_data.app_model_config_id is None and conversation_data.override_model_configs: + app_model_config = conversation_data.override_model_configs + + return app_model_config + + @classmethod + def update_app_tracing_config(cls, app_id: str, enabled: bool, tracing_provider: str): + """ + Update app tracing config + :param app_id: app id + :param enabled: enabled + :param tracing_provider: tracing provider + :return: + """ + # auth check + if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value, None, ""]: + raise ValueError(f"Invalid tracing provider: {tracing_provider}") + + app_config: App = db.session.query(App).filter(App.id == app_id).first() + app_config.tracing = json.dumps( + { + "enabled": enabled, + "tracing_provider": tracing_provider, + } + ) + db.session.commit() + + @classmethod + def get_app_tracing_config(cls, app_id: str): + """ + Get app tracing config + :param app_id: app id + :return: + """ + app: App = db.session.query(App).filter(App.id == app_id).first() + if not app.tracing: + return { + "enabled": False, + "tracing_provider": None + } + app_trace_config = json.loads(app.tracing) + return app_trace_config + + @staticmethod + def check_trace_config_is_effective(tracing_config: dict, tracing_provider: str): + """ + Check trace config is effective + :param tracing_config: tracing config + :param tracing_provider: tracing provider + :return: + """ + config_type, trace_instance = provider_config_map[tracing_provider]['config_class'], \ + provider_config_map[tracing_provider]['trace_instance'] + tracing_config = config_type(**tracing_config) + return trace_instance(tracing_config).api_check() + class TraceTaskName(str, Enum): CONVERSATION_TRACE = 'conversation_trace' @@ -387,7 +625,7 @@ def generate_name_trace(self, 
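Everything in this class is driven by provider_config_map, so supporting another backend is, in principle, one new entry. A purely hypothetical sketch (WandbConfig and WandbDataTrace do not exist in this patch; they stand in for whatever a new integration would supply):

provider_config_map['wandb'] = {
    'config_class': WandbConfig,        # a pydantic model with the provider's fields
    'secret_keys': ('api_key',),        # encrypted at rest, obfuscated on read
    'other_keys': ('project',),         # stored in plain form
    'trace_instance': WandbDataTrace,   # a BaseTraceInstance subclass with api_check()
}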
conversation_id, timer, **kwargs): class TraceQueueManager: def __init__(self, app_id=None, conversation_id=None, message_id=None): - tracing_instance = OpsTraceService.get_ops_trace_instance(app_id, conversation_id, message_id) + tracing_instance = OpsTraceManager.get_ops_trace_instance(app_id, conversation_id, message_id) self.queue = queue.Queue() self.is_running = True self.thread = threading.Thread( diff --git a/api/core/ops/ops_trace_service.py b/api/core/ops/ops_trace_service.py deleted file mode 100644 index 85e3f9cf9a3af8..00000000000000 --- a/api/core/ops/ops_trace_service.py +++ /dev/null @@ -1,370 +0,0 @@ -import json -from typing import Union -from uuid import UUID - -from core.helper.encrypter import decrypt_token, encrypt_token, obfuscated_token -from core.ops.entities.config_entity import ( - LangfuseConfig, - LangSmithConfig, - TracingProviderEnum, -) -from core.ops.langfuse_trace.langfuse_trace import LangFuseDataTrace -from core.ops.langsmith_trace.langsmith_trace import LangSmithDataTrace -from extensions.ext_database import db -from models.model import App, AppModelConfig, Conversation, Message, TraceAppConfig - -provider_config_map = { - TracingProviderEnum.LANGFUSE.value: { - 'config_class': LangfuseConfig, - 'secret_keys': ('public_key', 'secret_key'), - 'other_keys': ('host',), - 'trace_instance': LangFuseDataTrace - }, - TracingProviderEnum.LANGSMITH.value: { - 'config_class': LangSmithConfig, - 'secret_keys': ('api_key',), - 'other_keys': ('project', 'endpoint'), - 'trace_instance': LangSmithDataTrace - } -} - - -class OpsTraceService: - @classmethod - def get_tracing_app_config(cls, app_id: str, tracing_provider: str): - """ - Get tracing app config - :param app_id: app id - :param tracing_provider: tracing provider - :return: - """ - trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( - TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider - ).first() - - if not trace_config_data: - return None - - # decrypt_token and obfuscated_token - tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id - decrypt_tracing_config = cls.decrypt_tracing_config(tenant_id, tracing_provider, trace_config_data.tracing_config) - decrypt_tracing_config = cls.obfuscated_decrypt_token(tracing_provider, decrypt_tracing_config) - - trace_config_data.tracing_config = decrypt_tracing_config - - return trace_config_data.to_dict() - - @classmethod - def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_config: dict): - """ - Create tracing app config - :param app_id: app id - :param tracing_provider: tracing provider - :param tracing_config: tracing config - :return: - """ - if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, - TracingProviderEnum.LANGSMITH.value] and tracing_provider != "": - return {"error": f"Invalid tracing provider: {tracing_provider}"} - - config_class, other_keys = provider_config_map[tracing_provider]['config_class'], \ - provider_config_map[tracing_provider]['other_keys'] - default_config_instance = config_class(**tracing_config) - for key in other_keys: - if key in tracing_config and tracing_config[key] == "": - tracing_config[key] = getattr(default_config_instance, key, None) - - # api check - if not cls.check_trace_config_is_effective(tracing_config, tracing_provider): - return {"error": "Invalid Credentials"} - - # check if trace config already exists - trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( - 
TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider - ).first() - - if trace_config_data: - return None - - # get tenant id - tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id - tracing_config = cls.encrypt_tracing_config(tenant_id, tracing_provider, tracing_config) - trace_config_data = TraceAppConfig( - app_id=app_id, - tracing_provider=tracing_provider, - tracing_config=tracing_config, - ) - db.session.add(trace_config_data) - db.session.commit() - - return {"result": "success"} - - @classmethod - def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_config: dict): - """ - Update tracing app config - :param app_id: app id - :param tracing_provider: tracing provider - :param tracing_config: tracing config - :return: - """ - if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value]: - raise ValueError(f"Invalid tracing provider: {tracing_provider}") - - # check if trace config already exists - current_trace_config = db.session.query(TraceAppConfig).filter( - TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider - ).first() - - if not current_trace_config: - return None - - # get tenant id - tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id - tracing_config = cls.encrypt_tracing_config( - tenant_id, tracing_provider, tracing_config, current_trace_config.tracing_config - ) - - # api check - # decrypt_token - decrypt_tracing_config = cls.decrypt_tracing_config(tenant_id, tracing_provider, tracing_config) - if not cls.check_trace_config_is_effective(decrypt_tracing_config, tracing_provider): - raise ValueError("Invalid Credentials") - - current_trace_config.tracing_config = tracing_config - db.session.commit() - - return current_trace_config.to_dict() - - @classmethod - def delete_tracing_app_config(cls, app_id: str, tracing_provider: str): - """ - Delete tracing app config - :param app_id: app id - :param tracing_provider: tracing provider - :return: - """ - trace_config = db.session.query(TraceAppConfig).filter( - TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider - ).first() - - if not trace_config: - return None - - db.session.delete(trace_config) - db.session.commit() - - return True - - @classmethod - def encrypt_tracing_config( - cls, tenant_id: str, tracing_provider: str, tracing_config: dict, current_trace_config=None - ): - """ - Encrypt tracing config. 
- :param tenant_id: tenant id - :param tracing_provider: tracing provider - :param tracing_config: tracing config dictionary to be encrypted - :param current_trace_config: current tracing configuration for keeping existing values - :return: encrypted tracing configuration - """ - # Get the configuration class and the keys that require encryption - config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \ - provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys'] - - new_config = {} - # Encrypt necessary keys - for key in secret_keys: - if key in tracing_config: - if '*' in tracing_config[key]: - # If the key contains '*', retain the original value from the current config - new_config[key] = current_trace_config.get(key, tracing_config[key]) - else: - # Otherwise, encrypt the key - new_config[key] = encrypt_token(tenant_id, tracing_config[key]) - - for key in other_keys: - new_config[key] = tracing_config.get(key, "") - - # Create a new instance of the config class with the new configuration - encrypted_config = config_class(**new_config) - return encrypted_config.model_dump() - - @classmethod - def decrypt_tracing_config(cls, tenant_id: str, tracing_provider: str, tracing_config: dict): - """ - Decrypt tracing config - :param tenant_id: tenant id - :param tracing_provider: tracing provider - :param tracing_config: tracing config - :return: - """ - config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \ - provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys'] - new_config = {} - for key in secret_keys: - if key in tracing_config: - new_config[key] = decrypt_token(tenant_id, tracing_config[key]) - - for key in other_keys: - new_config[key] = tracing_config.get(key, "") - - return config_class(**new_config).model_dump() - - @classmethod - def obfuscated_decrypt_token(cls, tracing_provider: str, decrypt_tracing_config:dict): - """ - Decrypt tracing config - :param tracing_provider: tracing provider - :param decrypt_tracing_config: tracing config - :return: - """ - config_class, secret_keys, other_keys = provider_config_map[tracing_provider]['config_class'], \ - provider_config_map[tracing_provider]['secret_keys'], provider_config_map[tracing_provider]['other_keys'] - new_config = {} - for key in secret_keys: - if key in decrypt_tracing_config: - new_config[key] = obfuscated_token(decrypt_tracing_config[key]) - - for key in other_keys: - new_config[key] = decrypt_tracing_config.get(key, "") - - return config_class(**new_config).model_dump() - - @classmethod - def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): - """ - Get decrypted tracing config - :param app_id: app id - :param tracing_provider: tracing provider - :return: - """ - trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( - TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider - ).first() - - if not trace_config_data: - return None - - # decrypt_token - tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id - decrypt_tracing_config = cls.decrypt_tracing_config( - tenant_id, tracing_provider, trace_config_data.tracing_config - ) - - return decrypt_tracing_config - - @classmethod - def get_ops_trace_instance( - cls, - app_id: Union[UUID, str] = None, - message_id: str = None, - conversation_id: str = None - ): - """ - Get ops trace through model config - 
:param app_id: app_id - :param message_id: message_id - :param conversation_id: conversation_id - :return: - """ - if conversation_id: - conversation_data: Conversation = db.session.query(Conversation).filter( - Conversation.id == conversation_id - ).first() - app_id = conversation_data.app_id - - if message_id: - record: Message = db.session.query(Message).filter(Message.id == message_id).first() - app_id = record.app_id - - if isinstance(app_id, UUID): - app_id = str(app_id) - - tracing_instance = None - app: App = db.session.query(App).filter( - App.id == app_id - ).first() - app_ops_trace_config = json.loads(app.tracing) if app.tracing else None - - if app_ops_trace_config is not None: - tracing_provider = app_ops_trace_config.get('tracing_provider') - else: - return None - - # decrypt_token - decrypt_trace_config = cls.get_decrypted_tracing_config(app_id, tracing_provider) - if app_ops_trace_config.get('enabled'): - trace_instance, config_class = provider_config_map[tracing_provider]['trace_instance'], \ - provider_config_map[tracing_provider]['config_class'] - tracing_instance = trace_instance(config_class(**decrypt_trace_config)) - return tracing_instance - - return None - - @classmethod - def get_app_config_through_message_id(cls, message_id: str): - app_model_config = None - message_data = db.session.query(Message).filter(Message.id == message_id).first() - conversation_id = message_data.conversation_id - conversation_data = db.session.query(Conversation).filter(Conversation.id == conversation_id).first() - - if conversation_data.app_model_config_id: - app_model_config = db.session.query(AppModelConfig).filter( - AppModelConfig.id == conversation_data.app_model_config_id - ).first() - elif conversation_data.app_model_config_id is None and conversation_data.override_model_configs: - app_model_config = conversation_data.override_model_configs - - return app_model_config - - @classmethod - def update_app_tracing_config(cls, app_id: str, enabled: bool, tracing_provider: str): - """ - Update app tracing config - :param app_id: app id - :param enabled: enabled - :param tracing_provider: tracing provider - :return: - """ - # auth check - if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value, None, ""]: - raise ValueError(f"Invalid tracing provider: {tracing_provider}") - - app_config: App = db.session.query(App).filter(App.id == app_id).first() - app_config.tracing = json.dumps( - { - "enabled": enabled, - "tracing_provider": tracing_provider, - } - ) - db.session.commit() - - @classmethod - def get_app_tracing_config(cls, app_id: str): - """ - Get app tracing config - :param app_id: app id - :return: - """ - app: App = db.session.query(App).filter(App.id == app_id).first() - if not app.tracing: - return { - "enabled": False, - "tracing_provider": None - } - app_trace_config = json.loads(app.tracing) - return app_trace_config - - @staticmethod - def check_trace_config_is_effective(tracing_config: dict, tracing_provider: str): - """ - Check trace config is effective - :param tracing_config: tracing config - :param tracing_provider: tracing provider - :return: - """ - config_type, trace_instance = provider_config_map[tracing_provider]['config_class'], \ - provider_config_map[tracing_provider]['trace_instance'] - tracing_config = config_type(**tracing_config) - return trace_instance(tracing_config).api_check() diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index 
b976f50eb5535c..e23f4662c45be4 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -12,7 +12,7 @@ from core.model_runtime.entities.message_entities import PromptMessageTool from core.model_runtime.entities.model_entities import ModelFeature, ModelType from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.ops.trace_queue_manager import TraceTask, TraceTaskName +from core.ops.ops_trace_manager import TraceTask, TraceTaskName from core.ops.utils import measure_time from core.rag.datasource.retrieval_service import RetrievalService from core.rag.models.document import Document diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py index d48936fe1aed67..ebcdb1e3432fcb 100644 --- a/api/core/tools/tool_engine.py +++ b/api/core/tools/tool_engine.py @@ -10,7 +10,7 @@ from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler from core.callback_handler.workflow_tool_callback_handler import DifyWorkflowCallbackHandler from core.file.file_obj import FileTransferMethod -from core.ops.trace_queue_manager import TraceQueueManager +from core.ops.ops_trace_manager import TraceQueueManager from core.tools.entities.tool_entities import ToolInvokeMessage, ToolInvokeMessageBinary, ToolInvokeMeta, ToolParameter from core.tools.errors import ( ToolEngineInvokeError, diff --git a/api/services/message_service.py b/api/services/message_service.py index 15329bbc30de8d..f0fed955a84c43 100644 --- a/api/services/message_service.py +++ b/api/services/message_service.py @@ -7,7 +7,7 @@ from core.memory.token_buffer_memory import TokenBufferMemory from core.model_manager import ModelManager from core.model_runtime.entities.model_entities import ModelType -from core.ops.trace_queue_manager import TraceQueueManager, TraceTask, TraceTaskName +from core.ops.ops_trace_manager import TraceQueueManager, TraceTask, TraceTaskName from core.ops.utils import measure_time from extensions.ext_database import db from libs.infinite_scroll_pagination import InfiniteScrollPagination diff --git a/api/services/ops_service.py b/api/services/ops_service.py new file mode 100644 index 00000000000000..41042999b9c098 --- /dev/null +++ b/api/services/ops_service.py @@ -0,0 +1,132 @@ +from core.ops.entities.config_entity import TracingProviderEnum +from core.ops.ops_trace_manager import OpsTraceManager, provider_config_map +from extensions.ext_database import db +from models.model import App, TraceAppConfig + + +class OpsService: + @classmethod + def get_tracing_app_config(cls, app_id: str, tracing_provider: str): + """ + Get tracing app config + :param app_id: app id + :param tracing_provider: tracing provider + :return: + """ + trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( + TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider + ).first() + + if not trace_config_data: + return None + + # decrypt_token and obfuscated_token + tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id + decrypt_tracing_config = OpsTraceManager.decrypt_tracing_config(tenant_id, tracing_provider, trace_config_data.tracing_config) + decrypt_tracing_config = OpsTraceManager.obfuscated_decrypt_token(tracing_provider, decrypt_tracing_config) + + trace_config_data.tracing_config = decrypt_tracing_config + + return trace_config_data.to_dict() + + @classmethod + def create_tracing_app_config(cls, app_id: str, tracing_provider: str, 
tracing_config: dict): + """ + Create tracing app config + :param app_id: app id + :param tracing_provider: tracing provider + :param tracing_config: tracing config + :return: + """ + if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, + TracingProviderEnum.LANGSMITH.value] and tracing_provider != "": + return {"error": f"Invalid tracing provider: {tracing_provider}"} + + config_class, other_keys = provider_config_map[tracing_provider]['config_class'], \ + provider_config_map[tracing_provider]['other_keys'] + default_config_instance = config_class(**tracing_config) + for key in other_keys: + if key in tracing_config and tracing_config[key] == "": + tracing_config[key] = getattr(default_config_instance, key, None) + + # api check + if not OpsTraceManager.check_trace_config_is_effective(tracing_config, tracing_provider): + return {"error": "Invalid Credentials"} + + # check if trace config already exists + trace_config_data: TraceAppConfig = db.session.query(TraceAppConfig).filter( + TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider + ).first() + + if trace_config_data: + return None + + # get tenant id + tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id + tracing_config = OpsTraceManager.encrypt_tracing_config(tenant_id, tracing_provider, tracing_config) + trace_config_data = TraceAppConfig( + app_id=app_id, + tracing_provider=tracing_provider, + tracing_config=tracing_config, + ) + db.session.add(trace_config_data) + db.session.commit() + + return {"result": "success"} + + @classmethod + def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_config: dict): + """ + Update tracing app config + :param app_id: app id + :param tracing_provider: tracing provider + :param tracing_config: tracing config + :return: + """ + if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value]: + raise ValueError(f"Invalid tracing provider: {tracing_provider}") + + # check if trace config already exists + current_trace_config = db.session.query(TraceAppConfig).filter( + TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider + ).first() + + if not current_trace_config: + return None + + # get tenant id + tenant_id = db.session.query(App).filter(App.id == app_id).first().tenant_id + tracing_config = OpsTraceManager.encrypt_tracing_config( + tenant_id, tracing_provider, tracing_config, current_trace_config.tracing_config + ) + + # api check + # decrypt_token + decrypt_tracing_config = OpsTraceManager.decrypt_tracing_config(tenant_id, tracing_provider, tracing_config) + if not OpsTraceManager.check_trace_config_is_effective(decrypt_tracing_config, tracing_provider): + raise ValueError("Invalid Credentials") + + current_trace_config.tracing_config = tracing_config + db.session.commit() + + return current_trace_config.to_dict() + + @classmethod + def delete_tracing_app_config(cls, app_id: str, tracing_provider: str): + """ + Delete tracing app config + :param app_id: app id + :param tracing_provider: tracing provider + :return: + """ + trace_config = db.session.query(TraceAppConfig).filter( + TraceAppConfig.app_id == app_id, TraceAppConfig.tracing_provider == tracing_provider + ).first() + + if not trace_config: + return None + + db.session.delete(trace_config) + db.session.commit() + + return True From 27c24465dc93c029755e17296077f08650d8951d Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 13:38:04 +0800 Subject: [PATCH 
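One property of the read path above deserves a note: secrets are decrypted only to be re-masked, so plaintext never leaves the service layer. A sketch of what a console GET plausibly returns for LangSmith (the shape follows to_dict(); the mask format comes from obfuscated_token and is illustrative):

config = OpsService.get_tracing_app_config(app_id='app-uuid', tracing_provider='langsmith')
# Plausible result, values illustrative:
# {
#     'app_id': 'app-uuid',
#     'tracing_provider': 'langsmith',
#     'tracing_config': {
#         'api_key': 'ls__****2f1a',    # masked, never the stored plaintext
#         'project': 'dify-traces',
#         'endpoint': 'https://api.smith.langchain.com',
#     },
# }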
46/65] fix: pydantic model_ warning --- api/controllers/console/app/ops_trace.py | 10 +++++----- api/core/ops/entities/trace_entity.py | 3 +++ .../langfuse_trace/entities/langfuse_trace_entity.py | 4 +++- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py index 60a9d4163c25e2..c0cf7b9e33f32b 100644 --- a/api/controllers/console/app/ops_trace.py +++ b/api/controllers/console/app/ops_trace.py @@ -5,7 +5,7 @@ from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required from libs.login import login_required -from services.ops_service import OpsTraceManager +from services.ops_service import OpsService class TraceAppConfigApi(Resource): @@ -22,7 +22,7 @@ def get(self, app_id): args = parser.parse_args() try: - trace_config = OpsTraceManager.get_tracing_app_config( + trace_config = OpsService.get_tracing_app_config( app_id=app_id, tracing_provider=args['tracing_provider'] ) if not trace_config: @@ -42,7 +42,7 @@ def post(self, app_id): args = parser.parse_args() try: - result = OpsTraceManager.create_tracing_app_config( + result = OpsService.create_tracing_app_config( app_id=app_id, tracing_provider=args['tracing_provider'], tracing_config=args['tracing_config'] @@ -66,7 +66,7 @@ def patch(self, app_id): args = parser.parse_args() try: - result = OpsTraceManager.update_tracing_app_config( + result = OpsService.update_tracing_app_config( app_id=app_id, tracing_provider=args['tracing_provider'], tracing_config=args['tracing_config'] @@ -87,7 +87,7 @@ def delete(self, app_id): args = parser.parse_args() try: - result = OpsTraceManager.delete_tracing_app_config( + result = OpsService.delete_tracing_app_config( app_id=app_id, tracing_provider=args['tracing_provider'] ) diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index 456b87e7fbed89..3ff6e61fdc615c 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -63,7 +63,10 @@ class SuggestedQuestionTraceInfo(BaseTraceInfo): level: str status_message: Optional[str] = None + class Config: + protected_namespaces = () + class DatasetRetrievalTraceInfo(BaseTraceInfo): documents: Any diff --git a/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py b/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py index ebcf692a5ca08c..52f9dc0169400c 100644 --- a/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py +++ b/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py @@ -266,8 +266,10 @@ class LangfuseGeneration(BaseModel): description="The version of the generation type. Used to understand how changes to the span type affect " "metrics. 
Useful in debugging.", ) - @field_validator("input", "output") def ensure_dict(cls, v, info: ValidationInfo): field_name = info.field_name return validate_input_output(v, field_name) + + class Config: + protected_namespaces = () From 0971768a7444647f2c75f8340ba763931ebdbf47 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 13:49:28 +0800 Subject: [PATCH 47/65] fix: conversation none error --- api/core/ops/ops_trace_manager.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index b52da63fb9db15..b1ca7b131a3848 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -150,9 +150,9 @@ def get_decrypted_tracing_config(cls, app_id: str, tracing_provider: str): @classmethod def get_ops_trace_instance( cls, - app_id: Union[UUID, str] = None, - message_id: str = None, - conversation_id: str = None + app_id: Optional[Union[UUID, str]] = None, + message_id: Optional[str] = None, + conversation_id: Optional[str] = None ): """ Get ops trace through model config @@ -161,20 +161,23 @@ def get_ops_trace_instance( :param conversation_id: conversation_id :return: """ - if conversation_id: + if conversation_id is not None: conversation_data: Conversation = db.session.query(Conversation).filter( Conversation.id == conversation_id ).first() - app_id = conversation_data.app_id + if conversation_data: + app_id = conversation_data.app_id - if message_id: + if message_id is not None: record: Message = db.session.query(Message).filter(Message.id == message_id).first() app_id = record.app_id if isinstance(app_id, UUID): app_id = str(app_id) - tracing_instance = None + if app_id is None: + return None + app: App = db.session.query(App).filter( App.id == app_id ).first() From 13a010456ffaa958c2c5ca6dfbb21db088f2e4fb Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 13:57:36 +0800 Subject: [PATCH 48/65] fix: from_account_id type error --- api/core/ops/entities/trace_entity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index 3ff6e61fdc615c..d4a8439e6b74d2 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -54,7 +54,7 @@ class SuggestedQuestionTraceInfo(BaseTraceInfo): total_tokens: int status: Optional[str] = None error: Optional[str] = None - from_account_id: str + from_account_id: Optional[str] = None agent_based: bool from_source: str model_provider: str From b5df5876d7ccf03005feb26b1d90d0ee7092e247 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 15:18:27 +0800 Subject: [PATCH 49/65] chore: update pydantic warning and tracing_provider_map --- api/core/ops/entities/trace_entity.py | 7 +++---- .../ops/langfuse_trace/entities/langfuse_trace_entity.py | 7 ++++--- api/core/ops/ops_trace_manager.py | 7 ++++++- api/services/ops_service.py | 6 ++---- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index d4a8439e6b74d2..b6c3aec6cc138b 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -1,7 +1,7 @@ from datetime import datetime from typing import Any, Optional, Union -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict class BaseTraceInfo(BaseModel): @@ -63,10 +63,9 @@ class 
SuggestedQuestionTraceInfo(BaseTraceInfo): level: str status_message: Optional[str] = None - class Config: - protected_namespaces = () + model_config = ConfigDict(protected_namespaces=()) + - class DatasetRetrievalTraceInfo(BaseTraceInfo): documents: Any diff --git a/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py b/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py index 52f9dc0169400c..6b4c01e5712b92 100644 --- a/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py +++ b/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py @@ -2,7 +2,7 @@ from enum import Enum from typing import Any, Optional, Union -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, ConfigDict, Field, field_validator from pydantic_core.core_schema import ValidationInfo from core.ops.utils import replace_text_with_content @@ -266,10 +266,11 @@ class LangfuseGeneration(BaseModel): description="The version of the generation type. Used to understand how changes to the span type affect " "metrics. Useful in debugging.", ) + + model_config = ConfigDict(protected_namespaces=()) + @field_validator("input", "output") def ensure_dict(cls, v, info: ValidationInfo): field_name = info.field_name return validate_input_output(v, field_name) - class Config: - protected_namespaces = () diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index b1ca7b131a3848..9c6af816c72e24 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -224,7 +224,7 @@ def update_app_tracing_config(cls, app_id: str, enabled: bool, tracing_provider: :return: """ # auth check - if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value, None, ""]: + if tracing_provider not in provider_config_map.keys() and tracing_provider != "": raise ValueError(f"Invalid tracing provider: {tracing_provider}") app_config: App = db.session.query(App).filter(App.id == app_id).first() @@ -654,3 +654,8 @@ def process_queue(self, flask_app: Flask, trace_instance: BaseTraceInstance): def add_trace_task(self, trace_task: TraceTask): self.queue.put(trace_task) + + +if __name__ == '__main__': + print(provider_config_map.keys()) + print(type(provider_config_map.keys())) diff --git a/api/services/ops_service.py b/api/services/ops_service.py index 41042999b9c098..ab8c737f75da5b 100644 --- a/api/services/ops_service.py +++ b/api/services/ops_service.py @@ -1,4 +1,3 @@ -from core.ops.entities.config_entity import TracingProviderEnum from core.ops.ops_trace_manager import OpsTraceManager, provider_config_map from extensions.ext_database import db from models.model import App, TraceAppConfig @@ -38,8 +37,7 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :param tracing_config: tracing config :return: """ - if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, - TracingProviderEnum.LANGSMITH.value] and tracing_provider != "": + if tracing_config not in provider_config_map.keys() and tracing_provider != "": return {"error": f"Invalid tracing provider: {tracing_provider}"} config_class, other_keys = provider_config_map[tracing_provider]['config_class'], \ @@ -83,7 +81,7 @@ def update_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :param tracing_config: tracing config :return: """ - if tracing_provider not in [TracingProviderEnum.LANGFUSE.value, TracingProviderEnum.LANGSMITH.value]: + if tracing_provider not in provider_config_map.keys(): raise 
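For context on the ConfigDict hunks above: pydantic v2 reserves the model_ prefix for its own namespace, so fields like model_provider and model_id trigger a UserWarning unless the guard is disabled. A self-contained reproduction:

from pydantic import BaseModel, ConfigDict

class Noisy(BaseModel):
    model_provider: str   # UserWarning: conflicts with protected namespace "model_"

class Quiet(BaseModel):
    model_config = ConfigDict(protected_namespaces=())
    model_provider: str   # no warning: the "model_" guard is switched off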
ValueError(f"Invalid tracing provider: {tracing_provider}") # check if trace config already exists From a760d0d0ec46d3367d811fc861c1559cb6b8c032 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 16:36:24 +0800 Subject: [PATCH 50/65] fix: tracing_provider error --- api/services/ops_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/services/ops_service.py b/api/services/ops_service.py index ab8c737f75da5b..65a446ce7478ca 100644 --- a/api/services/ops_service.py +++ b/api/services/ops_service.py @@ -37,7 +37,7 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :param tracing_config: tracing config :return: """ - if tracing_config not in provider_config_map.keys() and tracing_provider != "": + if tracing_provider not in provider_config_map.keys() and tracing_provider != "": return {"error": f"Invalid tracing provider: {tracing_provider}"} config_class, other_keys = provider_config_map[tracing_provider]['config_class'], \ From 0117310378560fa30edcea8f21da3c397995a872 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 16:39:54 +0800 Subject: [PATCH 51/65] fix: invalid tracing provider None --- api/core/ops/ops_trace_manager.py | 2 +- api/services/ops_service.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 9c6af816c72e24..1f021583f286ac 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -224,7 +224,7 @@ def update_app_tracing_config(cls, app_id: str, enabled: bool, tracing_provider: :return: """ # auth check - if tracing_provider not in provider_config_map.keys() and tracing_provider != "": + if tracing_provider not in provider_config_map.keys() and tracing_provider is not None: raise ValueError(f"Invalid tracing provider: {tracing_provider}") app_config: App = db.session.query(App).filter(App.id == app_id).first() diff --git a/api/services/ops_service.py b/api/services/ops_service.py index 65a446ce7478ca..ffc12a9acdb42c 100644 --- a/api/services/ops_service.py +++ b/api/services/ops_service.py @@ -37,7 +37,7 @@ def create_tracing_app_config(cls, app_id: str, tracing_provider: str, tracing_c :param tracing_config: tracing config :return: """ - if tracing_provider not in provider_config_map.keys() and tracing_provider != "": + if tracing_provider not in provider_config_map.keys() and tracing_provider != None: return {"error": f"Invalid tracing provider: {tracing_provider}"} config_class, other_keys = provider_config_map[tracing_provider]['config_class'], \ From d5b9fbd2db067c0a197419c186b04ce5dacbe225 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 16:48:02 +0800 Subject: [PATCH 52/65] fix: trace_manager none error --- api/core/rag/retrieval/dataset_retrieval.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index e23f4662c45be4..8544d7c3c86c43 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -358,15 +358,15 @@ def _on_retrival_end( # get tracing instance trace_manager = self.application_generate_entity.trace_manager if self.application_generate_entity else None - - trace_manager.add_trace_task( - TraceTask( - TraceTaskName.DATASET_RETRIEVAL_TRACE, - message_id=message_id, - documents=documents, - timer=timer + if trace_manager: + 
trace_manager.add_trace_task( + TraceTask( + TraceTaskName.DATASET_RETRIEVAL_TRACE, + message_id=message_id, + documents=documents, + timer=timer + ) ) - ) def _on_query(self, query: str, dataset_ids: list[str], app_id: str, user_from: str, user_id: str) -> None: """ From fca91214666a82ead5c1018d17698d1e05e525ab Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 17:57:14 +0800 Subject: [PATCH 53/65] fix: conversation generate error --- api/core/llm_generator/llm_generator.py | 7 ++++--- api/services/conversation_service.py | 4 +++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index e705541d412509..7d6b6868452d85 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -17,7 +17,9 @@ class LLMGenerator: @classmethod - def generate_conversation_name(cls, tenant_id: str, query, conversation_id: Optional[str] = None): + def generate_conversation_name( + cls, tenant_id: str, query, conversation_id: Optional[str] = None, app_id: Optional[str] = None + ): prompt = CONVERSATION_TITLE_PROMPT if len(query) > 2000: @@ -43,7 +45,6 @@ def generate_conversation_name(cls, tenant_id: str, query, conversation_id: Opti }, stream=False ) - answer = response.message.content result_dict = json.loads(answer) answer = result_dict['Your Output'] @@ -53,7 +54,7 @@ def generate_conversation_name(cls, tenant_id: str, query, conversation_id: Opti name = name[:75] + '...' # get tracing instance - trace_manager = TraceQueueManager(conversation_id=conversation_id) + trace_manager = TraceQueueManager(app_id=app_id) trace_manager.add_trace_task( TraceTask( TraceTaskName.GENERATE_NAME_TRACE, diff --git a/api/services/conversation_service.py b/api/services/conversation_service.py index 44a264087cbd69..82ee10ee78f095 100644 --- a/api/services/conversation_service.py +++ b/api/services/conversation_service.py @@ -96,7 +96,9 @@ def auto_generate_name(cls, app_model: App, conversation: Conversation): # generate conversation name try: - name = LLMGenerator.generate_conversation_name(app_model.tenant_id, message.query, conversation.id) + name = LLMGenerator.generate_conversation_name( + app_model.tenant_id, message.query, conversation.id, app_model.id + ) conversation.name = name except: pass From 9590da7dffdd5723a0d9c30648949a0fe10ca515 Mon Sep 17 00:00:00 2001 From: takatost Date: Tue, 25 Jun 2024 18:10:36 +0800 Subject: [PATCH 54/65] fix generate conversation name --- api/core/llm_generator/llm_generator.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 7d6b6868452d85..70d3befbbdcb6f 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -1,5 +1,6 @@ import json import logging +import re from typing import Optional from core.llm_generator.output_parser.errors import OutputParserException @@ -46,7 +47,8 @@ def generate_conversation_name( stream=False ) answer = response.message.content - result_dict = json.loads(answer) + cleaned_answer = re.sub(r'^.*(\{.*\}).*$', r'\1', answer, flags=re.DOTALL) + result_dict = json.loads(cleaned_answer) answer = result_dict['Your Output'] name = answer.strip() From f88d915a37ddef71bdedf07f5588124f3ed1d718 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 18:30:48 +0800 Subject: [PATCH 55/65] feat: add BaseTraceInfo field validator --- 
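The regex in the latest llm_generator hunk does the heavy lifting, so here it is in isolation. With DOTALL, the greedy prefix backtracks to a brace and the capture keeps only the braced span, which is enough for the flat, single-object JSON this prompt emits:

import json
import re

answer = 'Sure! Here is the JSON you asked for:\n{"Your Output": "Budget planning tips"}\nHope that helps.'
cleaned = re.sub(r'^.*(\{.*\}).*$', r'\1', answer, flags=re.DOTALL)
print(json.loads(cleaned)['Your Output'])   # Budget planning tips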
api/core/ops/entities/trace_entity.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index b6c3aec6cc138b..c4e4d059053ded 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -1,7 +1,7 @@ from datetime import datetime from typing import Any, Optional, Union -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, field_validator class BaseTraceInfo(BaseModel): @@ -13,6 +13,14 @@ class BaseTraceInfo(BaseModel): end_time: Optional[datetime] = None metadata: dict[str, Any] + @field_validator("inputs", "outputs") + def ensure_type(cls, v): + if v is None: + return None + if isinstance(v, str | dict | list): + return v + else: + return "" class WorkflowTraceInfo(BaseTraceInfo): workflow_data: Any @@ -55,10 +63,10 @@ class SuggestedQuestionTraceInfo(BaseTraceInfo): status: Optional[str] = None error: Optional[str] = None from_account_id: Optional[str] = None - agent_based: bool - from_source: str - model_provider: str - model_id: str + agent_based: Optional[bool] = None + from_source: Optional[str] = None + model_provider: Optional[str] = None + model_id: Optional[str] = None suggested_question: list[str] level: str status_message: Optional[str] = None From 1e6cc9d20799a3c376102f0660de3fa3e3d2b74d Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 18:42:49 +0800 Subject: [PATCH 56/65] feat: update MessageTraceInfo --- api/core/ops/entities/trace_entity.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index c4e4d059053ded..b14a2595bb2bbc 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -46,8 +46,8 @@ class MessageTraceInfo(BaseTraceInfo): answer_tokens: int total_tokens: int error: Optional[str] = None - file_list: list[str] - message_file_data: Any + file_list: Optional[Union[str, dict[str, Any], list]] = None + message_file_data: Optional[Any] = None conversation_mode: str From a21da4ce29a2a3ec854155981caa964c882f73c3 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 18:51:19 +0800 Subject: [PATCH 57/65] feat: update suggested question trace manager --- api/services/message_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/services/message_service.py b/api/services/message_service.py index f0fed955a84c43..e310d70d5314e7 100644 --- a/api/services/message_service.py +++ b/api/services/message_service.py @@ -271,7 +271,7 @@ def get_suggested_questions_after_answer(cls, app_model: App, user: Optional[Uni ) # get tracing instance - trace_manager = TraceQueueManager(message_id=message_id) + trace_manager = TraceQueueManager(app_id=app_model.id) trace_manager.add_trace_task( TraceTask( TraceTaskName.SUGGESTED_QUESTION_TRACE, From 3f9de5cccb91f8e2b1676ec644d8c88970323bfb Mon Sep 17 00:00:00 2001 From: takatost Date: Tue, 25 Jun 2024 18:58:59 +0800 Subject: [PATCH 58/65] optimize error msg --- .../parameter_extractor_node.py | 153 +++++++++--------- 1 file changed, 79 insertions(+), 74 deletions(-) diff --git a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py index 8fceb3404ab039..bb0ccb5fc37116 100644 --- a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py +++ 
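The new ensure_type validator applies simple coercion rules; restated as a plain function for illustration (pydantic invokes the real classmethod per field):

def coerce(v):
    if v is None:
        return None
    if isinstance(v, (str, dict, list)):
        return v
    return ''                  # anything else is flattened to an empty string

assert coerce(None) is None
assert coerce({'query': 'hi'}) == {'query': 'hi'}
assert coerce(42) == ''        # a stray number in inputs/outputs no longer propagates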
b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py @@ -66,44 +66,43 @@ def get_default_config(cls, filters: Optional[dict] = None) -> dict: } } - def _run(self, variable_pool: VariablePool) -> NodeRunResult: """ Run the node. """ - node_data = cast(ParameterExtractorNodeData, self.node_data) query = variable_pool.get_variable_value(node_data.query) if not query: - raise ValueError("Query not found") - - inputs={ + raise ValueError("Input variable content not found or is empty") + + inputs = { 'query': query, 'parameters': jsonable_encoder(node_data.parameters), 'instruction': jsonable_encoder(node_data.instruction), } - + model_instance, model_config = self._fetch_model_config(node_data.model) if not isinstance(model_instance.model_type_instance, LargeLanguageModel): raise ValueError("Model is not a Large Language Model") - + llm_model = model_instance.model_type_instance model_schema = llm_model.get_model_schema(model_config.model, model_config.credentials) if not model_schema: raise ValueError("Model schema not found") - + # fetch memory memory = self._fetch_memory(node_data.memory, variable_pool, model_instance) - + if set(model_schema.features or []) & set([ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL]) \ - and node_data.reasoning_mode == 'function_call': + and node_data.reasoning_mode == 'function_call': # use function call prompt_messages, prompt_message_tools = self._generate_function_call_prompt( node_data, query, variable_pool, model_config, memory ) else: # use prompt engineering - prompt_messages = self._generate_prompt_engineering_prompt(node_data, query, variable_pool, model_config, memory) + prompt_messages = self._generate_prompt_engineering_prompt(node_data, query, variable_pool, model_config, + memory) prompt_message_tools = [] process_data = { @@ -202,7 +201,7 @@ def _invoke_llm(self, node_data_model: ModelConfig, # handle invoke result if not isinstance(invoke_result, LLMResult): raise ValueError(f"Invalid invoke result: {invoke_result}") - + text = invoke_result.message.content usage = invoke_result.usage tool_call = invoke_result.message.tool_calls[0] if invoke_result.message.tool_calls else None @@ -212,21 +211,23 @@ def _invoke_llm(self, node_data_model: ModelConfig, return text, usage, tool_call - def _generate_function_call_prompt(self, - node_data: ParameterExtractorNodeData, - query: str, - variable_pool: VariablePool, - model_config: ModelConfigWithCredentialsEntity, - memory: Optional[TokenBufferMemory], - ) -> tuple[list[PromptMessage], list[PromptMessageTool]]: + def _generate_function_call_prompt(self, + node_data: ParameterExtractorNodeData, + query: str, + variable_pool: VariablePool, + model_config: ModelConfigWithCredentialsEntity, + memory: Optional[TokenBufferMemory], + ) -> tuple[list[PromptMessage], list[PromptMessageTool]]: """ Generate function call prompt. 
""" - query = FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE.format(content=query, structure=json.dumps(node_data.get_parameter_json_schema())) + query = FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE.format(content=query, structure=json.dumps( + node_data.get_parameter_json_schema())) prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True) rest_token = self._calculate_rest_token(node_data, query, variable_pool, model_config, '') - prompt_template = self._get_function_calling_prompt_template(node_data, query, variable_pool, memory, rest_token) + prompt_template = self._get_function_calling_prompt_template(node_data, query, variable_pool, memory, + rest_token) prompt_messages = prompt_transform.get_prompt( prompt_template=prompt_template, inputs={}, @@ -259,8 +260,8 @@ def _generate_function_call_prompt(self, function=AssistantPromptMessage.ToolCall.ToolCallFunction( name=example['assistant']['function_call']['name'], arguments=json.dumps(example['assistant']['function_call']['parameters'] - ) - )) + ) + )) ] ), ToolPromptMessage( @@ -273,8 +274,8 @@ def _generate_function_call_prompt(self, ]) prompt_messages = prompt_messages[:last_user_message_idx] + \ - example_messages + prompt_messages[last_user_message_idx:] - + example_messages + prompt_messages[last_user_message_idx:] + # generate tool tool = PromptMessageTool( name=FUNCTION_CALLING_EXTRACTOR_NAME, @@ -284,13 +285,13 @@ def _generate_function_call_prompt(self, return prompt_messages, [tool] - def _generate_prompt_engineering_prompt(self, - data: ParameterExtractorNodeData, - query: str, - variable_pool: VariablePool, - model_config: ModelConfigWithCredentialsEntity, - memory: Optional[TokenBufferMemory], - ) -> list[PromptMessage]: + def _generate_prompt_engineering_prompt(self, + data: ParameterExtractorNodeData, + query: str, + variable_pool: VariablePool, + model_config: ModelConfigWithCredentialsEntity, + memory: Optional[TokenBufferMemory], + ) -> list[PromptMessage]: """ Generate prompt engineering prompt. """ @@ -308,18 +309,19 @@ def _generate_prompt_engineering_prompt(self, raise ValueError(f"Invalid model mode: {model_mode}") def _generate_prompt_engineering_completion_prompt(self, - node_data: ParameterExtractorNodeData, - query: str, - variable_pool: VariablePool, - model_config: ModelConfigWithCredentialsEntity, - memory: Optional[TokenBufferMemory], - ) -> list[PromptMessage]: + node_data: ParameterExtractorNodeData, + query: str, + variable_pool: VariablePool, + model_config: ModelConfigWithCredentialsEntity, + memory: Optional[TokenBufferMemory], + ) -> list[PromptMessage]: """ Generate completion prompt. 
""" prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True) rest_token = self._calculate_rest_token(node_data, query, variable_pool, model_config, '') - prompt_template = self._get_prompt_engineering_prompt_template(node_data, query, variable_pool, memory, rest_token) + prompt_template = self._get_prompt_engineering_prompt_template(node_data, query, variable_pool, memory, + rest_token) prompt_messages = prompt_transform.get_prompt( prompt_template=prompt_template, inputs={ @@ -336,23 +338,23 @@ def _generate_prompt_engineering_completion_prompt(self, return prompt_messages def _generate_prompt_engineering_chat_prompt(self, - node_data: ParameterExtractorNodeData, - query: str, - variable_pool: VariablePool, - model_config: ModelConfigWithCredentialsEntity, - memory: Optional[TokenBufferMemory], - ) -> list[PromptMessage]: + node_data: ParameterExtractorNodeData, + query: str, + variable_pool: VariablePool, + model_config: ModelConfigWithCredentialsEntity, + memory: Optional[TokenBufferMemory], + ) -> list[PromptMessage]: """ Generate chat prompt. """ prompt_transform = AdvancedPromptTransform(with_variable_tmpl=True) rest_token = self._calculate_rest_token(node_data, query, variable_pool, model_config, '') prompt_template = self._get_prompt_engineering_prompt_template( - node_data, + node_data, CHAT_GENERATE_JSON_USER_MESSAGE_TEMPLATE.format( structure=json.dumps(node_data.get_parameter_json_schema()), text=query - ), + ), variable_pool, memory, rest_token ) @@ -387,7 +389,7 @@ def _generate_prompt_engineering_chat_prompt(self, ]) prompt_messages = prompt_messages[:last_user_message_idx] + \ - example_messages + prompt_messages[last_user_message_idx:] + example_messages + prompt_messages[last_user_message_idx:] return prompt_messages @@ -397,23 +399,23 @@ def _validate_result(self, data: ParameterExtractorNodeData, result: dict) -> di """ if len(data.parameters) != len(result): raise ValueError("Invalid number of parameters") - + for parameter in data.parameters: if parameter.required and parameter.name not in result: raise ValueError(f"Parameter {parameter.name} is required") - + if parameter.type == 'select' and parameter.options and result.get(parameter.name) not in parameter.options: raise ValueError(f"Invalid `select` value for parameter {parameter.name}") - + if parameter.type == 'number' and not isinstance(result.get(parameter.name), int | float): raise ValueError(f"Invalid `number` value for parameter {parameter.name}") - + if parameter.type == 'bool' and not isinstance(result.get(parameter.name), bool): raise ValueError(f"Invalid `bool` value for parameter {parameter.name}") - + if parameter.type == 'string' and not isinstance(result.get(parameter.name), str): raise ValueError(f"Invalid `string` value for parameter {parameter.name}") - + if parameter.type.startswith('array'): if not isinstance(result.get(parameter.name), list): raise ValueError(f"Invalid `array` value for parameter {parameter.name}") @@ -499,6 +501,7 @@ def _extract_complete_json_response(self, result: str) -> Optional[dict]: """ Extract complete json response. """ + def extract_json(text): """ From a given JSON started from '{' or '[' extract the complete JSON object. 
@@ -515,11 +518,11 @@ def extract_json(text): if (c == '}' and stack[-1] == '{') or (c == ']' and stack[-1] == '['): stack.pop() if not stack: - return text[:i+1] + return text[:i + 1] else: return text[:i] return None - + # extract json from the text for idx in range(len(result)): if result[idx] == '{' or result[idx] == '[': @@ -536,9 +539,9 @@ def _extract_json_from_tool_call(self, tool_call: AssistantPromptMessage.ToolCal """ if not tool_call or not tool_call.function.arguments: return None - + return json.loads(tool_call.function.arguments) - + def _generate_default_result(self, data: ParameterExtractorNodeData) -> dict: """ Generate default result. @@ -551,7 +554,7 @@ def _generate_default_result(self, data: ParameterExtractorNodeData) -> dict: result[parameter.name] = False elif parameter.type in ['string', 'select']: result[parameter.name] = '' - + return result def _render_instruction(self, instruction: str, variable_pool: VariablePool) -> str: @@ -562,13 +565,13 @@ def _render_instruction(self, instruction: str, variable_pool: VariablePool) -> inputs = {} for selector in variable_template_parser.extract_variable_selectors(): inputs[selector.variable] = variable_pool.get_variable_value(selector.value_selector) - + return variable_template_parser.format(inputs) def _get_function_calling_prompt_template(self, node_data: ParameterExtractorNodeData, query: str, - variable_pool: VariablePool, - memory: Optional[TokenBufferMemory], - max_token_limit: int = 2000) \ + variable_pool: VariablePool, + memory: Optional[TokenBufferMemory], + max_token_limit: int = 2000) \ -> list[ChatModelMessage]: model_mode = ModelMode.value_of(node_data.model.mode) input_text = query @@ -590,12 +593,12 @@ def _get_function_calling_prompt_template(self, node_data: ParameterExtractorNod return [system_prompt_messages, user_prompt_message] else: raise ValueError(f"Model mode {model_mode} not support.") - + def _get_prompt_engineering_prompt_template(self, node_data: ParameterExtractorNodeData, query: str, variable_pool: VariablePool, memory: Optional[TokenBufferMemory], max_token_limit: int = 2000) \ - -> list[ChatModelMessage]: + -> list[ChatModelMessage]: model_mode = ModelMode.value_of(node_data.model.mode) input_text = query @@ -620,8 +623,8 @@ def _get_prompt_engineering_prompt_template(self, node_data: ParameterExtractorN text=COMPLETION_GENERATE_JSON_PROMPT.format(histories=memory_str, text=input_text, instruction=instruction) - .replace('{γγγ', '') - .replace('}γγγ', '') + .replace('{γγγ', '') + .replace('}γγγ', '') ) else: raise ValueError(f"Model mode {model_mode} not support.") @@ -635,7 +638,7 @@ def _calculate_rest_token(self, node_data: ParameterExtractorNodeData, query: st model_instance, model_config = self._fetch_model_config(node_data.model) if not isinstance(model_instance.model_type_instance, LargeLanguageModel): raise ValueError("Model is not a Large Language Model") - + llm_model = model_instance.model_type_instance model_schema = llm_model.get_model_schema(model_config.model, model_config.credentials) if not model_schema: @@ -667,7 +670,7 @@ def _calculate_rest_token(self, node_data: ParameterExtractorNodeData, query: st model_config.model, model_config.credentials, prompt_messages - ) + 1000 # add 1000 to ensure tool call messages + ) + 1000 # add 1000 to ensure tool call messages max_tokens = 0 for parameter_rule in model_config.model_schema.parameter_rules: @@ -680,8 +683,9 @@ def _calculate_rest_token(self, node_data: ParameterExtractorNodeData, query: st rest_tokens = 
max(rest_tokens, 0) return rest_tokens - - def _fetch_model_config(self, node_data_model: ModelConfig) -> tuple[ModelInstance, ModelConfigWithCredentialsEntity]: + + def _fetch_model_config(self, node_data_model: ModelConfig) -> tuple[ + ModelInstance, ModelConfigWithCredentialsEntity]: """ Fetch model config. """ @@ -689,9 +693,10 @@ def _fetch_model_config(self, node_data_model: ModelConfig) -> tuple[ModelInstan self._model_instance, self._model_config = super()._fetch_model_config(node_data_model) return self._model_instance, self._model_config - + @classmethod - def _extract_variable_selector_to_variable_mapping(cls, node_data: ParameterExtractorNodeData) -> dict[str, list[str]]: + def _extract_variable_selector_to_variable_mapping(cls, node_data: ParameterExtractorNodeData) -> dict[ + str, list[str]]: """ Extract variable selector to variable mapping :param node_data: node data @@ -708,4 +713,4 @@ def _extract_variable_selector_to_variable_mapping(cls, node_data: ParameterExtr for selector in variable_template_parser.extract_variable_selectors(): variable_mapping[selector.variable] = selector.value_selector - return variable_mapping \ No newline at end of file + return variable_mapping From 81a6f8075a260140c2bb7913f654ab2f755113e2 Mon Sep 17 00:00:00 2001 From: takatost Date: Tue, 25 Jun 2024 19:07:09 +0800 Subject: [PATCH 59/65] fix _invoke_error_mapping of tongyi tts --- .../model_providers/tongyi/_common.py | 36 +++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/api/core/model_runtime/model_providers/tongyi/_common.py b/api/core/model_runtime/model_providers/tongyi/_common.py index dfc010266676df..fab18b41fd0487 100644 --- a/api/core/model_runtime/model_providers/tongyi/_common.py +++ b/api/core/model_runtime/model_providers/tongyi/_common.py @@ -1,4 +1,20 @@ -from core.model_runtime.errors.invoke import InvokeError +from dashscope.common.error import ( + AuthenticationError, + InvalidParameter, + RequestFailure, + ServiceUnavailableError, + UnsupportedHTTPMethod, + UnsupportedModel, +) + +from core.model_runtime.errors.invoke import ( + InvokeAuthorizationError, + InvokeBadRequestError, + InvokeConnectionError, + InvokeError, + InvokeRateLimitError, + InvokeServerUnavailableError, +) class _CommonTongyi: @@ -20,4 +36,20 @@ def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]] :return: Invoke error mapping """ - pass + return { + InvokeConnectionError: [ + RequestFailure, + ], + InvokeServerUnavailableError: [ + ServiceUnavailableError, + ], + InvokeRateLimitError: [], + InvokeAuthorizationError: [ + AuthenticationError, + ], + InvokeBadRequestError: [ + InvalidParameter, + UnsupportedModel, + UnsupportedHTTPMethod, + ] + } From 568b4d47913f5fd3dbc449bed69d3e31a5f5ac8e Mon Sep 17 00:00:00 2001 From: takatost Date: Tue, 25 Jun 2024 19:49:09 +0800 Subject: [PATCH 60/65] fix --- .../easy_ui_based_generate_task_pipeline.py | 13 +++++++------ .../app/task_pipeline/workflow_cycle_manage.py | 13 +++++++------ api/core/moderation/input_moderation.py | 17 +++++++++-------- 3 files changed, 23 insertions(+), 20 deletions(-) diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index cc7f27c7d74f67..7d16d015bfcd41 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -309,13 +309,14 @@ def _save_message( db.session.commit() - 
trace_manager.add_trace_task( - TraceTask( - TraceTaskName.MESSAGE_TRACE, - conversation_id=self._conversation.id, - message_id=self._message.id + if trace_manager: + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.MESSAGE_TRACE, + conversation_id=self._conversation.id, + message_id=self._message.id + ) ) - ) message_was_created.send( self._message, diff --git a/api/core/app/task_pipeline/workflow_cycle_manage.py b/api/core/app/task_pipeline/workflow_cycle_manage.py index 2f937b14420d8e..e79ac05a752e4e 100644 --- a/api/core/app/task_pipeline/workflow_cycle_manage.py +++ b/api/core/app/task_pipeline/workflow_cycle_manage.py @@ -125,13 +125,14 @@ def _workflow_run_success( db.session.refresh(workflow_run) db.session.close() - trace_manager.add_trace_task( - TraceTask( - TraceTaskName.WORKFLOW_TRACE, - workflow_run=workflow_run, - conversation_id=conversation_id, + if trace_manager: + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.WORKFLOW_TRACE, + workflow_run=workflow_run, + conversation_id=conversation_id, + ) ) - ) return workflow_run diff --git a/api/core/moderation/input_moderation.py b/api/core/moderation/input_moderation.py index e2fda4ace4aa8f..3482d5c5cfd334 100644 --- a/api/core/moderation/input_moderation.py +++ b/api/core/moderation/input_moderation.py @@ -47,15 +47,16 @@ def check( with measure_time() as timer: moderation_result = moderation_factory.moderation_for_inputs(inputs, query) - trace_manager.add_trace_task( - TraceTask( - TraceTaskName.MODERATION_TRACE, - message_id=message_id, - moderation_result=moderation_result, - inputs=inputs, - timer=timer + if trace_manager: + trace_manager.add_trace_task( + TraceTask( + TraceTaskName.MODERATION_TRACE, + message_id=message_id, + moderation_result=moderation_result, + inputs=inputs, + timer=timer + ) ) - ) if not moderation_result.flagged: return False, inputs, query From a0715291e626cfec4051f6a95f3076a16b343429 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 19:59:16 +0800 Subject: [PATCH 61/65] feat: optimize file_list --- api/core/ops/langfuse_trace/langfuse_trace.py | 9 ++++----- api/core/ops/ops_trace_manager.py | 11 +++++++---- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py index 63416a11759a23..f4e766d9144e2f 100644 --- a/api/core/ops/langfuse_trace/langfuse_trace.py +++ b/api/core/ops/langfuse_trace/langfuse_trace.py @@ -28,7 +28,6 @@ ) from core.ops.utils import filter_none_values from extensions.ext_database import db -from models.model import MessageFile from models.workflow import WorkflowNodeExecution logger = logging.getLogger(__name__) @@ -136,9 +135,6 @@ def message_trace( ): # get message file data file_list = trace_info.file_list - message_file_data: MessageFile = trace_info.message_file_data - file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else "" - file_list.append(file_url) metadata = trace_info.metadata message_data = trace_info.message_data message_id = message_data.id @@ -161,7 +157,10 @@ def message_trace( metadata=metadata, session_id=message_data.conversation_id, tags=["message", str(trace_info.conversation_mode)], - version=None, release=None, public=None, ) + version=None, + release=None, + public=None, + ) self.add_trace(langfuse_trace_data=trace_data) # start add span diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 1f021583f286ac..5fdb2b3b23d2c1 100644 --- 
a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -395,9 +395,12 @@ def message_trace(self, message_id): # get message file data message_file_data = db.session.query(MessageFile).filter_by(message_id=message_id).first() - file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else "" - file_list = inputs[0].get("files", []) - file_list.append(file_url) + file_list = [] + if message_file_data.url is not None: + file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else "" + file_list.append(file_url) + else: + file_list.append(str(message_file_data.upload_file_id)) metadata = { "conversation_id": message_data.conversation_id, @@ -422,7 +425,7 @@ def message_trace(self, message_id): error=message_data.error if message_data.error else "", inputs=inputs, outputs=message_data.answer, - file_list=message_data.message[0].get("files", []), + file_list=file_list, start_time=created_at, end_time=created_at + timedelta(seconds=message_data.provider_response_latency), metadata=metadata, From 10cd08eaf21787cebc5cc51a1bab3d1585696fbd Mon Sep 17 00:00:00 2001 From: takatost Date: Tue, 25 Jun 2024 20:02:36 +0800 Subject: [PATCH 62/65] optimize --- api/core/ops/ops_trace_manager.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 5fdb2b3b23d2c1..f5aa5faa7d1434 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -35,14 +35,14 @@ provider_config_map = { TracingProviderEnum.LANGFUSE.value: { 'config_class': LangfuseConfig, - 'secret_keys': ('public_key', 'secret_key'), - 'other_keys': ('host',), + 'secret_keys': ['public_key', 'secret_key'], + 'other_keys': ['host'], 'trace_instance': LangFuseDataTrace }, TracingProviderEnum.LANGSMITH.value: { 'config_class': LangSmithConfig, - 'secret_keys': ('api_key',), - 'other_keys': ('project', 'endpoint'), + 'secret_keys': ['api_key'], + 'other_keys': ['project', 'endpoint'], 'trace_instance': LangSmithDataTrace } } @@ -657,8 +657,3 @@ def process_queue(self, flask_app: Flask, trace_instance: BaseTraceInstance): def add_trace_task(self, trace_task: TraceTask): self.queue.put(trace_task) - - -if __name__ == '__main__': - print(provider_config_map.keys()) - print(type(provider_config_map.keys())) From 8bf2c384a6910427731c2fd3cb66f1c9b8c14f03 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Tue, 25 Jun 2024 22:04:34 +0800 Subject: [PATCH 63/65] feat: update workflow trace --- api/core/ops/entities/trace_entity.py | 2 + api/core/ops/langfuse_trace/langfuse_trace.py | 29 ++++++++++---- .../ops/langsmith_trace/langsmith_trace.py | 35 +++++++++++++++-- api/core/ops/ops_trace_manager.py | 39 ++++++++++++++++--- 4 files changed, 88 insertions(+), 17 deletions(-) diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index b14a2595bb2bbc..b615f21e6c99ff 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -25,6 +25,7 @@ def ensure_type(cls, v): class WorkflowTraceInfo(BaseTraceInfo): workflow_data: Any conversation_id: Optional[str] = None + workflow_app_log_id: Optional[str] = None workflow_id: str tenant_id: str workflow_run_id: str @@ -70,6 +71,7 @@ class SuggestedQuestionTraceInfo(BaseTraceInfo): suggested_question: list[str] level: str status_message: Optional[str] = None + workflow_run_id: Optional[str] = None model_config = ConfigDict(protected_namespaces=()) 
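The `if trace_manager:` guards added in the hunks above all follow one convention: tracing is configured per app, so every call site must tolerate `trace_manager` being None rather than assume a queue exists. A minimal self-contained sketch of that pattern (the `TraceTask`, `TraceTaskName`, and `TraceQueueManager` definitions below are simplified stand-ins for the real classes, which accept more arguments):

    import queue
    from enum import Enum
    from typing import Optional

    class TraceTaskName(Enum):
        MESSAGE_TRACE = "message_trace"

    class TraceTask:
        # stand-in: the real TraceTask takes many more keyword arguments
        def __init__(self, name: TraceTaskName, **kwargs):
            self.name = name
            self.kwargs = kwargs

    class TraceQueueManager:
        def __init__(self):
            self.queue = queue.Queue()

        def add_trace_task(self, trace_task: TraceTask):
            self.queue.put(trace_task)

    def save_message(message_id: str, conversation_id: str,
                     trace_manager: Optional[TraceQueueManager] = None) -> None:
        # ... persist the message ...
        # tracing is optional: without this guard, apps with no tracing
        # provider configured would crash on None.add_trace_task(...)
        if trace_manager:
            trace_manager.add_trace_task(
                TraceTask(
                    TraceTaskName.MESSAGE_TRACE,
                    conversation_id=conversation_id,
                    message_id=message_id,
                )
            )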
diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py
index f4e766d9144e2f..2e1b6cd0bc17c4 100644
--- a/api/core/ops/langfuse_trace/langfuse_trace.py
+++ b/api/core/ops/langfuse_trace/langfuse_trace.py
@@ -28,6 +28,7 @@
 )
 from core.ops.utils import filter_none_values
 from extensions.ext_database import db
+from models.model import EndUser
 from models.workflow import WorkflowNodeExecution
 
 logger = logging.getLogger(__name__)
@@ -63,17 +64,24 @@ def trace(self, trace_info: BaseTraceInfo):
             self.generate_name_trace(trace_info)
 
     def workflow_trace(self, trace_info: WorkflowTraceInfo):
+        name = f"workflow_{trace_info.workflow_run_id}"
+        trace_id = trace_info.workflow_run_id
+        if trace_info.message_id:
+            name = f"message_{trace_info.message_id}"
+            trace_id = trace_info.message_id
+        elif trace_info.workflow_app_log_id:
+            name = f"workflow_{trace_info.workflow_app_log_id}"
+            trace_id = trace_info.workflow_app_log_id
         trace_data = LangfuseTrace(
-            id=trace_info.workflow_run_id,
-            name=f"workflow_{trace_info.workflow_run_id}",
+            id=trace_id,
             user_id=trace_info.tenant_id,
-            input=trace_info.query,
+            name=name,
+            input=trace_info.workflow_run_inputs,
             output=trace_info.workflow_run_outputs,
             metadata=trace_info.metadata,
-            session_id=trace_info.conversation_id if trace_info.conversation_id else trace_info.workflow_run_id,
+            session_id=trace_info.conversation_id,
             tags=["workflow"],
         )
-
         self.add_trace(langfuse_trace_data=trace_data)
 
         # through workflow_run_id get all_nodes_execution
@@ -120,7 +128,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             name=f"{node_name}_{node_execution_id}",
             input=inputs,
             output=outputs,
-            trace_id=trace_info.workflow_run_id,
+            trace_id=trace_id,
             start_time=created_at,
             end_time=finished_at,
             metadata=metadata,
@@ -139,9 +147,16 @@ def message_trace(
         message_data = trace_info.message_data
         message_id = message_data.id
 
+        user_id = message_data.from_account_id
+        if message_data.from_end_user_id:
+            end_user_data: EndUser = db.session.query(EndUser).filter(
+                EndUser.id == message_data.from_end_user_id
+            ).first()
+            user_id = end_user_data.session_id
+
         trace_data = LangfuseTrace(
             id=message_id,
-            user_id=message_data.from_end_user_id if message_data.from_end_user_id else message_data.from_account_id,
+            user_id=user_id,
             name=f"message_{message_id}",
             input={
                 "message": trace_info.inputs,
diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py
index 98cd9f47ec3033..422830fb1e4df4 100644
--- a/api/core/ops/langsmith_trace/langsmith_trace.py
+++ b/api/core/ops/langsmith_trace/langsmith_trace.py
@@ -24,7 +24,7 @@
 )
 from core.ops.utils import filter_none_values
 from extensions.ext_database import db
-from models.model import MessageFile
+from models.model import EndUser, MessageFile
 from models.workflow import WorkflowNodeExecution
 
 logger = logging.getLogger(__name__)
@@ -61,11 +61,28 @@ def trace(self, trace_info: BaseTraceInfo):
             self.generate_name_trace(trace_info)
 
     def workflow_trace(self, trace_info: WorkflowTraceInfo):
+        if trace_info.message_id:
+            message_run = LangSmithRunModel(
+                id=trace_info.message_id,
+                name=f"message_{trace_info.message_id}",
+                inputs=trace_info.workflow_run_inputs,
+                outputs=trace_info.workflow_run_outputs,
+                run_type=LangSmithRunType.chain,
+                start_time=trace_info.start_time,
+                end_time=trace_info.end_time,
+                extra={
+                    "metadata": trace_info.metadata,
+                },
+                tags=["message"],
+                error=trace_info.error
+            )
+            self.add_run(message_run)
+
         langsmith_run = LangSmithRunModel(
             file_list=trace_info.file_list,
             total_tokens=trace_info.total_tokens,
-            id=trace_info.workflow_run_id,
-            name=f"workflow_run_{trace_info.workflow_run_id}",
+            id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
+            name=f"workflow_{trace_info.workflow_app_log_id}" if trace_info.workflow_app_log_id else f"workflow_{trace_info.workflow_run_id}",
             inputs=trace_info.workflow_run_inputs,
             run_type=LangSmithRunType.tool,
             start_time=trace_info.workflow_data.created_at,
@@ -76,6 +93,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             },
             error=trace_info.error,
             tags=["workflow"],
+            parent_run_id=trace_info.message_id if trace_info.message_id else None,
         )
 
         self.add_run(langsmith_run)
@@ -146,7 +164,7 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo):
             extra={
                 "metadata": metadata,
             },
-            parent_run_id=trace_info.workflow_run_id,
+            parent_run_id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id,
             tags=["node_execution"],
         )
 
@@ -162,6 +180,15 @@ def message_trace(self, trace_info: MessageTraceInfo):
         message_data = trace_info.message_data
         message_id = message_data.id
 
+        user_id = message_data.from_account_id
+        if message_data.from_end_user_id:
+            end_user_data: EndUser = db.session.query(EndUser).filter(
+                EndUser.id == message_data.from_end_user_id
+            ).first()
+            end_user_id = end_user_data.session_id
+            metadata["end_user_id"] = end_user_id
+            metadata["user_id"] = user_id
+
         message_run = LangSmithRunModel(
             input_tokens=trace_info.message_tokens,
             output_tokens=trace_info.answer_tokens,
diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py
index f5aa5faa7d1434..9e4f8ec13af2d6 100644
--- a/api/core/ops/ops_trace_manager.py
+++ b/api/core/ops/ops_trace_manager.py
@@ -30,7 +30,7 @@
 from core.ops.utils import get_message_data
 from extensions.ext_database import db
 from models.model import App, AppModelConfig, Conversation, Message, MessageAgentThought, MessageFile, TraceAppConfig
-from models.workflow import WorkflowRun
+from models.workflow import WorkflowAppLog, WorkflowRun
 
 provider_config_map = {
     TracingProviderEnum.LANGFUSE.value: {
@@ -351,6 +351,13 @@ def workflow_trace(self, workflow_run: WorkflowRun, conversation_id):
         file_list = workflow_run_inputs.get("sys.file") if workflow_run_inputs.get("sys.file") else []
         query = workflow_run_inputs.get("query") or workflow_run_inputs.get("sys.query") or ""
 
+        # get workflow_app_log_id
+        workflow_app_log_data = db.session.query(WorkflowAppLog).filter_by(workflow_run_id=workflow_run.id).first()
+        workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None
+        # get message_id
+        message_data = db.session.query(Message.id).filter_by(workflow_run_id=workflow_run.id).first()
+        message_id = str(message_data.id) if message_data else None
+
         metadata = {
             "workflow_id": workflow_id,
             "conversation_id": conversation_id,
@@ -380,6 +387,10 @@ def workflow_trace(self, workflow_run: WorkflowRun, conversation_id):
             file_list=file_list,
             query=query,
             metadata=metadata,
+            workflow_app_log_id=workflow_app_log_id,
+            message_id=message_id,
+            start_time=workflow_run.created_at,
+            end_time=workflow_run.finished_at,
         )
 
         return workflow_trace_info
@@ -396,11 +407,9 @@ def message_trace(self, message_id):
         # get message file data
         message_file_data = db.session.query(MessageFile).filter_by(message_id=message_id).first()
         file_list = []
-        if message_file_data.url is not None:
+        if message_file_data
and message_file_data.url is not None: file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else "" file_list.append(file_url) - else: - file_list.append(str(message_file_data.upload_file_id)) metadata = { "conversation_id": message_data.conversation_id, @@ -447,8 +456,17 @@ def moderation_trace(self, message_id, timer, **kwargs): "preset_response": moderation_result.preset_response, "query": moderation_result.query, } + + # get workflow_app_log_id + workflow_app_log_id = None + if message_data.workflow_run_id: + workflow_app_log_data = db.session.query(WorkflowAppLog).filter_by( + workflow_run_id=message_data.workflow_run_id + ).first() + workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None + moderation_trace_info = ModerationTraceInfo( - message_id=message_id, + message_id=workflow_app_log_id if workflow_app_log_id else message_id, inputs=inputs, message_data=message_data, flagged=moderation_result.flagged, @@ -478,8 +496,17 @@ def suggested_question_trace(self, message_id, timer, **kwargs): "workflow_run_id": message_data.workflow_run_id, "from_source": message_data.from_source, } + + # get workflow_app_log_id + workflow_app_log_id = None + if message_data.workflow_run_id: + workflow_app_log_data = db.session.query(WorkflowAppLog).filter_by( + workflow_run_id=message_data.workflow_run_id + ).first() + workflow_app_log_id = str(workflow_app_log_data.id) if workflow_app_log_data else None + suggested_question_trace_info = SuggestedQuestionTraceInfo( - message_id=message_id, + message_id=workflow_app_log_id if workflow_app_log_id else message_id, message_data=message_data, inputs=message_data.message, outputs=message_data.answer, From da93485a5ea312802e0f0eefc510743479bdbfc7 Mon Sep 17 00:00:00 2001 From: Joe <1264204425@qq.com> Date: Wed, 26 Jun 2024 12:48:34 +0800 Subject: [PATCH 64/65] feat: update langfuse trace structure --- .../entities/langfuse_trace_entity.py | 4 + api/core/ops/langfuse_trace/langfuse_trace.py | 91 +++++++++++++------ api/core/ops/ops_trace_manager.py | 1 + 3 files changed, 68 insertions(+), 28 deletions(-) diff --git a/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py b/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py index 6b4c01e5712b92..b90c05f4cbc605 100644 --- a/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py +++ b/api/core/ops/langfuse_trace/entities/langfuse_trace_entity.py @@ -163,6 +163,10 @@ class LangfuseSpan(BaseModel): description="The version of the span type. Used to understand how changes to the span type affect metrics. " "Useful in debugging.", ) + parent_observation_id: Optional[str] = Field( + default=None, + description="The id of the observation the span belongs to. 
Used to link spans to observations.", + ) @field_validator("input", "output") def ensure_dict(cls, v, info: ValidationInfo): diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py index 2e1b6cd0bc17c4..05d34c5527b0a3 100644 --- a/api/core/ops/langfuse_trace/langfuse_trace.py +++ b/api/core/ops/langfuse_trace/langfuse_trace.py @@ -64,25 +64,46 @@ def trace(self, trace_info: BaseTraceInfo): self.generate_name_trace(trace_info) def workflow_trace(self, trace_info: WorkflowTraceInfo): - name = f"workflow_{trace_info.workflow_run_id}" - trace_id = trace_info.workflow_run_id + trace_id = trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id if trace_info.message_id: - name = f"message_{trace_info.message_id}" trace_id = trace_info.message_id - elif trace_info.workflow_app_log_id: - name = f"workflow_{trace_info.workflow_app_log_id}" - trace_id = trace_info.workflow_app_log_id - trace_data = LangfuseTrace( - id=trace_id, - user_id=trace_info.tenant_id, - name=name, - input=trace_info.workflow_run_inputs, - output=trace_info.workflow_run_outputs, - metadata=trace_info.metadata, - session_id=trace_info.conversation_id, - tags=["workflow"], - ) - self.add_trace(langfuse_trace_data=trace_data) + name = f"message_{trace_info.message_id}" + trace_data = LangfuseTrace( + id=trace_info.message_id, + user_id=trace_info.tenant_id, + name=name, + input=trace_info.workflow_run_inputs, + output=trace_info.workflow_run_outputs, + metadata=trace_info.metadata, + session_id=trace_info.conversation_id, + tags=["message", "workflow"], + ) + self.add_trace(langfuse_trace_data=trace_data) + workflow_span_data = LangfuseSpan( + id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id, + name=f"workflow_{trace_info.workflow_app_log_id}" if trace_info.workflow_app_log_id else f"workflow_{trace_info.workflow_run_id}", + input=trace_info.workflow_run_inputs, + output=trace_info.workflow_run_outputs, + trace_id=trace_id, + start_time=trace_info.start_time, + end_time=trace_info.end_time, + metadata=trace_info.metadata, + level=LevelEnum.DEFAULT if trace_info.error == "" else LevelEnum.ERROR, + status_message=trace_info.error if trace_info.error else "", + ) + self.add_span(langfuse_span_data=workflow_span_data) + else: + trace_data = LangfuseTrace( + id=trace_id, + user_id=trace_info.tenant_id, + name=f"workflow_{trace_info.workflow_app_log_id}" if trace_info.workflow_app_log_id else f"workflow_{trace_info.workflow_run_id}", + input=trace_info.workflow_run_inputs, + output=trace_info.workflow_run_outputs, + metadata=trace_info.metadata, + session_id=trace_info.conversation_id, + tags=["workflow"], + ) + self.add_trace(langfuse_trace_data=trace_data) # through workflow_run_id get all_nodes_execution workflow_nodes_executions = ( @@ -124,17 +145,31 @@ def workflow_trace(self, trace_info: WorkflowTraceInfo): ) # add span - span_data = LangfuseSpan( - name=f"{node_name}_{node_execution_id}", - input=inputs, - output=outputs, - trace_id=trace_id, - start_time=created_at, - end_time=finished_at, - metadata=metadata, - level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR, - status_message=trace_info.error if trace_info.error else "", - ) + if trace_info.message_id: + span_data = LangfuseSpan( + name=f"{node_name}_{node_execution_id}", + input=inputs, + output=outputs, + trace_id=trace_id, + start_time=created_at, + end_time=finished_at, + metadata=metadata, + 
level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR, + status_message=trace_info.error if trace_info.error else "", + parent_observation_id=trace_info.workflow_app_log_id if trace_info.workflow_app_log_id else trace_info.workflow_run_id, + ) + else: + span_data = LangfuseSpan( + name=f"{node_name}_{node_execution_id}", + input=inputs, + output=outputs, + trace_id=trace_id, + start_time=created_at, + end_time=finished_at, + metadata=metadata, + level=LevelEnum.DEFAULT if status == 'succeeded' else LevelEnum.ERROR, + status_message=trace_info.error if trace_info.error else "", + ) self.add_span(langfuse_span_data=span_data) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 9e4f8ec13af2d6..00750ab81f6302 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -421,6 +421,7 @@ def message_trace(self, message_id): "agent_based": message_data.agent_based, "workflow_run_id": message_data.workflow_run_id, "from_source": message_data.from_source, + "message_id": message_id, } message_tokens = message_data.message_tokens From 6a9d4f7728f4bddcf385a0f63b940c5e3fecbbe3 Mon Sep 17 00:00:00 2001 From: takatost Date: Wed, 26 Jun 2024 16:16:17 +0800 Subject: [PATCH 65/65] remove requirements.txt --- api/requirements.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 api/requirements.txt diff --git a/api/requirements.txt b/api/requirements.txt deleted file mode 100644 index e69de29bb2d1d6..00000000000000
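Taken together, the final patches settle on a single id-precedence rule for tracing a workflow run: trace under the message id when the run was triggered from a chat message, else under the workflow app log id when one exists, else under the run id itself. A sketch of just that rule (the function name is illustrative, not part of the codebase; LangSmith expresses the nesting via parent_run_id, Langfuse via a workflow span under a message-level trace):

    from typing import Optional

    def resolve_workflow_trace_id(message_id: Optional[str],
                                  workflow_app_log_id: Optional[str],
                                  workflow_run_id: str) -> str:
        # message-triggered runs nest under the message trace so that chat
        # and workflow observability share one timeline per user turn
        if message_id:
            return message_id
        return workflow_app_log_id or workflow_run_id

    assert resolve_workflow_trace_id("msg-1", "log-1", "run-1") == "msg-1"
    assert resolve_workflow_trace_id(None, "log-1", "run-1") == "log-1"
    assert resolve_workflow_trace_id(None, None, "run-1") == "run-1"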