diff --git a/crates/tabby-common/src/config.rs b/crates/tabby-common/src/config.rs
index 2513065f0f5a..00a3a610ae3e 100644
--- a/crates/tabby-common/src/config.rs
+++ b/crates/tabby-common/src/config.rs
@@ -149,21 +149,29 @@ pub enum ModelConfig {
 
 #[derive(Serialize, Deserialize, Builder, Clone)]
 pub struct HttpModelConfig {
-    pub api_endpoint: String,
+    /// The kind of model; there are three groups of models:
+    /// 1. Completion API: [CompletionStream](tabby_inference::CompletionStream)
+    ///   - llama.cpp/completion: llama.cpp `/completion` API.
+    /// 2. Chat API: [ChatCompletionStream](tabby_inference::ChatCompletionStream)
+    ///   - openai-chat: OpenAI `/v1/chat/completions` API.
+    /// 3. Embedding API: [Embedding](tabby_inference::Embedding)
+    ///   - llama.cpp/embedding: llama.cpp `/embedding` API.
     pub kind: String,
 
+    pub api_endpoint: String,
+
     #[builder(default)]
     pub api_key: Option<String>,
 
-    /// Used by chat http endpoint to select model.
+    /// Used by the Chat API to select the model.
     #[builder(default)]
     pub model_name: Option<String>,
 
-    /// Used by completion http endpoint to construct FIM prompt.
+    /// Used by the Completion API to construct a completion model.
     #[builder(default)]
     pub prompt_template: Option<String>,
 
-    /// Used by completion http endpoint to construct Chat prompt.
+    /// Used by the Completion API to construct a chat model.
     #[builder(default)]
     pub chat_template: Option<String>,
 }
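
Note for reviewers: a minimal sketch (not part of this patch) of how configs for two of the three `kind` groups deserialize into the reordered struct. It assumes the `toml` crate is available, that `tabby_common::config::HttpModelConfig` is importable, and uses hypothetical endpoint, key, and template values.

    use tabby_common::config::HttpModelConfig;

    fn main() {
        // Chat API group: `kind` picks the OpenAI-style backend and
        // `model_name` selects which served model to talk to.
        // Endpoint, key, and model name below are placeholders.
        let chat: HttpModelConfig = toml::from_str(
            r#"
                kind = "openai-chat"
                api_endpoint = "http://localhost:8000/v1"
                api_key = "dummy-key"
                model_name = "my-chat-model"
            "#,
        )
        .expect("chat config should deserialize");
        assert_eq!(chat.kind, "openai-chat");

        // Completion API group: `prompt_template` (a hypothetical FIM
        // template here) is used to build the completion model. Fields
        // marked #[builder(default)] can be omitted; serde leaves the
        // corresponding Option fields as None.
        let completion: HttpModelConfig = toml::from_str(
            r#"
                kind = "llama.cpp/completion"
                api_endpoint = "http://localhost:8080"
                prompt_template = "<PRE>{prefix}<SUF>{suffix}<MID>"
            "#,
        )
        .expect("completion config should deserialize");
        assert!(completion.model_name.is_none());
    }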