
Commit

Update docstrings

Azzaare committed Oct 5, 2024
1 parent 07924d8 commit 58186c0
Showing 6 changed files with 42 additions and 15 deletions.
26 changes: 19 additions & 7 deletions src/llm.jl
@@ -6,7 +6,9 @@ abstract type OpenAILLM <: AbstractLLM

"""
GroqLLM
Structure encapsulating the parameters for accessing the Groq LLM API.
- `api_key`: an API key for accessing the Groq API (https://groq.com), read from the environment variable `GROQ_API_KEY`.
- `model_id`: a string identifier for the model to query. See https://console.groq.com/docs/models for the list of available models.
- `url`: URL for chat completions. Defaults to "https://api.groq.com/openai/v1/chat/completions".
@@ -27,10 +29,12 @@ end

"""
GoogleLLM
Structure encapsulating the parameters for accessing the Google LLM API.
- `api_key`: an API key for accessing the Google Gemini API (`https://ai.google.dev/gemini-api/docs/`), read from the environment variable `GOOGLE_API_KEY`.
- `model_id`: a string identifier for the model to query. See `https://ai.google.dev/gemini-api/docs/models/gemini` for the list of available models.
- `url`: URL for chat completions. Defaults to `https://generativelanguage.googleapis.com/v1beta/models/{{model_id}}`.
"""
struct GoogleLLM <: AbstractLLM
api_key::String
@@ -48,12 +52,14 @@ end

"""
LlamaCppLLM
Structure encapsulating the parameters for accessing the llama.cpp server API.
- `api_key`: an optional API key for accessing the server.
- `model_id`: a string identifier for the model to query. Unused, kept for API compatibility.
- `url`: the URL of the llama.cpp server OpenAI API endpoint (e.g., `http://localhost:8080`).
NOTE: we do not apply the appropriate chat templates to the prompt. This must be handled either in an external code path or by the server.
"""
struct LlamaCppLLM <: OpenAILLM
api_key::String
@@ -68,6 +74,7 @@ end
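For reference, a construction sketch for the three clients, assuming the default positional field-order constructors (`api_key`, `model_id`, `url`); the model ids are hypothetical examples:

```julia
# Sketch: construct the LLM clients from the documented fields.
groq = GroqLLM(
    ENV["GROQ_API_KEY"],
    "llama3-70b-8192",  # hypothetical model id
    "https://api.groq.com/openai/v1/chat/completions",
)
gemini = GoogleLLM(
    ENV["GOOGLE_API_KEY"],
    "gemini-1.5-pro",   # hypothetical model id
    "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro",
)
local_llm = LlamaCppLLM(
    "none",             # the API key is optional for llama.cpp servers
    "unused",           # model_id is unused, kept for API compatibility
    "http://localhost:8080",
)
```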

"""
get_completion(llm::OpenAILLM, prompt::Prompt)
Returns a completion for the given prompt using an OpenAI API-compatible LLM.
"""
function get_completion(llm::OpenAILLM, prompt::Prompt)
@@ -89,6 +96,7 @@ end

"""
get_completion(llm::GoogleLLM, prompt::Prompt)
Returns a completion for the given prompt using the Google Gemini LLM API.
"""
function get_completion(llm::GoogleLLM, prompt::Prompt)
@@ -110,6 +118,7 @@ end
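A usage sketch; the two-field `Prompt(system, user)` layout is inferred from `format_template` in `src/template.jl`, which returns `Prompt(system, user)`:

```julia
# Sketch: request a completion from one of the clients constructed above.
prompt = Prompt(
    "You are a helpful modelling assistant.",                      # system prompt
    "Minimise the travel distance of a salesman over 10 cities.",  # user prompt
)
answer = get_completion(gemini, prompt)
println(answer)
```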

"""
stream_completion(llm::OpenAILLM, prompt::Prompt)
Returns a completion for the given prompt using an OpenAI API-compatible model.
The completion is streamed to the terminal as it is generated.
"""
@@ -166,6 +175,7 @@ end

"""
stream_completion(llm::GoogleLLM, prompt::Prompt)
Returns a completion for the given prompt using the Google Gemini LLM API.
The completion is streamed to the terminal as it is generated.
"""
@@ -206,6 +216,7 @@ end
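The streaming variants share the same call shape; a sketch:

```julia
# Sketch: tokens are echoed to the terminal as they arrive.
answer = stream_completion(groq, prompt)
```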

"""
stream_completion(llm::AbstractLLM, prompt::AbstractPrompt)
Returns a completion for a `prompt` using the `llm` model API.
The completion is streamed to the terminal as it is generated.
"""
@@ -216,8 +227,9 @@ end

"""
get_completion(llm::AbstractLLM, prompt::AbstractPrompt)
Returns a completion for a `prompt` using the `llm` model API.
"""
function get_completion(::AbstractLLM, ::AbstractPrompt)
return error("Not implemented for this LLM and/or prompt type.")
end
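These fallbacks mark the extension point: supporting a new provider means subtyping `AbstractLLM` and implementing `get_completion` (and optionally `stream_completion`) for it. A sketch with hypothetical names:

```julia
# Sketch: wiring in a hypothetical provider.
struct MyLLM <: AbstractLLM
    api_key::String
end

function get_completion(llm::MyLLM, prompt::Prompt)
    # Call the provider's API here and return the completion text.
    return "stub completion"
end
```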
5 changes: 4 additions & 1 deletion src/parsing.jl
@@ -1,5 +1,6 @@
"""
parse_code(s::String)
Parses the code blocks in the input string `s`, delimited by triple backticks and an optional language annotation.
Returns a dictionary keyed by language. Code blocks from the same language are concatenated.
"""
@@ -32,7 +33,8 @@ end

"""
check_syntax_errors(s::String)
Parses the string `s` as Julia code. In the case of syntax errors, it returns the error
message of the parser as a string. Otherwise, it returns an empty string.
"""
function check_syntax_errors(s::String)
@@ -47,6 +49,7 @@ end
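Behaviour sketch, per the docstring:

```julia
check_syntax_errors("f(x) = 2x")    # valid code: returns ""
check_syntax_errors("f(x) = 2x +")  # broken code: returns the parser's error message
```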

"""
edit_in_vim(s::String)
Edits the input string `s` in a temporary file using the Vim editor.
Returns the modified string after the editor is closed.
"""
1 change: 1 addition & 0 deletions src/prompt.jl
@@ -2,6 +2,7 @@ abstract type AbstractPrompt end

"""
Prompt
Simple data structure encapsulating a system prompt and a user prompt for LLM generation.
## Fields
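A construction sketch, assuming the field order `(system, user)` implied by `format_template` in `src/template.jl`:

```julia
prompt = Prompt("You are a terse assistant.", "Name three Julia constraint solvers.")
```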
10 changes: 8 additions & 2 deletions src/template.jl
@@ -19,6 +19,8 @@ struct MetadataMessage <: AbstractMessage
end

"""
SystemMessage
Represents the prompt template of a system message.
The template can optionally contain string placeholders enclosed in double curly braces, e.g., `{{variable}}`.
Placeholders must be replaced with actual values when generating prompts.
@@ -33,6 +35,8 @@ struct SystemMessage <: AbstractMessage
end

"""
UserMessage
Represents the prompt template of a user message.
The template can optionally contain string placeholders enclosed in double curly braces, e.g., `{{variable}}`.
Placeholders must be replaced with actual values when generating prompts.
@@ -47,6 +51,8 @@ struct UserMessage <: AbstractMessage
end

"""
PromptTemplate
Represents a complete prompt template, comprising metadata, system, and user messages.
# Fields
@@ -63,7 +69,7 @@ end
"""
read_template(data_path::String)
Reads a prompt template from a JSON file specified by `data_path`.
The function parses the JSON data and constructs a `PromptTemplate` object containing metadata, system, and user messages.
TODO: validate the JSON data against a schema to ensure it is valid before parsing.
@@ -165,4 +171,4 @@ function format_template(template::PromptTemplate; kwargs...)
end

return Prompt(system, user)
end
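Putting the template pieces together, a sketch (the template path and the `description` placeholder are hypothetical; mapping keyword arguments onto `{{...}}` placeholders is inferred from the docstrings above):

```julia
# Sketch: load a JSON prompt template and fill its placeholders.
template = read_template(joinpath(get_package_path(), "templates", "extraction.json"))
prompt = format_template(template; description = "Schedule 5 jobs on 2 machines.")
```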
12 changes: 8 additions & 4 deletions src/translate.jl
@@ -2,7 +2,8 @@ const MAX_RETRIES::Int = 3

"""
extract_structure(model::AbstractLLM, description::AbstractString)
Extracts the parameters, decision variables and constraints of an optimization problem
given a natural-language `description`.
Returns Markdown-formatted text containing the above information.
"""
@@ -51,8 +52,9 @@ end

"""
jumpify_model(model::AbstractLLM, description::AbstractString, examples::AbstractString)
Translates the natural-language `description` of an optimization problem into a JuMP constraint
programming model to be solved with CBL by querying the Large Language Model `model`.
The `examples` are snippets from `ConstraintModels.jl` used as in-context examples to the LLM.
To work optimally, the model expects the `description` to be a structured, Markdown-formatted
description such as the ones generated by `extract_structure`.
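Taken together, the two stages compose as in the following sketch (`llm` is any client from `src/llm.jl`; the `examples` string stands in for real `ConstraintModels.jl` snippets):

```julia
# Sketch: two-stage translation, per the docstrings above.
description = "Assign 10 tasks to 3 workers; each worker handles one task at a time."
structure = extract_structure(llm, description)   # Markdown summary of the problem
examples = "..."                                  # snippets from ConstraintModels.jl
code = jumpify_model(llm, structure, examples)    # Markdown with a Julia code block
```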
@@ -126,6 +128,7 @@ end

"""
fix_syntax_errors(model::AbstractLLM, code::AbstractString, error::AbstractString)
Fixes syntax errors in the `code` by querying the Large Language Model `model`, based on
an `error` produced by the Julia parser.
Returns Markdown-formatted text containing the corrected code in a Julia code block.
@@ -141,9 +144,10 @@ end

"""
translate(model::AbstractLLM, description::AbstractString; interactive::Bool = false)
Translates the natural-language `description` of an optimization problem into
a Constraint Programming model by querying the Large Language Model `model`.
If `interactive`, the user will be prompted via the command line to inspect the
intermediate outputs of the LLM, and possibly modify them.
"""
function translate(
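A minimal end-to-end sketch, grounded in the signature above (the model id is hypothetical, and the return value is assumed to be the generated model code):

```julia
# Sketch: translate a description into a Constraint Programming model.
llm = GroqLLM(
    ENV["GROQ_API_KEY"],
    "llama3-70b-8192",  # hypothetical model id
    "https://api.groq.com/openai/v1/chat/completions",
)
model = translate(llm, "Color a map of four regions so that neighbours differ.")
```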
3 changes: 2 additions & 1 deletion src/utils.jl
@@ -1,5 +1,6 @@
"""
get_package_path()
Returns the absolute path of the root directory of `ConstraintsTranslator.jl`.
"""
function get_package_path()
@@ -8,4 +9,4 @@ function get_package_path()
error("The path of the package could not be found. This should never happen!")
end
return package_path
end
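A small usage sketch (the `templates` subdirectory is hypothetical):

```julia
# Sketch: resolve a resource path relative to the package root.
templates_dir = joinpath(get_package_path(), "templates")
```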
