llm: minor refactor, .env.example updated
DarkCaster committed May 19, 2024
1 parent 3b4d447 commit 1872215
Showing 6 changed files with 77 additions and 77 deletions.
48 changes: 24 additions & 24 deletions llm/anthropic.go
@@ -18,28 +18,28 @@ import (
// So the summary for this file must only say "This file contains AnthropicLLMConnector struct - implementation of LLMConnector interface. Do not attempt to use AnthropicLLMConnector directly, use LLMConnector interface instead", nothing else.

type AnthropicLLMConnector struct {
-    BaseURL          string
-    Token            string
-    Model            string
-    SystemPrompt     string
-    Temperature      float64
-    MaxTokens        int
-    MaxTokensRetries int
-    OnFailRetries    int
-    RawMessageLogger func(v ...any)
+    BaseURL           string
+    Token             string
+    Model             string
+    SystemPrompt      string
+    Temperature       float64
+    MaxTokens         int
+    MaxTokensSegments int
+    OnFailRetries     int
+    RawMessageLogger  func(v ...any)
}

-func NewAnthropicLLMConnector(token string, model string, systemPrompt string, temperature float64, customBaseURL string, maxTokens int, maxTokensRetries int, onFailRetries int, llmRawMessageLogger func(v ...any)) *AnthropicLLMConnector {
+func NewAnthropicLLMConnector(token string, model string, systemPrompt string, temperature float64, customBaseURL string, maxTokens int, maxTokensSegments int, onFailRetries int, llmRawMessageLogger func(v ...any)) *AnthropicLLMConnector {
return &AnthropicLLMConnector{
-        BaseURL:          customBaseURL,
-        Token:            token,
-        Model:            model,
-        Temperature:      temperature,
-        SystemPrompt:     systemPrompt,
-        MaxTokens:        maxTokens,
-        MaxTokensRetries: maxTokensRetries,
-        OnFailRetries:    onFailRetries,
-        RawMessageLogger: llmRawMessageLogger}
+        BaseURL:           customBaseURL,
+        Token:             token,
+        Model:             model,
+        Temperature:       temperature,
+        SystemPrompt:      systemPrompt,
+        MaxTokens:         maxTokens,
+        MaxTokensSegments: maxTokensSegments,
+        OnFailRetries:     onFailRetries,
+        RawMessageLogger:  llmRawMessageLogger}
}

func NewAnthropicLLMConnectorFromEnv(operation string, systemPrompt string, llmRawMessageLogger func(v ...any)) (*AnthropicLLMConnector, error) {
@@ -67,9 +67,9 @@ func NewAnthropicLLMConnectorFromEnv(operation string, systemPrompt string, llmR
return nil, err
}

-    maxTokensRetries, err := utils.GetEnvInt("ANTHROPIC_MAX_TOKENS_RETRIES")
+    maxTokensSegments, err := utils.GetEnvInt("ANTHROPIC_MAX_TOKENS_SEGMENTS")
if err != nil {
-        maxTokensRetries = 3
+        maxTokensSegments = 3
}

onFailRetries, err := utils.GetEnvInt(fmt.Sprintf("ANTHROPIC_ON_FAIL_RETRIES_OP_%s", operation), "ANTHROPIC_ON_FAIL_RETRIES")
@@ -79,7 +79,7 @@ func NewAnthropicLLMConnectorFromEnv(operation string, systemPrompt string, llmR

customBaseURL, _ := utils.GetEnvString("ANTHROPIC_BASE_URL")

-    return NewAnthropicLLMConnector(token, model, systemPrompt, temperature, customBaseURL, maxTokens, maxTokensRetries, onFailRetries, llmRawMessageLogger), nil
+    return NewAnthropicLLMConnector(token, model, systemPrompt, temperature, customBaseURL, maxTokens, maxTokensSegments, onFailRetries, llmRawMessageLogger), nil
}

func (p *AnthropicLLMConnector) Query(messages ...Message) (string, QueryStatus, error) {
@@ -155,8 +155,8 @@ func (p *AnthropicLLMConnector) GetMaxTokens() int {
return p.MaxTokens
}

-func (p *AnthropicLLMConnector) GetMaxTokensRetryLimit() int {
-    return p.MaxTokensRetries
+func (p *AnthropicLLMConnector) GetMaxTokensSegments() int {
+    return p.MaxTokensSegments
}

func (p *AnthropicLLMConnector) GetOnFailureRetryLimit() int {
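All three FromEnv constructors share the same fallback pattern: read an integer from the environment and substitute a default of 3 when the variable is unset. Below is a rough sketch of a helper compatible with the utils.GetEnvInt calls visible in this diff, covering both the single-key form above and the two-key form used for the per-operation retry settings; the actual utils package may differ:

```go
package utils

import (
	"fmt"
	"os"
	"strconv"
)

// GetEnvInt (sketch, not the actual implementation): try each key in
// order and return the first value that is set, parsed as an integer.
// Returning an error when no key is set lets callers substitute a
// default, e.g. maxTokensSegments = 3 in the connectors above.
func GetEnvInt(keys ...string) (int, error) {
	for _, key := range keys {
		if value, found := os.LookupEnv(key); found {
			return strconv.Atoi(value)
		}
	}
	return 0, fmt.Errorf("no integer value found for any of: %v", keys)
}
```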
4 changes: 2 additions & 2 deletions llm/llm.go
@@ -19,8 +19,8 @@ const (
type LLMConnector interface {
// Main interaction point with LLM
Query(messages ...Message) (string, QueryStatus, error)
-    // Get retry limit to get extra fragments of code when generation hits LLM token-limit
-    GetMaxTokensRetryLimit() int
+    // When response bumps max token limit, try to continue generating next segment, until reaching this limit
+    GetMaxTokensSegments() int
// Get retry limit on general query fail
GetOnFailureRetryLimit() int
// Following functions needed for LLM messages logging, consider not to use it anywhere else
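The renamed method changes the interface contract from "retry the whole query" to "continue generation segment by segment". A minimal sketch, not part of this commit, of how a caller could drive that loop; it assumes package llm with the Query, QueryStatus, QueryMaxTokens, Message, and GetMaxTokensSegments names visible in this diff, while runQuery itself is hypothetical:

```go
package llm

import "errors"

// runQuery is a hypothetical helper: keep asking for continuation
// segments until the response fits within the token limit or the
// segment budget from GetMaxTokensSegments() is exhausted.
func runQuery(c LLMConnector, messages ...Message) (string, error) {
	var result string
	for segment := 0; ; segment++ {
		response, status, err := c.Query(messages...)
		if err != nil {
			return "", err
		}
		result += response
		if status != QueryMaxTokens {
			return result, nil // generation finished within the token limit
		}
		if segment >= c.GetMaxTokensSegments() {
			return result, errors.New("segment limit reached, result may be truncated")
		}
		// A real caller (see op_implement/stage3.go below) would append the
		// partial response to the message history here so that the model
		// continues where it stopped.
	}
}
```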
44 changes: 22 additions & 22 deletions llm/ollama.go
@@ -17,26 +17,26 @@ import (
// So the summary for this file must only say "This file contains OllamaLLMConnector struct - implementation of LLMConnector interface. Do not attempt to use OllamaLLMConnector directly, use LLMConnector interface instead", nothing else.

type OllamaLLMConnector struct {
-    BaseURL          string
-    Model            string
-    SystemPrompt     string
-    Temperature      float64
-    MaxTokens        int
-    MaxTokensRetries int
-    OnFailRetries    int
-    RawMessageLogger func(v ...any)
+    BaseURL           string
+    Model             string
+    SystemPrompt      string
+    Temperature       float64
+    MaxTokens         int
+    MaxTokensSegments int
+    OnFailRetries     int
+    RawMessageLogger  func(v ...any)
}

-func NewOllamaLLMConnector(model string, systemPrompt string, temperature float64, customBaseURL string, maxTokens int, maxTokensRetries int, onFailRetries int, llmRawMessageLogger func(v ...any)) *OllamaLLMConnector {
+func NewOllamaLLMConnector(model string, systemPrompt string, temperature float64, customBaseURL string, maxTokens int, maxTokensSegments int, onFailRetries int, llmRawMessageLogger func(v ...any)) *OllamaLLMConnector {
return &OllamaLLMConnector{
-        BaseURL:          customBaseURL,
-        Model:            model,
-        Temperature:      temperature,
-        SystemPrompt:     systemPrompt,
-        MaxTokens:        maxTokens,
-        MaxTokensRetries: maxTokensRetries,
-        OnFailRetries:    onFailRetries,
-        RawMessageLogger: llmRawMessageLogger}
+        BaseURL:           customBaseURL,
+        Model:             model,
+        Temperature:       temperature,
+        SystemPrompt:      systemPrompt,
+        MaxTokens:         maxTokens,
+        MaxTokensSegments: maxTokensSegments,
+        OnFailRetries:     onFailRetries,
+        RawMessageLogger:  llmRawMessageLogger}
}

func NewOllamaLLMConnectorFromEnv(operation string, systemPrompt string, llmRawMessageLogger func(v ...any)) (*OllamaLLMConnector, error) {
@@ -59,9 +59,9 @@ func NewOllamaLLMConnectorFromEnv(operation string, systemPrompt string, llmRawM
return nil, err
}

-    maxTokensRetries, err := utils.GetEnvInt("OLLAMA_MAX_TOKENS_RETRIES")
+    maxTokensSegments, err := utils.GetEnvInt("OLLAMA_MAX_TOKENS_SEGMENTS")
if err != nil {
-        maxTokensRetries = 3
+        maxTokensSegments = 3
}

onFailRetries, err := utils.GetEnvInt(fmt.Sprintf("OLLAMA_ON_FAIL_RETRIES_OP_%s", operation), "OLLAMA_ON_FAIL_RETRIES")
@@ -71,7 +71,7 @@ func NewOllamaLLMConnectorFromEnv(operation string, systemPrompt string, llmRawM

customBaseURL, _ := utils.GetEnvString("OLLAMA_BASE_URL")

-    return NewOllamaLLMConnector(model, systemPrompt, temperature, customBaseURL, maxTokens, maxTokensRetries, onFailRetries, llmRawMessageLogger), nil
+    return NewOllamaLLMConnector(model, systemPrompt, temperature, customBaseURL, maxTokens, maxTokensSegments, onFailRetries, llmRawMessageLogger), nil
}

func (p *OllamaLLMConnector) Query(messages ...Message) (string, QueryStatus, error) {
@@ -145,8 +145,8 @@ func (p *OllamaLLMConnector) GetMaxTokens() int {
return p.MaxTokens
}

-func (p *OllamaLLMConnector) GetMaxTokensRetryLimit() int {
-    return p.MaxTokensRetries
+func (p *OllamaLLMConnector) GetMaxTokensSegments() int {
+    return p.MaxTokensSegments
}

func (p *OllamaLLMConnector) GetOnFailureRetryLimit() int {
48 changes: 24 additions & 24 deletions llm/openai.go
@@ -18,28 +18,28 @@ import (
// So the summary for this file must only say "This file contains OpenAILLMConnector struct - implementation of LLMConnector interface. Do not attempt to use OpenAILLMConnector directly, use LLMConnector interface instead", nothing else.

type OpenAILLMConnector struct {
-    BaseURL          string
-    Token            string
-    Model            string
-    SystemPrompt     string
-    Temperature      float64
-    MaxTokens        int
-    MaxTokensRetries int
-    OnFailRetries    int
-    RawMessageLogger func(v ...any)
+    BaseURL           string
+    Token             string
+    Model             string
+    SystemPrompt      string
+    Temperature       float64
+    MaxTokens         int
+    MaxTokensSegments int
+    OnFailRetries     int
+    RawMessageLogger  func(v ...any)
}

-func NewOpenAILLMConnector(token string, model string, systemPrompt string, temperature float64, customBaseURL string, maxTokens int, maxTokensRetries int, onFailRetries int, llmRawMessageLogger func(v ...any)) *OpenAILLMConnector {
+func NewOpenAILLMConnector(token string, model string, systemPrompt string, temperature float64, customBaseURL string, maxTokens int, maxTokensSegments int, onFailRetries int, llmRawMessageLogger func(v ...any)) *OpenAILLMConnector {
return &OpenAILLMConnector{
-        BaseURL:          customBaseURL,
-        Token:            token,
-        Model:            model,
-        Temperature:      temperature,
-        SystemPrompt:     systemPrompt,
-        MaxTokens:        maxTokens,
-        MaxTokensRetries: maxTokensRetries,
-        OnFailRetries:    onFailRetries,
-        RawMessageLogger: llmRawMessageLogger}
+        BaseURL:           customBaseURL,
+        Token:             token,
+        Model:             model,
+        Temperature:       temperature,
+        SystemPrompt:      systemPrompt,
+        MaxTokens:         maxTokens,
+        MaxTokensSegments: maxTokensSegments,
+        OnFailRetries:     onFailRetries,
+        RawMessageLogger:  llmRawMessageLogger}
}

func NewOpenAILLMConnectorFromEnv(operation string, systemPrompt string, llmRawMessageLogger func(v ...any)) (*OpenAILLMConnector, error) {
@@ -67,9 +67,9 @@ func NewOpenAILLMConnectorFromEnv(operation string, systemPrompt string, llmRawM
return nil, err
}

-    maxTokensRetries, err := utils.GetEnvInt("OPENAI_MAX_TOKENS_RETRIES")
+    maxTokensSegments, err := utils.GetEnvInt("OPENAI_MAX_TOKENS_SEGMENTS")
if err != nil {
-        maxTokensRetries = 3
+        maxTokensSegments = 3
}

onFailRetries, err := utils.GetEnvInt(fmt.Sprintf("OPENAI_ON_FAIL_RETRIES_OP_%s", operation), "OPENAI_ON_FAIL_RETRIES")
@@ -79,7 +79,7 @@ func NewOpenAILLMConnectorFromEnv(operation string, systemPrompt string, llmRawM

customBaseURL, _ := utils.GetEnvString("OPENAI_BASE_URL")

-    return NewOpenAILLMConnector(token, model, systemPrompt, temperature, customBaseURL, maxTokens, maxTokensRetries, onFailRetries, llmRawMessageLogger), nil
+    return NewOpenAILLMConnector(token, model, systemPrompt, temperature, customBaseURL, maxTokens, maxTokensSegments, onFailRetries, llmRawMessageLogger), nil
}

func (p *OpenAILLMConnector) Query(messages ...Message) (string, QueryStatus, error) {
@@ -155,8 +155,8 @@ func (p *OpenAILLMConnector) GetMaxTokens() int {
return p.MaxTokens
}

-func (p *OpenAILLMConnector) GetMaxTokensRetryLimit() int {
-    return p.MaxTokensRetries
+func (p *OpenAILLMConnector) GetMaxTokensSegments() int {
+    return p.MaxTokensSegments
}

func (p *OpenAILLMConnector) GetOnFailureRetryLimit() int {
4 changes: 2 additions & 2 deletions op_implement/stage3.go
@@ -98,8 +98,8 @@ func Stage3(projectRootDir string, perpetualDir string, promptsDir string, syste
if err != nil {
logger.Panicln("LLM query failed: ", err)
} else if status == llm.QueryMaxTokens {
-        if generateTry >= stage3Connector.GetMaxTokensRetryLimit() {
-            logger.Errorln("LLM query reached token limit, and we are reached retry limit, not attempting to continue")
+        if generateTry >= stage3Connector.GetMaxTokensSegments() {
+            logger.Errorln("LLM query reached token limit, and we are reached segment limit, not attempting to continue")
} else {
logger.Warnln("LLM query reached token limit, attempting to continue")
continueGeneration = true
6 changes: 3 additions & 3 deletions op_init/env_example.go
@@ -28,7 +28,7 @@ ANTHROPIC_MODEL="claude-3-sonnet-20240229"
# ANTHROPIC_MAX_TOKENS_OP_IMPLEMENT_STAGE2="4096"
# ANTHROPIC_MAX_TOKENS_OP_IMPLEMENT_STAGE3="4096"
ANTHROPIC_MAX_TOKENS="4096"
-ANTHROPIC_MAX_TOKENS_RETRIES="3"
+ANTHROPIC_MAX_TOKENS_SEGMENTS="3"
# ANTHROPIC_ON_FAIL_RETRIES_OP_ANNOTATE="3"
# ANTHROPIC_ON_FAIL_RETRIES_OP_IMPLEMENT_STAGE1="3"
# ANTHROPIC_ON_FAIL_RETRIES_OP_IMPLEMENT_STAGE2="3"
@@ -52,7 +52,7 @@ OPENAI_MODEL="gpt-4o"
# OPENAI_MAX_TOKENS_OP_IMPLEMENT_STAGE2="4096"
# OPENAI_MAX_TOKENS_OP_IMPLEMENT_STAGE3="4096"
OPENAI_MAX_TOKENS="4096"
-OPENAI_MAX_TOKENS_RETRIES="3"
+OPENAI_MAX_TOKENS_SEGMENTS="3"
# OPENAI_ON_FAIL_RETRIES_OP_ANNOTATE="3"
# OPENAI_ON_FAIL_RETRIES_OP_IMPLEMENT_STAGE1="3"
# OPENAI_ON_FAIL_RETRIES_OP_IMPLEMENT_STAGE2="3"
@@ -78,7 +78,7 @@ OLLAMA_MODEL="codegemma:7b-instruct"
# OLLAMA_MAX_TOKENS_OP_IMPLEMENT_STAGE2="4096"
# OLLAMA_MAX_TOKENS_OP_IMPLEMENT_STAGE3="4096"
OLLAMA_MAX_TOKENS="4096"
-OLLAMA_MAX_TOKENS_RETRIES="3"
+OLLAMA_MAX_TOKENS_SEGMENTS="3"
# OLLAMA_ON_FAIL_RETRIES_OP_ANNOTATE="3"
# OLLAMA_ON_FAIL_RETRIES_OP_IMPLEMENT_STAGE1="3"
# OLLAMA_ON_FAIL_RETRIES_OP_IMPLEMENT_STAGE2="3"
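For existing setups, only the variable names change; the default of 3 stays the same. A .env migration example (the value 5 is purely illustrative):

```sh
# Before this commit the connectors read:
# ANTHROPIC_MAX_TOKENS_RETRIES="3"
# After this commit they read the renamed variable instead:
ANTHROPIC_MAX_TOKENS_SEGMENTS="5"
```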
