Skip to content

Commit

Permalink
Add InsightJob
Browse files Browse the repository at this point in the history
Create InsightJob to process insights asynchronously, triggered from an
after_commit callback on Insight to queue processing.

Add client and model helper methods to the job which call out to
Ollama.

Add prompt generation substituting in the initial request body in place
of a template variable.
  • Loading branch information
gbp committed Nov 20, 2024
1 parent bda3486 commit be95394
Show file tree
Hide file tree
Showing 4 changed files with 101 additions and 0 deletions.
30 changes: 30 additions & 0 deletions app/jobs/insight_job.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
##
# InsightJob is responsible for generating InfoRequest insights using an AI
# model run via Ollama.
#
class InsightJob < ApplicationJob
  queue_as :insights

  # Expose the insight's stored configuration (model name, temperature) and
  # its generated prompt to the private helpers below.
  delegate :model, :temperature, :prompt, to: :@insight

  # Generate a completion for the given Insight and persist it.
  #
  # insight - the Insight record to process.
  def perform(insight)
    @insight = insight

    # The client returns an array of response events; with stream: false the
    # first (and only) event carries the complete output.
    insight.update(output: results.first)
  end

  private

  # Run the generation request against Ollama. stream: false asks for a
  # single complete response instead of incremental tokens.
  def results
    client.generate(
      { model: model, prompt: prompt, temperature: temperature, stream: false }
    )
  end

  # Build the Ollama client. ENV.fetch raises a clear KeyError when
  # OLLAMA_URL is unset, instead of silently configuring a nil address and
  # failing later with an obscure connection error.
  def client
    Ollama.new(
      credentials: { address: ENV.fetch('OLLAMA_URL') },
      options: { server_sent_events: true }
    )
  end
end
14 changes: 14 additions & 0 deletions app/models/insight.rb
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@
# updated_at :datetime not null
#
class Insight < ApplicationRecord
after_commit :queue, on: :create

belongs_to :info_request, optional: false
has_many :outgoing_messages, through: :info_request

Expand All @@ -21,4 +23,16 @@ class Insight < ApplicationRecord
validates :model, presence: true
validates :temperature, presence: true
validates :template, presence: true

def prompt
template.gsub('[initial_request]') do
outgoing_messages.first.body[0...500]
end
end

private

def queue
InsightJob.perform_later(self)
end
end
34 changes: 34 additions & 0 deletions spec/jobs/insight_job_spec.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
require 'spec_helper'

# Unit spec for InsightJob: the Ollama client and the Insight record are both
# stubbed, so no network access or database writes take place.
RSpec.describe InsightJob, type: :job do
  subject(:job) { described_class.new }

  let(:insight) do
    FactoryBot.build('insight', model: 'gpt-3.5-turbo', temperature: 0.7)
  end

  let(:client) { instance_double('Ollama::Controllers::Client') }

  before do
    allow(job).to receive(:client).and_return(client)
    allow(insight).to receive(:prompt).and_return('Test prompt')
    allow(insight).to receive(:update)
  end

  describe '#perform' do
    it 'updates the insight with the generated output' do
      expected_payload = hash_including(
        model: 'gpt-3.5-turbo',
        temperature: 0.7,
        prompt: 'Test prompt',
        stream: false
      )

      expect(client).to receive(:generate).
        with(expected_payload).
        and_return(['Generated output'])
      expect(insight).to receive(:update).with(output: 'Generated output')

      job.perform(insight)
    end
  end
end
23 changes: 23 additions & 0 deletions spec/models/insight_spec.rb
Original file line number Diff line number Diff line change
Expand Up @@ -52,4 +52,27 @@
expect(insight).not_to be_valid
end
end

describe 'callbacks' do
it 'queues InsightJob after create' do
expect(InsightJob).to receive(:perform_later)
FactoryBot.create(:insight)
end
end

describe '#prompt' do
it 'replaces [initial_request] with first outgoing message body' do
outgoing_message = instance_double(
OutgoingMessage, body: 'message content'
)
insight = FactoryBot.build(
:insight, template: 'Template with [initial_request]'
)

allow(insight).to receive(:outgoing_messages).
and_return([outgoing_message])

expect(insight.prompt).to eq('Template with message content')
end
end
end

0 comments on commit be95394

Please sign in to comment.