Restructure omni services and add Chatwoot research snapshot
This commit is contained in:
@@ -0,0 +1,147 @@
|
||||
require 'rails_helper'

RSpec.describe Captain::Llm::AssistantChatService do
  let(:account) { create(:account) }
  let(:assistant) { create(:captain_assistant, account: account) }
  let(:conversation) { create(:conversation, account: account) }

  let(:mock_chat) { instance_double(RubyLLM::Chat) }
  let(:mock_response) do
    instance_double(
      RubyLLM::Message,
      content: '{"response": "I can see the image shows a pricing table", "reasoning": "Analyzed the image"}'
    )
  end

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')

    allow(RubyLLM).to receive(:chat).and_return(mock_chat)
    # The chat object exposes a fluent builder API: every configuration call
    # returns the chat itself, so stub them all to return the same double.
    %i[
      with_temperature with_params with_tool with_instructions
      add_message on_end_message on_tool_call on_tool_result
    ].each do |builder_method|
      allow(mock_chat).to receive(builder_method).and_return(mock_chat)
    end
    allow(mock_chat).to receive(:messages).and_return([])
  end

  describe 'instrumentation metadata' do
    it 'passes channel_type to the agent session instrumentation' do
      service = described_class.new(assistant: assistant, conversation_id: conversation.display_id)

      expect(service).to receive(:instrument_agent_session).with(
        hash_including(metadata: hash_including(channel_type: conversation.inbox.channel_type))
      ).and_yield

      allow(mock_chat).to receive(:ask).and_return(mock_response)
      service.generate_response(message_history: [{ role: 'user', content: 'Hello' }])
    end
  end

  describe 'image analysis' do
    context 'when user sends a message with an image attachment' do
      let(:message_history) do
        [
          {
            role: 'user',
            content: [
              { type: 'text', text: 'What do you see in this image?' },
              { type: 'image_url', image_url: { url: 'https://example.com/screenshot.png' } }
            ]
          }
        ]
      end

      it 'sends the image to the LLM for analysis' do
        expect(mock_chat).to receive(:ask).with(
          'What do you see in this image?',
          with: ['https://example.com/screenshot.png']
        ).and_return(mock_response)

        service = described_class.new(assistant: assistant, conversation_id: conversation.display_id)
        service.generate_response(message_history: message_history)
      end
    end

    context 'when user sends only an image without text' do
      let(:message_history) do
        [
          {
            role: 'user',
            content: [
              { type: 'image_url', image_url: { url: 'https://example.com/photo.jpg' } }
            ]
          }
        ]
      end

      it 'sends the image to the LLM with nil text' do
        expect(mock_chat).to receive(:ask).with(
          nil,
          with: ['https://example.com/photo.jpg']
        ).and_return(mock_response)

        service = described_class.new(assistant: assistant, conversation_id: conversation.display_id)
        service.generate_response(message_history: message_history)
      end
    end

    context 'when user sends a plain text message' do
      let(:message_history) do
        [
          { role: 'user', content: 'Hello, how can you help me?' }
        ]
      end

      it 'sends the text without attachments' do
        expect(mock_chat).to receive(:ask).with('Hello, how can you help me?').and_return(mock_response)

        service = described_class.new(assistant: assistant, conversation_id: conversation.display_id)
        service.generate_response(message_history: message_history)
      end
    end
  end

  describe 'conversation history with images' do
    context 'when previous messages contain images' do
      let(:message_history) do
        [
          {
            role: 'user',
            content: [
              { type: 'text', text: 'Here is my error screenshot' },
              { type: 'image_url', image_url: { url: 'https://example.com/error.png' } }
            ]
          },
          { role: 'assistant', content: 'I see the error. Try restarting.' },
          { role: 'user', content: 'It still does not work' }
        ]
      end

      it 'includes images from conversation history in context' do
        # First historical message should include the image via RubyLLM::Content
        expect(mock_chat).to receive(:add_message) do |args|
          expect(args[:role]).to eq(:user)
          expect(args[:content]).to be_a(RubyLLM::Content)
          expect(args[:content].text).to eq('Here is my error screenshot')
          expect(args[:content].attachments.first.source.to_s).to eq('https://example.com/error.png')
        end.ordered

        # Second historical message is plain text
        expect(mock_chat).to receive(:add_message).with(
          role: :assistant,
          content: 'I see the error. Try restarting.'
        ).ordered

        # Current message asked via chat.ask
        expect(mock_chat).to receive(:ask).with('It still does not work').and_return(mock_response)

        service = described_class.new(assistant: assistant, conversation_id: conversation.display_id)
        service.generate_response(message_history: message_history)
      end
    end
  end
end
@@ -0,0 +1,155 @@
|
||||
require 'rails_helper'

RSpec.describe Captain::Llm::ConversationFaqService do
  let(:captain_assistant) { create(:captain_assistant) }
  let(:conversation) { create(:conversation, first_reply_created_at: Time.zone.now) }
  let(:service) { described_class.new(captain_assistant, conversation) }
  let(:embedding_service) { instance_double(Captain::Llm::EmbeddingService) }
  let(:mock_chat) { instance_double(RubyLLM::Chat) }
  let(:sample_faqs) do
    [
      { 'question' => 'What is the purpose?', 'answer' => 'To help users.' },
      { 'question' => 'How does it work?', 'answer' => 'Through AI.' }
    ]
  end
  let(:mock_response) { instance_double(RubyLLM::Message, content: { faqs: sample_faqs }.to_json) }

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    allow(Captain::Llm::EmbeddingService).to receive(:new).and_return(embedding_service)
    allow(RubyLLM).to receive(:chat).and_return(mock_chat)
    # Fluent builder calls all return the chat double itself.
    %i[with_temperature with_params with_instructions].each do |builder_method|
      allow(mock_chat).to receive(builder_method).and_return(mock_chat)
    end
    allow(mock_chat).to receive(:ask).and_return(mock_response)
  end

  describe '#generate_and_deduplicate' do
    context 'when successful' do
      before do
        allow(embedding_service).to receive(:get_embedding).and_return([0.1, 0.2, 0.3])
        allow(captain_assistant.responses).to receive(:nearest_neighbors).and_return([])
      end

      it 'creates new FAQs for valid conversation content' do
        expect { service.generate_and_deduplicate }.to change(captain_assistant.responses, :count).by(2)
      end

      it 'saves FAQs with pending status linked to conversation' do
        service.generate_and_deduplicate
        expect(
          captain_assistant.responses.pluck(:question, :answer, :status, :documentable_id)
        ).to contain_exactly(
          ['What is the purpose?', 'To help users.', 'pending', conversation.id],
          ['How does it work?', 'Through AI.', 'pending', conversation.id]
        )
      end
    end

    context 'without human interaction' do
      # No first_reply_created_at means no agent ever replied.
      let(:conversation) { create(:conversation) }

      it 'returns an empty array without generating FAQs' do
        expect(service.generate_and_deduplicate).to eq([])
      end

      it 'does not call the LLM API' do
        expect(RubyLLM).not_to receive(:chat)
        service.generate_and_deduplicate
      end
    end

    context 'when finding duplicates' do
      let(:existing_response) do
        create(:captain_assistant_response, assistant: captain_assistant, question: 'Similar question', answer: 'Similar answer')
      end
      let(:similar_neighbor) do
        OpenStruct.new(
          id: 1,
          question: existing_response.question,
          answer: existing_response.answer,
          neighbor_distance: 0.1
        )
      end

      before do
        allow(embedding_service).to receive(:get_embedding).and_return([0.1, 0.2, 0.3])
        allow(captain_assistant.responses).to receive(:nearest_neighbors).and_return([similar_neighbor])
      end

      it 'filters out duplicate FAQs based on embedding similarity' do
        expect { service.generate_and_deduplicate }.not_to change(captain_assistant.responses, :count)
      end
    end

    context 'when LLM API fails' do
      before do
        allow(mock_chat).to receive(:ask).and_raise(RubyLLM::Error.new(nil, 'API Error'))
        allow(Rails.logger).to receive(:error)
      end

      it 'returns empty array and logs the error' do
        expect(Rails.logger).to receive(:error).with('LLM API Error: API Error')
        expect(service.generate_and_deduplicate).to eq([])
      end
    end

    context 'when JSON parsing fails' do
      let(:invalid_response) { instance_double(RubyLLM::Message, content: 'invalid json') }

      before do
        allow(mock_chat).to receive(:ask).and_return(invalid_response)
      end

      it 'handles JSON parsing errors gracefully' do
        expect(Rails.logger).to receive(:error).with(/Error in parsing GPT processed response:/)
        expect(service.generate_and_deduplicate).to eq([])
      end
    end

    context 'when response content is nil' do
      let(:nil_response) { instance_double(RubyLLM::Message, content: nil) }

      before do
        allow(mock_chat).to receive(:ask).and_return(nil_response)
      end

      it 'returns empty array' do
        expect(service.generate_and_deduplicate).to eq([])
      end
    end
  end

  describe 'language handling' do
    context 'when conversation has different language' do
      let(:account) { create(:account, locale: 'fr') }
      let(:conversation) do
        create(:conversation, account: account, first_reply_created_at: Time.zone.now)
      end

      before do
        allow(embedding_service).to receive(:get_embedding).and_return([0.1, 0.2, 0.3])
        allow(captain_assistant.responses).to receive(:nearest_neighbors).and_return([])
      end

      it 'uses account language for system prompt' do
        expect(Captain::Llm::SystemPromptsService).to receive(:conversation_faq_generator)
          .with('french')
          .at_least(:once)
          .and_call_original

        service.generate_and_deduplicate
      end
    end
  end
end
@@ -0,0 +1,103 @@
|
||||
require 'rails_helper'

RSpec.describe Captain::Llm::FaqGeneratorService do
  let(:content) { 'Sample content for FAQ generation' }
  let(:language) { 'english' }
  let(:service) { described_class.new(content, language) }
  let(:mock_chat) { instance_double(RubyLLM::Chat) }
  let(:sample_faqs) do
    [
      { 'question' => 'What is this service?', 'answer' => 'It generates FAQs.' },
      { 'question' => 'How does it work?', 'answer' => 'Using AI technology.' }
    ]
  end
  let(:mock_response) { instance_double(RubyLLM::Message, content: { faqs: sample_faqs }.to_json) }

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    allow(RubyLLM).to receive(:chat).and_return(mock_chat)
    # Fluent builder calls all return the chat double itself.
    %i[with_temperature with_params with_instructions].each do |builder_method|
      allow(mock_chat).to receive(builder_method).and_return(mock_chat)
    end
    allow(mock_chat).to receive(:ask).and_return(mock_response)
  end

  describe '#generate' do
    context 'when successful' do
      it 'returns parsed FAQs from the LLM response' do
        expect(service.generate).to eq(sample_faqs)
      end

      it 'sends content to LLM with JSON response format' do
        expect(mock_chat).to receive(:with_params).with(response_format: { type: 'json_object' }).and_return(mock_chat)
        service.generate
      end

      it 'uses SystemPromptsService with the specified language' do
        expect(Captain::Llm::SystemPromptsService).to receive(:faq_generator).with(language).at_least(:once).and_call_original
        service.generate
      end
    end

    context 'with different language' do
      let(:language) { 'spanish' }

      it 'passes the correct language to SystemPromptsService' do
        expect(Captain::Llm::SystemPromptsService).to receive(:faq_generator).with('spanish').at_least(:once).and_call_original
        service.generate
      end
    end

    context 'when LLM API fails' do
      before do
        allow(mock_chat).to receive(:ask).and_raise(RubyLLM::Error.new(nil, 'API Error'))
        allow(Rails.logger).to receive(:error)
      end

      it 'returns empty array and logs the error' do
        expect(Rails.logger).to receive(:error).with('LLM API Error: API Error')
        expect(service.generate).to eq([])
      end
    end

    context 'when response content is nil' do
      let(:nil_response) { instance_double(RubyLLM::Message, content: nil) }

      before do
        allow(mock_chat).to receive(:ask).and_return(nil_response)
      end

      it 'returns empty array' do
        expect(service.generate).to eq([])
      end
    end

    context 'when JSON parsing fails' do
      let(:invalid_response) { instance_double(RubyLLM::Message, content: 'invalid json') }

      before do
        allow(mock_chat).to receive(:ask).and_return(invalid_response)
      end

      it 'logs error and returns empty array' do
        expect(Rails.logger).to receive(:error).with(/Error in parsing GPT processed response:/)
        expect(service.generate).to eq([])
      end
    end

    context 'when response is missing faqs key' do
      let(:missing_key_response) { instance_double(RubyLLM::Message, content: '{"data": []}') }

      before do
        allow(mock_chat).to receive(:ask).and_return(missing_key_response)
      end

      it 'returns empty array via KeyError rescue' do
        expect(service.generate).to eq([])
      end
    end
  end
end
@@ -0,0 +1,105 @@
|
||||
require 'rails_helper'

RSpec.describe Captain::Llm::PaginatedFaqGeneratorService do
  let(:document) { create(:captain_document) }
  let(:service) { described_class.new(document, pages_per_chunk: 5) }
  let(:openai_client) { instance_double(OpenAI::Client) }

  # Builds an OpenAI chat-completion style payload whose message content is the
  # JSON the service parses ('faqs' list plus a 'has_content' continuation flag).
  def chat_payload(faqs:, has_content:)
    {
      'choices' => [{
        'message' => {
          'content' => JSON.generate('faqs' => faqs, 'has_content' => has_content)
        }
      }]
    }
  end

  before do
    # Stub the OpenAI credential lookup so no real key is needed.
    installation_config = instance_double(InstallationConfig, value: 'test-api-key')
    allow(InstallationConfig).to receive(:find_by!)
      .with(name: 'CAPTAIN_OPEN_AI_API_KEY')
      .and_return(installation_config)

    allow(OpenAI::Client).to receive(:new).and_return(openai_client)
  end

  describe '#generate' do
    context 'when document lacks OpenAI file ID' do
      before do
        allow(document).to receive(:openai_file_id).and_return(nil)
      end

      it 'raises an error' do
        expect { service.generate }.to raise_error(CustomExceptions::Pdf::FaqGenerationError)
      end
    end

    context 'when generating FAQs from PDF pages' do
      let(:faq_response) do
        chat_payload(
          faqs: [{ 'question' => 'What is this document about?', 'answer' => 'It explains key concepts.' }],
          has_content: true
        )
      end
      let(:empty_response) { chat_payload(faqs: [], has_content: false) }

      before do
        allow(document).to receive(:openai_file_id).and_return('file-123')
      end

      it 'generates FAQs from paginated content' do
        allow(openai_client).to receive(:chat).and_return(faq_response, empty_response)

        faqs = service.generate

        expect(faqs).to have_attributes(size: 1)
        expect(faqs.first['question']).to eq('What is this document about?')
      end

      it 'stops when no more content' do
        allow(openai_client).to receive(:chat).and_return(empty_response)

        expect(service.generate).to be_empty
      end

      it 'respects max iterations limit' do
        allow(openai_client).to receive(:chat).and_return(faq_response)

        # Start one step below the cap so a single pass hits it.
        service.instance_variable_set(:@iterations_completed, 19)

        service.generate
        expect(service.iterations_completed).to eq(20)
      end
    end
  end

  describe '#should_continue_processing?' do
    it 'stops at max iterations' do
      service.instance_variable_set(:@iterations_completed, 20)
      expect(service.should_continue_processing?(faqs: ['faq'], has_content: true)).to be false
    end

    it 'stops when no FAQs returned' do
      expect(service.should_continue_processing?(faqs: [], has_content: true)).to be false
    end

    it 'continues when FAQs exist and under limits' do
      expect(service.should_continue_processing?(faqs: ['faq'], has_content: true)).to be true
    end
  end
end
@@ -0,0 +1,58 @@
|
||||
require 'rails_helper'

RSpec.describe Captain::Llm::PdfProcessingService do
  let(:document) { create(:captain_document) }
  let(:service) { described_class.new(document) }

  before do
    # Stub the OpenAI credential lookup so no real key is needed.
    installation_config = instance_double(InstallationConfig, value: 'test-api-key')
    allow(InstallationConfig).to receive(:find_by!)
      .with(name: 'CAPTAIN_OPEN_AI_API_KEY')
      .and_return(installation_config)
  end

  describe '#process' do
    context 'when document already has OpenAI file ID' do
      before do
        allow(document).to receive(:openai_file_id).and_return('existing-file-id')
      end

      it 'skips upload' do
        expect(document).not_to receive(:store_openai_file_id)
        service.process
      end
    end

    context 'when uploading PDF to OpenAI' do
      let(:mock_client) { instance_double(OpenAI::Client) }
      let(:pdf_content) { 'PDF content' }
      let(:blob_double) { instance_double(ActiveStorage::Blob) }
      let(:pdf_file) { instance_double(ActiveStorage::Attachment) }

      before do
        allow(document).to receive_messages(openai_file_id: nil, pdf_file: pdf_file)
        allow(pdf_file).to receive(:blob).and_return(blob_double)
        allow(blob_double).to receive(:open).and_yield(StringIO.new(pdf_content))

        allow(OpenAI::Client).to receive(:new).and_return(mock_client)
        # Use a simple double for OpenAI::Files as it may not be loaded
        files_api = double('files_api') # rubocop:disable RSpec/VerifiedDoubles
        allow(files_api).to receive(:upload).and_return({ 'id' => 'file-abc123' })
        allow(mock_client).to receive(:files).and_return(files_api)
      end

      it 'uploads PDF and stores file ID' do
        expect(document).to receive(:store_openai_file_id).with('file-abc123')
        service.process
      end

      it 'raises error when upload fails' do
        allow(mock_client.files).to receive(:upload).and_return({ 'id' => nil })

        expect { service.process }.to raise_error(CustomExceptions::Pdf::UploadError)
      end
    end
  end
end
Reference in New Issue
Block a user