Restructure omni services and add Chatwoot research snapshot
This commit is contained in:
189
research/chatwoot/lib/captain/base_task_service.rb
Normal file
189
research/chatwoot/lib/captain/base_task_service.rb
Normal file
@@ -0,0 +1,189 @@
|
||||
# Base class for Captain one-shot LLM task services (summary, rewrite,
# reply/label suggestion, follow-up). Subclasses implement #perform and the
# private #event_name; this class owns API-key resolution, the RubyLLM
# request pipeline, instrumentation, and follow-up context assembly.
class Captain::BaseTaskService
  include Integrations::LlmInstrumentation
  include Captain::ToolInstrumentation

  # gpt-4o-mini supports 128,000 tokens
  # 1 token is approx 4 characters
  # sticking with 120000 to be safe
  # 120000 * 4 = 480,000 characters (rounding off downwards to 400,000 to be safe)
  # NOTE(review): despite the name, this limit is compared against CHARACTER
  # counts in #conversation_messages, not token counts.
  TOKEN_LIMIT = 400_000
  GPT_MODEL = Llm::Config::DEFAULT_MODEL

  # Prepend enterprise module to subclasses when they're defined.
  # This ensures the enterprise perform wrapper is applied even when
  # subclasses define their own perform method, since prepend puts
  # the module before the class in the ancestor chain.
  def self.inherited(subclass)
    super
    subclass.prepend_mod_with('Captain::BaseTaskService')
  end

  # account is required; conversation_display_id is optional because some
  # tasks (e.g. content rewrites) can run without a conversation.
  pattr_initialize [:account!, { conversation_display_id: nil }]

  private

  # Subclasses must return a short identifier (e.g. 'summarize') used in
  # span names, instrumentation tags, and follow-up context.
  def event_name
    raise NotImplementedError, "#{self.class} must implement #event_name"
  end

  # Memoized lookup; nil when no display id was given or it doesn't match.
  def conversation
    @conversation ||= account.conversations.find_by(display_id: conversation_display_id)
  end

  # Resolves the OpenAI-compatible endpoint base (installation override or
  # api.openai.com) normalized to exactly one trailing '/v1'.
  def api_base
    endpoint = InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_ENDPOINT')&.value.presence || 'https://api.openai.com/'
    endpoint = endpoint.chomp('/')
    "#{endpoint}/v1"
  end

  # Central entry point for LLM calls. Returns a hash with
  # :message/:usage/:request_messages on success, or :error/:error_code on
  # failure; successful responses also gain :follow_up_context so the client
  # can keep refining the result.
  def make_api_call(model:, messages:, tools: [])
    # Community edition prerequisite checks
    # Enterprise module handles these with more specific error messages (cloud vs self-hosted)
    return { error: I18n.t('captain.disabled'), error_code: 403 } unless captain_tasks_enabled?
    return { error: I18n.t('captain.api_key_missing'), error_code: 401 } unless api_key_configured?

    instrumentation_params = build_instrumentation_params(model, messages)
    # Tool flows use the custom session span from Captain::ToolInstrumentation.
    instrumentation_method = tools.any? ? :instrument_tool_session : :instrument_llm_call

    response = send(instrumentation_method, instrumentation_params) do
      execute_ruby_llm_request(model: model, messages: messages, tools: tools)
    end

    return response unless build_follow_up_context? && response[:message].present?

    response.merge(follow_up_context: build_follow_up_context(messages, response))
  end

  # Runs the chat request through RubyLLM with the resolved key/endpoint.
  # Any StandardError is captured and converted into an :error hash so
  # callers never see raw exceptions from this path.
  def execute_ruby_llm_request(model:, messages:, tools: [])
    Llm::Config.with_api_key(api_key, api_base: api_base) do |context|
      chat = build_chat(context, model: model, messages: messages, tools: tools)

      conversation_messages = messages.reject { |m| m[:role] == 'system' }
      return { error: 'No conversation messages provided', error_code: 400, request_messages: messages } if conversation_messages.empty?

      add_messages_if_needed(chat, conversation_messages)
      # Earlier turns are preloaded above; the final message is sent via ask.
      build_ruby_llm_response(chat.ask(conversation_messages.last[:content]), messages)
    end
  rescue StandardError => e
    ChatwootExceptionTracker.new(e, account: account).capture_exception
    { error: e.message, request_messages: messages }
  end

  # Builds the RubyLLM chat: applies the system message as instructions,
  # attaches tools, and (for tool flows) records each assistant generation.
  def build_chat(context, model:, messages:, tools: [])
    chat = context.chat(model: model)
    system_msg = messages.find { |m| m[:role] == 'system' }
    chat.with_instructions(system_msg[:content]) if system_msg

    if tools.any?
      tools.each { |tool| chat = chat.with_tool(tool) }
      chat.on_end_message { |message| record_generation(chat, message, model) }
    end

    chat
  end

  # Preloads all but the last conversation message into the chat history.
  def add_messages_if_needed(chat, conversation_messages)
    return if conversation_messages.length == 1

    conversation_messages[0...-1].each do |msg|
      chat.add_message(role: msg[:role].to_sym, content: msg[:content])
    end
  end

  # Normalizes a RubyLLM response into the hash shape callers expect,
  # mirroring OpenAI's usage key names (string keys).
  def build_ruby_llm_response(response, messages)
    {
      message: response.content,
      usage: {
        'prompt_tokens' => response.input_tokens,
        'completion_tokens' => response.output_tokens,
        'total_tokens' => (response.input_tokens || 0) + (response.output_tokens || 0)
      },
      request_messages: messages
    }
  end

  # Params consumed by instrument_llm_call / instrument_tool_session.
  def build_instrumentation_params(model, messages)
    {
      span_name: "llm.#{event_name}",
      account_id: account.id,
      conversation_id: conversation&.display_id,
      feature_name: event_name,
      model: model,
      messages: messages,
      temperature: nil,
      metadata: instrumentation_metadata
    }
  end

  # Extra span metadata; compacted so absent values are omitted entirely.
  def instrumentation_metadata
    {
      channel_type: conversation&.inbox&.channel_type
    }.compact
  end

  # Collects non-private incoming/outgoing messages, newest first, stopping
  # once TOKEN_LIMIT characters (offset by start_from) would be exceeded.
  # Returned in chronological order as { role:, content: } hashes.
  def conversation_messages(start_from: 0)
    messages = []
    character_count = start_from

    conversation.messages
                .where(message_type: [:incoming, :outgoing])
                .where(private: false)
                .reorder('id desc')
                .each do |message|
      content = message.content_for_llm
      break unless content.present? && character_count + content.length <= TOKEN_LIMIT

      messages.prepend({ role: (message.incoming? ? 'user' : 'assistant'), content: content })
      character_count += content.length
    end

    messages
  end

  def captain_tasks_enabled?
    account.feature_enabled?('captain_tasks')
  end

  def api_key_configured?
    api_key.present?
  end

  # Account-level OpenAI integration hook wins; falls back to the
  # installation-wide key.
  def api_key
    @api_key ||= openai_hook&.settings&.dig('api_key') || system_api_key
  end

  def openai_hook
    @openai_hook ||= account.hooks.find_by(app_id: 'openai', status: 'enabled')
  end

  def system_api_key
    @system_api_key ||= InstallationConfig.find_by(name: 'CAPTAIN_OPEN_AI_API_KEY')&.value
  end

  # Reads a Liquid prompt template bundled with the app.
  def prompt_from_file(file_name)
    Rails.root.join('lib/integrations/openai/openai_prompts', "#{file_name}.liquid").read
  end

  # Follow-up context for client-side refinement
  def build_follow_up_context?
    # FollowUpService should return its own updated context
    !is_a?(Captain::FollowUpService)
  end

  # Initial follow-up payload: the originating event, the user input that
  # produced the result, the result itself, and an empty history.
  def build_follow_up_context(messages, response)
    {
      event_name: event_name,
      original_context: extract_original_context(messages),
      last_response: response[:message],
      conversation_history: [],
      channel_type: conversation&.inbox&.channel_type
    }
  end

  def extract_original_context(messages)
    # Get the most recent user message for follow-up context
    user_msg = messages.reverse.find { |m| m[:role] == 'user' }
    user_msg ? user_msg[:content] : nil
  end
end

# Prepend the enterprise override to the base class itself; subclasses are
# covered by self.inherited above.
Captain::BaseTaskService.prepend_mod_with('Captain::BaseTaskService')
|
||||
106
research/chatwoot/lib/captain/follow_up_service.rb
Normal file
106
research/chatwoot/lib/captain/follow_up_service.rb
Normal file
@@ -0,0 +1,106 @@
|
||||
# Iteratively refines a previous Captain task result: the agent sends
# feedback, the service replays the prior exchange plus that feedback to the
# LLM, and returns the reply together with an updated follow-up context.
class Captain::FollowUpService < Captain::BaseTaskService
  pattr_initialize [:account!, :follow_up_context!, :user_message!, { conversation_display_id: nil }]

  ALLOWED_EVENT_NAMES = %w[
    professional
    casual
    friendly
    confident
    straightforward
    fix_spelling_grammar
    improve
    summarize
    reply_suggestion
    label_suggestion
  ].freeze

  # Tone-rewrite events share one description template.
  TONE_EVENTS = %w[professional casual friendly confident straightforward].freeze

  # Human-readable descriptions for the remaining event types.
  ACTION_LABELS = {
    'fix_spelling_grammar' => 'spelling and grammar correction',
    'improve' => 'message improvement',
    'summarize' => 'conversation summary',
    'reply_suggestion' => 'reply suggestion',
    'label_suggestion' => 'label suggestion'
  }.freeze

  # Validates the incoming context, replays it with the new feedback, and
  # merges a refreshed follow-up context into the successful response.
  def perform
    return { error: 'Follow-up context missing', error_code: 400 } unless valid_follow_up_context?

    response = make_api_call(model: GPT_MODEL, messages: follow_up_messages)
    return response if response[:error]

    response.merge(follow_up_context: update_follow_up_context(user_message, response[:message]))
  end

  private

  # Full message array for the refinement call: system prompt, the original
  # exchange, any accumulated history, then the agent's new feedback.
  def follow_up_messages
    [
      { role: 'system', content: build_follow_up_system_prompt(follow_up_context) },
      { role: 'user', content: follow_up_context['original_context'] },
      { role: 'assistant', content: follow_up_context['last_response'] },
      *symbolized_history,
      { role: 'user', content: user_message }
    ]
  end

  # History arrives with string keys (JSON-decoded); convert each turn to the
  # symbol-key shape the base service expects.
  def symbolized_history
    follow_up_context['conversation_history'].to_a.map do |turn|
      { role: turn['role'], content: turn['content'] }
    end
  end

  def build_follow_up_system_prompt(session_data)
    action_context = describe_previous_action(session_data['event_name'])

    <<~PROMPT
      You just performed a #{action_context} action for a customer support agent.
      Your job now is to help them refine the result based on their feedback.
      Be concise and focused on their specific request.
      Output only the reply, no preamble, tags, or explanation.
    PROMPT
  end

  # Unknown event names fall back to the raw name.
  def describe_previous_action(event_name)
    return "tone rewrite (#{event_name})" if TONE_EVENTS.include?(event_name)

    ACTION_LABELS.fetch(event_name, event_name)
  end

  # Context is usable only when it is a hash from a known event type and
  # carries every field needed to reconstruct the exchange.
  def valid_follow_up_context?
    return false unless follow_up_context.is_a?(Hash)
    return false unless ALLOWED_EVENT_NAMES.include?(follow_up_context['event_name'])

    %w[event_name original_context last_response].all? { |key| follow_up_context[key].present? }
  end

  # Next follow-up context: appends this exchange to the history and promotes
  # the assistant's reply to last_response.
  def update_follow_up_context(user_msg, assistant_msg)
    new_turns = [
      { 'role' => 'user', 'content' => user_msg },
      { 'role' => 'assistant', 'content' => assistant_msg }
    ]

    {
      'event_name' => follow_up_context['event_name'],
      'original_context' => follow_up_context['original_context'],
      'last_response' => assistant_msg,
      'conversation_history' => follow_up_context['conversation_history'].to_a + new_turns,
      'channel_type' => follow_up_context['channel_type']
    }
  end

  # Fall back to the channel stored in the context when no conversation is
  # loaded (follow-ups may run without a conversation_display_id).
  def instrumentation_metadata
    { channel_type: conversation&.inbox&.channel_type || follow_up_context['channel_type'] }.compact
  end

  def event_name
    'follow_up'
  end
end
|
||||
93
research/chatwoot/lib/captain/label_suggestion_service.rb
Normal file
93
research/chatwoot/lib/captain/label_suggestion_service.rb
Normal file
@@ -0,0 +1,93 @@
|
||||
# Suggests labels for a conversation by sending recent messages plus the
# account's label titles to the LLM. Successful results are cached, keyed by
# the conversation's last activity timestamp.
class Captain::LabelSuggestionService < Captain::BaseTaskService
  pattr_initialize [:account!, :conversation_display_id!]

  # Returns { message: '...' } on success, an error hash from the API layer,
  # or nil when the conversation isn't eligible for suggestions.
  def perform
    cached = read_from_cache
    return cached if cached.present?

    content = labels_with_messages
    return nil if content.blank?

    response = request_suggestions(content)
    return response if response[:error].present?

    result = { message: strip_label_prefix(response[:message]) }
    write_to_cache(result)
    result
  end

  private

  def request_suggestions(content)
    make_api_call(
      model: GPT_MODEL, # TODO: Use separate model for label suggestion
      messages: [
        { role: 'system', content: prompt_from_file('label_suggestion') },
        { role: 'user', content: content }
      ]
    )
  end

  # Drops a leading "label:"/"labels:" prefix from the model output.
  def strip_label_prefix(message)
    message ? message.gsub(/^(label|labels):/i, '') : ''
  end

  # Cache key invalidates automatically when last_activity_at changes.
  def cache_key
    return nil unless conversation

    format(
      ::Redis::Alfred::OPENAI_CONVERSATION_KEY,
      event_name: 'label_suggestion',
      conversation_id: conversation.id,
      updated_at: conversation.last_activity_at.to_i
    )
  end

  # Returns the cached response hash (symbol keys) or nil on miss/corruption.
  def read_from_cache
    return nil unless cache_key

    raw = Redis::Alfred.get(cache_key)
    JSON.parse(raw, symbolize_names: true) if raw.present?
  rescue JSON::ParserError
    nil
  end

  def write_to_cache(response)
    Redis::Alfred.setex(cache_key, response.to_json) if cache_key
  end

  # Combines the label list with as many recent messages as fit the
  # character budget; nil when either side is empty or ineligible.
  def labels_with_messages
    return nil unless valid_conversation?(conversation)

    labels = account.labels.pluck(:title).join(', ')
    messages = format_messages_as_string(start_from: labels.length)
    return nil if messages.blank? || labels.blank?

    "Messages:\n#{messages}\nLabels:\n#{labels}"
  end

  # Renders conversation messages as "Customer:/Agent:" transcript lines.
  def format_messages_as_string(start_from: 0)
    conversation_messages(start_from: start_from).map do |msg|
      speaker = msg[:role] == 'user' ? 'Customer' : 'Agent'
      "#{speaker}: #{msg[:content]}\n"
    end.join
  end

  # Eligibility: at least 3 customer messages, not oversized, and for longer
  # threads the last message must be from the customer.
  def valid_conversation?(convo)
    return false if convo.nil?
    return false if convo.messages.incoming.count < 3
    return false if convo.messages.count > 100
    return false if convo.messages.count > 20 && !convo.messages.last.incoming?

    true
  end

  def event_name
    'label_suggestion'
  end

  # Label suggestions are not refinable; skip follow-up context assembly.
  def build_follow_up_context?
    false
  end
end
|
||||
42
research/chatwoot/lib/captain/reply_suggestion_service.rb
Normal file
42
research/chatwoot/lib/captain/reply_suggestion_service.rb
Normal file
@@ -0,0 +1,42 @@
|
||||
# Drafts a suggested agent reply for a conversation using a Liquid-rendered
# system prompt plus the formatted conversation transcript.
class Captain::ReplySuggestionService < Captain::BaseTaskService
  pattr_initialize [:account!, :conversation_display_id!, :user!]

  # Sends the system prompt and transcript to the LLM; returns the response
  # hash from the base service.
  def perform
    request_messages = [
      { role: 'system', content: system_prompt },
      { role: 'user', content: formatted_conversation }
    ]

    make_api_call(model: GPT_MODEL, messages: request_messages)
  end

  private

  # System prompt rendered from the bundled 'reply' Liquid template.
  def system_prompt
    render_liquid_template(prompt_from_file('reply'), prompt_variables)
  end

  # Variables exposed to the Liquid template (string keys, as Liquid expects).
  def prompt_variables
    {
      'channel_type' => conversation.inbox.channel_type,
      'agent_name' => user.name,
      'agent_signature' => user.message_signature.presence
    }
  end

  def render_liquid_template(template_content, variables = {})
    template = Liquid::Template.parse(template_content)
    template.render(variables)
  end

  # Conversation transcript truncated to the shared character budget.
  def formatted_conversation
    LlmFormatter::ConversationLlmFormatter.new(conversation).format(token_limit: TOKEN_LIMIT)
  end

  def event_name
    'reply_suggestion'
  end
end

Captain::ReplySuggestionService.prepend_mod_with('Captain::ReplySuggestionService')
|
||||
59
research/chatwoot/lib/captain/rewrite_service.rb
Normal file
59
research/chatwoot/lib/captain/rewrite_service.rb
Normal file
@@ -0,0 +1,59 @@
|
||||
# Rewrites agent-drafted message content via the LLM: tone changes,
# spelling/grammar fixes, and context-aware improvement.
class Captain::RewriteService < Captain::BaseTaskService
  pattr_initialize [:account!, :content!, :operation!, { conversation_display_id: nil }]

  TONE_OPERATIONS = %i[casual professional friendly confident straightforward].freeze
  ALLOWED_OPERATIONS = (%i[fix_spelling_grammar improve] + TONE_OPERATIONS).freeze

  # Dispatches to the requested rewrite operation.
  # @raise [ArgumentError] when the operation is not whitelisted (keeps #send safe)
  def perform
    operation_sym = operation.to_sym
    raise ArgumentError, "Invalid operation: #{operation}" unless ALLOWED_OPERATIONS.include?(operation_sym)

    send(operation_sym)
  end

  # One method per tone; each renders the shared tone_rewrite template.
  TONE_OPERATIONS.each do |tone|
    define_method(tone) do
      call_llm_with_prompt(tone_rewrite_prompt(tone.to_s))
    end
  end

  private

  def fix_spelling_grammar
    call_llm_with_prompt(prompt_from_file('fix_spelling_grammar'))
  end

  # Improves a draft using the full conversation as context.
  # conversation_display_id is optional on this service, so guard against a
  # missing conversation and return the service's standard error-hash shape
  # instead of raising NoMethodError on nil.
  def improve
    return { error: 'Conversation context required for improve', error_code: 400 } if conversation.nil?

    template = prompt_from_file('improve')

    system_prompt = render_liquid_template(template, {
      'conversation_context' => conversation.to_llm_text(include_contact_details: true),
      'draft_message' => content
    })

    call_llm_with_prompt(system_prompt, content)
  end

  # Shared LLM invocation; the user message defaults to the draft content.
  def call_llm_with_prompt(system_content, user_content = content)
    make_api_call(
      model: GPT_MODEL,
      messages: [
        { role: 'system', content: system_content },
        { role: 'user', content: user_content }
      ]
    )
  end

  def render_liquid_template(template_content, variables = {})
    Liquid::Template.parse(template_content).render(variables)
  end

  # Tone prompt rendered from the shared tone_rewrite Liquid template.
  def tone_rewrite_prompt(tone)
    template = prompt_from_file('tone_rewrite')
    render_liquid_template(template, 'tone' => tone)
  end

  # The operation itself doubles as the instrumentation event name.
  def event_name
    operation
  end
end
|
||||
19
research/chatwoot/lib/captain/summary_service.rb
Normal file
19
research/chatwoot/lib/captain/summary_service.rb
Normal file
@@ -0,0 +1,19 @@
|
||||
# Produces an LLM-generated summary of a conversation.
class Captain::SummaryService < Captain::BaseTaskService
  pattr_initialize [:account!, :conversation_display_id!]

  # Sends the summary prompt plus the conversation text; returns the base
  # service's response hash.
  def perform
    request_messages = [
      { role: 'system', content: prompt_from_file('summary') },
      { role: 'user', content: conversation.to_llm_text(include_contact_details: false) }
    ]

    make_api_call(model: GPT_MODEL, messages: request_messages)
  end

  private

  def event_name
    'summarize'
  end
end
|
||||
60
research/chatwoot/lib/captain/tool_instrumentation.rb
Normal file
60
research/chatwoot/lib/captain/tool_instrumentation.rb
Normal file
@@ -0,0 +1,60 @@
|
||||
# OpenTelemetry instrumentation for Captain tool-calling flows. Unlike the
# generic instrument_llm_call, the session span's output attribute carries
# just the response message, and each assistant generation is recorded as
# its own child span via record_generation.
module Captain::ToolInstrumentation
  extend ActiveSupport::Concern
  include Integrations::LlmInstrumentationConstants

  private

  # Custom instrumentation for tool flows - outputs just the message (not full hash)
  # Instrumentation is strictly best-effort: if tracing raises BEFORE the
  # block ran, the block is executed once un-instrumented; if tracing raises
  # AFTER, the block's result is returned as-is (tracked via `executed`).
  def instrument_tool_session(params)
    return yield unless ChatwootApp.otel_enabled?

    response = nil
    executed = false
    tracer.in_span(params[:span_name]) do |span|
      set_tool_session_attributes(span, params)
      response = yield
      executed = true
      # NOTE(review): response[:message] assumes a Hash response here, while
      # the next line guards with is_a?(Hash) — confirm responses are always hashes.
      span.set_attribute(ATTR_LANGFUSE_OBSERVATION_OUTPUT, response[:message] || response.to_json)
      set_tool_session_error_attributes(span, response) if response.is_a?(Hash)
    end
    response
  rescue StandardError => e
    ChatwootExceptionTracker.new(e, account: account).capture_exception
    # Never let an instrumentation failure drop the LLM call.
    executed ? response : yield
  end

  # Langfuse-compatible session attributes: user, session, tags, and the
  # serialized request messages as span input.
  def set_tool_session_attributes(span, params)
    span.set_attribute(ATTR_LANGFUSE_USER_ID, params[:account_id].to_s) if params[:account_id]
    span.set_attribute(ATTR_LANGFUSE_SESSION_ID, "#{params[:account_id]}_#{params[:conversation_id]}") if params[:conversation_id].present?
    span.set_attribute(ATTR_LANGFUSE_TAGS, [params[:feature_name]].to_json)
    span.set_attribute(ATTR_LANGFUSE_OBSERVATION_INPUT, params[:messages].to_json)
  end

  # Marks the span as errored when the response hash carries an error
  # (checks both symbol and string keys).
  def set_tool_session_error_attributes(span, response)
    error = response[:error] || response['error']
    return if error.blank?

    span.set_attribute(ATTR_GEN_AI_RESPONSE_ERROR, error.to_json)
    span.status = OpenTelemetry::Trace::Status.error(error.to_s.truncate(1000))
  end

  # Records one assistant generation as a child span (provider, model, token
  # usage, input/output). Failures are logged, never raised, so tracing can't
  # break the chat flow. Called from RubyLLM's on_end_message hook.
  def record_generation(chat, message, model)
    return unless ChatwootApp.otel_enabled?
    return unless message.respond_to?(:role) && message.role.to_s == 'assistant'

    tracer.in_span("llm.#{event_name}.generation") do |span|
      span.set_attribute(ATTR_GEN_AI_PROVIDER, 'openai')
      span.set_attribute(ATTR_GEN_AI_REQUEST_MODEL, model)
      span.set_attribute(ATTR_GEN_AI_USAGE_INPUT_TOKENS, message.input_tokens)
      span.set_attribute(ATTR_GEN_AI_USAGE_OUTPUT_TOKENS, message.output_tokens) if message.respond_to?(:output_tokens)
      span.set_attribute(ATTR_LANGFUSE_OBSERVATION_INPUT, format_chat_messages(chat))
      span.set_attribute(ATTR_LANGFUSE_OBSERVATION_OUTPUT, message.content.to_s) if message.respond_to?(:content)
    end
  rescue StandardError => e
    Rails.logger.warn "Failed to record generation: #{e.message}"
  end

  # Serializes all chat messages except the last (the generation being
  # recorded) as the span's input.
  def format_chat_messages(chat)
    chat.messages[0...-1].map { |m| { role: m.role.to_s, content: m.content.to_s } }.to_json
  end
end
|
||||
Reference in New Issue
Block a user