Restructure omni services and add Chatwoot research snapshot
This commit is contained in:
@@ -0,0 +1,54 @@
|
||||
# Persists the outcome of an account threat analysis and flags/notifies
# when the analysis indicates a security risk.
class Internal::AccountAnalysis::AccountUpdaterService
  def initialize(account)
    @account = account
  end

  # Entry point: records either an analysis failure (when error_message is
  # given) or the analysis results, flagging the account when warranted.
  #
  # analysis      - Hash with 'threat_level', 'threat_summary',
  #                 'illegal_activities_detected' and 'recommendation' keys
  #                 (may be nil when error_message is present).
  # error_message - optional String describing why the analysis failed.
  def update_with_analysis(analysis, error_message = nil)
    if error_message
      save_error(error_message)
      notify_on_discord
      return
    end

    save_analysis_results(analysis)
    flag_account_if_needed(analysis)
  end

  private

  # Marks the account as flagged with the failure reason.
  def save_error(error_message)
    @account.internal_attributes['security_flagged'] = true
    @account.internal_attributes['security_flag_reason'] = "Error: #{error_message}"
    # save! for consistency with save_analysis_results — a plain `save` here
    # would silently drop the security flag on a validation failure.
    @account.save!
  end

  # Stores the scan results on the account. Also persists
  # 'illegal_activities_detected', which DiscordNotifierService reads back
  # from internal_attributes when building its notification message.
  def save_analysis_results(analysis)
    @account.internal_attributes['last_threat_scan_at'] = Time.current
    @account.internal_attributes['last_threat_scan_level'] = analysis['threat_level']
    @account.internal_attributes['last_threat_scan_summary'] = analysis['threat_summary']
    @account.internal_attributes['last_threat_scan_recommendation'] = analysis['recommendation']
    @account.internal_attributes['illegal_activities_detected'] = analysis['illegal_activities_detected']
    @account.save!
  end

  # Flags the account when the analysis crosses the risk threshold, then
  # notifies Discord for any non-'none' threat level.
  def flag_account_if_needed(analysis)
    return if analysis['threat_level'] == 'none'

    if %w[high medium].include?(analysis['threat_level']) ||
       analysis['illegal_activities_detected'] == true ||
       analysis['recommendation'] == 'block'

      @account.internal_attributes['security_flagged'] = true
      @account.internal_attributes['security_flag_reason'] = "Threat detected: #{analysis['threat_summary']}"
      @account.save!

      Rails.logger.info("Flagging account #{@account.id} due to threat level: #{analysis['threat_level']}")
    end

    # NOTE(review): this notifies even when the account was NOT flagged above
    # (e.g. threat_level 'low' with recommendation 'approve') — confirm that
    # is intended before tightening.
    notify_on_discord
  end

  def notify_on_discord
    Rails.logger.info("Account #{@account.id} has been flagged for security review")
    Internal::AccountAnalysis::DiscordNotifierService.new.notify_flagged_account(@account)
  end
end
|
||||
@@ -0,0 +1,77 @@
|
||||
# Runs LLM-based moderation over scraped website content and normalises the
# result into the analysis Hash consumed by AccountUpdaterService.
class Internal::AccountAnalysis::ContentEvaluatorService
  include Integrations::LlmInstrumentation

  def initialize
    Llm::Config.initialize!
  end

  # Evaluates the given content. Returns a Hash with 'threat_level',
  # 'threat_summary', 'detected_threats', 'illegal_activities_detected' and
  # 'recommendation' keys. Falls back to a conservative default for blank
  # input or any moderation failure.
  def evaluate(content)
    return default_evaluation if content.blank?

    # Only the first 10k characters are submitted to the moderation endpoint.
    result = instrument_moderation_call(instrumentation_params(content)) do
      RubyLLM.moderate(content.to_s[0...10_000])
    end

    build_evaluation(result)
  rescue StandardError => e
    handle_evaluation_error(e)
  end

  private

  # Instrumentation metadata for the moderation span.
  def instrumentation_params(content)
    {
      span_name: 'llm.internal.content_moderation',
      model: 'text-moderation-latest',
      input: content,
      feature_name: 'content_evaluator'
    }
  end

  # Normalises the raw moderation result into the analysis Hash.
  def build_evaluation(result)
    is_flagged = result.flagged?
    flagged_categories = result.flagged_categories

    summary =
      if is_flagged
        "Content flagged for: #{flagged_categories.join(', ')}"
      else
        'No threats detected'
      end

    evaluation = {
      'threat_level' => is_flagged ? determine_threat_level(result) : 'safe',
      'threat_summary' => summary,
      'detected_threats' => flagged_categories,
      'illegal_activities_detected' => flagged_categories.any? { |category| category.include?('violence') || category.include?('self-harm') },
      'recommendation' => is_flagged ? 'review' : 'approve'
    }

    log_evaluation_results(evaluation)
    evaluation
  end

  # Maps the highest moderation category score onto a coarse threat level.
  def determine_threat_level(result)
    top_score = result.category_scores.values.max || 0

    if top_score >= 0.8
      'critical'
    elsif top_score >= 0.5
      'high'
    elsif top_score >= 0.2
      'medium'
    else
      'low'
    end
  end

  # Conservative fallback used for blank input and moderation failures.
  def default_evaluation(error_type = nil)
    threats = error_type ? [error_type] : []

    {
      'threat_level' => 'unknown',
      'threat_summary' => 'Failed to complete content evaluation',
      'detected_threats' => threats,
      'illegal_activities_detected' => false,
      'recommendation' => 'review'
    }
  end

  def log_evaluation_results(evaluation)
    Rails.logger.info("Moderation evaluation - Level: #{evaluation['threat_level']}, Threats: #{evaluation['detected_threats'].join(', ')}")
  end

  # Logs the failure and returns the default evaluation tagged with the error.
  def handle_evaluation_error(error)
    Rails.logger.error("Error evaluating content: #{error.message}")
    default_evaluation('evaluation_failure')
  end
end
|
||||
@@ -0,0 +1,47 @@
|
||||
# Posts a notification about a security-flagged account to the Discord
# webhook configured via InstallationConfig.
class Internal::AccountAnalysis::DiscordNotifierService
  # Sends the notification; a missing webhook URL or any transport error is
  # logged and swallowed (best-effort notification).
  def notify_flagged_account(account)
    if webhook_url.blank?
      Rails.logger.error('Cannot send Discord notification: No webhook URL configured')
      return
    end

    payload = build_message(account)
    HTTParty.post(
      webhook_url,
      body: payload.to_json,
      headers: { 'Content-Type' => 'application/json' }
    )

    Rails.logger.info("Discord notification sent for flagged account #{account.id}")
  rescue StandardError => e
    Rails.logger.error("Error sending Discord notification: #{e.message}")
  end

  private

  # Builds the Discord message body from the account's stored scan results.
  def build_message(account)
    attrs = account.internal_attributes
    owner = account.users.order(id: :asc).first

    # NOTE(review): 'illegal_activities_detected' is read back from
    # internal_attributes — verify the updater actually persists that key.
    body = <<~MESSAGE
      ---
      An account has been flagged in our security system with the following details:

      🆔 **Account Details:**
      Account ID: #{account.id}
      User Email: #{owner&.email || 'N/A'}
      Threat Level: #{attrs['last_threat_scan_level']}

      🔎 **System Recommendation:** #{attrs['last_threat_scan_recommendation']}
      #{attrs['illegal_activities_detected'] ? '⚠️ Potential illegal activities detected' : 'No illegal activities detected'}

      📝 **Findings:**
      #{attrs['last_threat_scan_summary']}
    MESSAGE

    { content: body }
  end

  # Webhook endpoint, memoised; nil when not configured.
  def webhook_url
    @webhook_url ||= InstallationConfig.find_by(name: 'ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL')&.value
  end
end
|
||||
@@ -0,0 +1,31 @@
|
||||
# Prompt templates for the account threat-analysis LLM calls.
class Internal::AccountAnalysis::PromptsService
  # Builds the threat-analysis prompt embedding the scraped website content.
  def self.threat_analyser(content)
    <<~PROMPT
      Analyze the following website content for potential security threats, scams, or illegal activities.

      Focus on identifying:
      1. Phishing attempts
      2. Fraudulent business practices
      3. Malware distribution
      4. Illegal product/service offerings
      5. Money laundering indicators
      6. Identity theft schemes

      Always classify websites under construction or without content to be a medium.

      Website content:
      #{content}

      Provide your analysis in the following JSON format:
      {
        "threat_level": "none|low|medium|high",
        "threat_summary": "Brief summary of findings",
        "detected_threats": ["threat1", "threat2"],
        "illegal_activities_detected": true|false,
        "recommendation": "approve|review|block"
      }
    PROMPT
  end
end
|
||||
@@ -0,0 +1,43 @@
|
||||
# Orchestrates the threat-analysis pipeline for an account: scrape the
# signup email's domain, run content moderation, and persist the outcome.
class Internal::AccountAnalysis::ThreatAnalyserService
  def initialize(account)
    @account = account
    @user = account.users.order(id: :asc).first
    @domain = extract_domain_from_email(@user&.email)
  end

  # Runs the full analysis. Returns the analysis Hash, or nil when the
  # account has no usable domain or the website yielded no content.
  def perform
    if @domain.blank?
      Rails.logger.info("Skipping threat analysis for account #{@account.id}: No domain found")
      return
    end

    page_content = Internal::AccountAnalysis::WebsiteScraperService.new(@domain).perform
    if page_content.blank?
      Rails.logger.info("Skipping threat analysis for account #{@account.id}: No website content found")
      updater.update_with_analysis(nil, 'Scraping error: No content found')
      return
    end

    analysis = Internal::AccountAnalysis::ContentEvaluatorService.new.evaluate(build_payload(page_content))
    Rails.logger.info("Completed threat analysis: level=#{analysis['threat_level']} for account-id: #{@account.id}")

    updater.update_with_analysis(analysis)

    analysis
  end

  private

  # Memoised updater shared by the error and success paths.
  def updater
    @updater ||= Internal::AccountAnalysis::AccountUpdaterService.new(@account)
  end

  # Combines the domain and scraped content into the moderation input.
  def build_payload(page_content)
    <<~MESSAGE
      Domain: #{@domain}
      Content: #{page_content}
    MESSAGE
  end

  # Returns the domain part of the email address, or nil when absent.
  def extract_domain_from_email(email)
    return nil if email.blank?

    email.split('@').last
  rescue StandardError => e
    Rails.logger.error("Error extracting domain from email #{email}: #{e.message}")
    nil
  end
end
|
||||
@@ -0,0 +1,32 @@
|
||||
# Fetches the homepage content of a domain so it can be scanned for threats.
class Internal::AccountAnalysis::WebsiteScraperService
  # Hard cap on the HTTP round-trip so a slow or unresponsive host cannot
  # stall the analysis job indefinitely (the original call had no timeout).
  REQUEST_TIMEOUT_SECONDS = 10

  def initialize(domain)
    @domain = domain
  end

  # Returns the raw response body as a String, or nil when the domain is
  # blank or the request fails (errors are logged, not raised).
  def perform
    return nil if @domain.blank?

    Rails.logger.info("Scraping website: #{external_link}")

    begin
      response = HTTParty.get(external_link, follow_redirects: true, timeout: REQUEST_TIMEOUT_SECONDS)
      # NOTE(review): the body is returned even for non-2xx responses —
      # confirm downstream analysis tolerates error pages.
      response.to_s
    rescue StandardError => e
      Rails.logger.error("Error scraping website for domain #{@domain}: #{e.message}")
      nil
    end
  end

  private

  # URL actually requested, derived from the configured domain.
  def external_link
    sanitize_url(@domain)
  end

  # Prefixes https:// when the stored value is a bare domain.
  def sanitize_url(domain)
    url = domain
    url = "https://#{domain}" unless domain.start_with?('http://', 'https://')
    Rails.logger.info("Sanitized URL: #{url}")
    url
  end
end
|
||||
Reference in New Issue
Block a user