Restructure omni services and add Chatwoot research snapshot
This commit is contained in:
@@ -0,0 +1,130 @@
|
||||
require 'rails_helper'

# Specs for Internal::AccountAnalysis::AccountUpdaterService.
# The service persists threat-analysis results into account.internal_attributes
# and, when a threat is detected (or an error occurred), flags the account and
# notifies Discord via DiscordNotifierService.
RSpec.describe Internal::AccountAnalysis::AccountUpdaterService do
  let(:account) { create(:account) }
  let(:service) { described_class.new(account) }
  # Verifying double so no real webhook call is made; notify_flagged_account is stubbed to succeed.
  let(:discord_notifier) { instance_double(Internal::AccountAnalysis::DiscordNotifierService, notify_flagged_account: true) }

  before do
    allow(Internal::AccountAnalysis::DiscordNotifierService).to receive(:new).and_return(discord_notifier)
    allow(Rails.logger).to receive(:info)
  end

  describe '#update_with_analysis' do
    context 'when error_message is provided' do
      it 'saves the error and notifies Discord' do
        # Second positional argument is an error message; the analysis hash is empty.
        service.update_with_analysis({}, 'Analysis failed')

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Error: Analysis failed')
        expect(discord_notifier).to have_received(:notify_flagged_account).with(account)
      end
    end

    context 'when analysis is successful' do
      let(:analysis) do
        {
          'threat_level' => 'none',
          'threat_summary' => 'No threats detected',
          'recommendation' => 'allow'
        }
      end

      it 'saves the analysis results' do
        # NOTE(review): Time.current is stubbed with a String rather than a Time;
        # this only works because the service stores the value verbatim.
        # Consider ActiveSupport's travel_to for a more realistic stub.
        allow(Time).to receive(:current).and_return('2023-01-01 12:00:00')

        service.update_with_analysis(analysis)

        expect(account.internal_attributes['last_threat_scan_at']).to eq('2023-01-01 12:00:00')
        expect(account.internal_attributes['last_threat_scan_level']).to eq('none')
        expect(account.internal_attributes['last_threat_scan_summary']).to eq('No threats detected')
        expect(account.internal_attributes['last_threat_scan_recommendation']).to eq('allow')
      end

      it 'does not flag the account when threat level is none' do
        service.update_with_analysis(analysis)

        expect(account.internal_attributes).not_to include('security_flagged')
        expect(discord_notifier).not_to have_received(:notify_flagged_account)
      end
    end

    context 'when analysis detects high threat level' do
      let(:analysis) do
        {
          'threat_level' => 'high',
          'threat_summary' => 'Suspicious activity detected',
          'recommendation' => 'review',
          'illegal_activities_detected' => false
        }
      end

      it 'flags the account and notifies Discord' do
        service.update_with_analysis(analysis)

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Threat detected: Suspicious activity detected')
        expect(discord_notifier).to have_received(:notify_flagged_account).with(account)
        # Flagging emits two informational log lines.
        expect(Rails.logger).to have_received(:info).with("Flagging account #{account.id} due to threat level: high")
        expect(Rails.logger).to have_received(:info).with("Account #{account.id} has been flagged for security review")
      end
    end

    context 'when analysis detects medium threat level' do
      let(:analysis) do
        {
          'threat_level' => 'medium',
          'threat_summary' => 'Potential issues found',
          'recommendation' => 'review',
          'illegal_activities_detected' => false
        }
      end

      it 'flags the account and notifies Discord' do
        service.update_with_analysis(analysis)

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Threat detected: Potential issues found')
        expect(discord_notifier).to have_received(:notify_flagged_account).with(account)
      end
    end

    # A 'low' threat level still triggers flagging when illegal activities are detected.
    context 'when analysis detects illegal activities' do
      let(:analysis) do
        {
          'threat_level' => 'low',
          'threat_summary' => 'Minor issues found',
          'recommendation' => 'review',
          'illegal_activities_detected' => true
        }
      end

      it 'flags the account and notifies Discord' do
        service.update_with_analysis(analysis)

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Threat detected: Minor issues found')
        expect(discord_notifier).to have_received(:notify_flagged_account).with(account)
      end
    end

    # A 'block' recommendation also triggers flagging regardless of threat level.
    context 'when analysis recommends blocking' do
      let(:analysis) do
        {
          'threat_level' => 'low',
          'threat_summary' => 'Minor issues found',
          'recommendation' => 'block',
          'illegal_activities_detected' => false
        }
      end

      it 'flags the account and notifies Discord' do
        service.update_with_analysis(analysis)

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Threat detected: Minor issues found')
        expect(discord_notifier).to have_received(:notify_flagged_account).with(account)
      end
    end
  end
end
|
||||
@@ -0,0 +1,199 @@
|
||||
require 'rails_helper'

# Specs for Internal::AccountAnalysis::ContentEvaluatorService.
# The service runs content through RubyLLM moderation and maps the result onto
# a hash with string keys: threat_level, threat_summary, detected_threats,
# illegal_activities_detected, recommendation.
RSpec.describe Internal::AccountAnalysis::ContentEvaluatorService do
  let(:service) { described_class.new }
  let(:content) { 'This is some test content' }
  # Default: a clean moderation result (nothing flagged). Individual contexts
  # override this let to simulate flagged categories and scores.
  let(:mock_moderation_result) do
    instance_double(
      RubyLLM::Moderation,
      flagged?: false,
      flagged_categories: [],
      category_scores: {}
    )
  end

  before do
    # The service reads its API key from InstallationConfig.
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    allow(RubyLLM).to receive(:moderate).and_return(mock_moderation_result)
  end

  describe '#evaluate' do
    context 'when content is safe' do
      it 'returns safe evaluation with approval recommendation' do
        result = service.evaluate(content)

        expect(result).to include(
          'threat_level' => 'safe',
          'threat_summary' => 'No threats detected',
          'detected_threats' => [],
          'illegal_activities_detected' => false,
          'recommendation' => 'approve'
        )
      end

      it 'logs the evaluation results' do
        # Trailing space after "Threats:" — the threats list is empty here.
        expect(Rails.logger).to receive(:info).with('Moderation evaluation - Level: safe, Threats: ')
        service.evaluate(content)
      end
    end

    context 'when content is flagged' do
      let(:mock_moderation_result) do
        instance_double(
          RubyLLM::Moderation,
          flagged?: true,
          flagged_categories: %w[harassment hate],
          category_scores: { 'harassment' => 0.6, 'hate' => 0.3 }
        )
      end

      it 'returns flagged evaluation with review recommendation' do
        result = service.evaluate(content)

        # Max category score 0.6 maps to 'high' (see threat-level contexts below).
        expect(result).to include(
          'threat_level' => 'high',
          'threat_summary' => 'Content flagged for: harassment, hate',
          'detected_threats' => %w[harassment hate],
          'illegal_activities_detected' => false,
          'recommendation' => 'review'
        )
      end
    end

    context 'when content contains violence' do
      let(:mock_moderation_result) do
        instance_double(
          RubyLLM::Moderation,
          flagged?: true,
          flagged_categories: ['violence'],
          category_scores: { 'violence' => 0.9 }
        )
      end

      it 'marks illegal activities detected for violence' do
        result = service.evaluate(content)

        expect(result['illegal_activities_detected']).to be true
        expect(result['threat_level']).to eq('critical')
      end
    end

    context 'when content contains self-harm' do
      let(:mock_moderation_result) do
        instance_double(
          RubyLLM::Moderation,
          flagged?: true,
          flagged_categories: ['self-harm'],
          category_scores: { 'self-harm' => 0.85 }
        )
      end

      it 'marks illegal activities detected for self-harm' do
        result = service.evaluate(content)

        expect(result['illegal_activities_detected']).to be true
      end
    end

    context 'when content is blank' do
      let(:blank_content) { '' }

      it 'returns default evaluation without calling moderation API' do
        expect(RubyLLM).not_to receive(:moderate)

        result = service.evaluate(blank_content)

        # Blank content yields the same fallback payload as an evaluation failure,
        # but with an empty detected_threats list.
        expect(result).to include(
          'threat_level' => 'unknown',
          'threat_summary' => 'Failed to complete content evaluation',
          'detected_threats' => [],
          'illegal_activities_detected' => false,
          'recommendation' => 'review'
        )
      end
    end

    context 'when error occurs during evaluation' do
      before do
        allow(RubyLLM).to receive(:moderate).and_raise(StandardError.new('Test error'))
      end

      it 'logs error and returns default evaluation with error type' do
        expect(Rails.logger).to receive(:error).with('Error evaluating content: Test error')

        result = service.evaluate(content)

        expect(result).to include(
          'threat_level' => 'unknown',
          'threat_summary' => 'Failed to complete content evaluation',
          'detected_threats' => ['evaluation_failure'],
          'illegal_activities_detected' => false,
          'recommendation' => 'review'
        )
      end
    end

    # Threat level is derived from the maximum category score:
    # >= 0.8 critical, >= 0.5 high, >= 0.2 medium, otherwise low.
    context 'with threat level determination' do
      it 'returns critical for scores >= 0.8' do
        mock_result = instance_double(
          RubyLLM::Moderation,
          flagged?: true,
          flagged_categories: ['harassment'],
          category_scores: { 'harassment' => 0.85 }
        )
        allow(RubyLLM).to receive(:moderate).and_return(mock_result)

        result = service.evaluate(content)
        expect(result['threat_level']).to eq('critical')
      end

      it 'returns high for scores between 0.5 and 0.8' do
        mock_result = instance_double(
          RubyLLM::Moderation,
          flagged?: true,
          flagged_categories: ['harassment'],
          category_scores: { 'harassment' => 0.65 }
        )
        allow(RubyLLM).to receive(:moderate).and_return(mock_result)

        result = service.evaluate(content)
        expect(result['threat_level']).to eq('high')
      end

      it 'returns medium for scores between 0.2 and 0.5' do
        mock_result = instance_double(
          RubyLLM::Moderation,
          flagged?: true,
          flagged_categories: ['harassment'],
          category_scores: { 'harassment' => 0.35 }
        )
        allow(RubyLLM).to receive(:moderate).and_return(mock_result)

        result = service.evaluate(content)
        expect(result['threat_level']).to eq('medium')
      end

      it 'returns low for scores below 0.2' do
        mock_result = instance_double(
          RubyLLM::Moderation,
          flagged?: true,
          flagged_categories: ['harassment'],
          category_scores: { 'harassment' => 0.15 }
        )
        allow(RubyLLM).to receive(:moderate).and_return(mock_result)

        result = service.evaluate(content)
        expect(result['threat_level']).to eq('low')
      end
    end

    context 'with content truncation' do
      let(:long_content) { 'a' * 15_000 }

      it 'truncates content to 10000 characters before sending to moderation' do
        # The argument matcher pins the exact truncated payload.
        expect(RubyLLM).to receive(:moderate).with('a' * 10_000).and_return(mock_moderation_result)
        service.evaluate(long_content)
      end
    end
  end
end
|
||||
@@ -0,0 +1,73 @@
|
||||
require 'rails_helper'

# Specs for Internal::AccountAnalysis::DiscordNotifierService.
# The service posts a flagged-account summary to a Discord webhook whose URL
# is read from the ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL installation config.
RSpec.describe Internal::AccountAnalysis::DiscordNotifierService do
  let(:service) { described_class.new }
  let(:webhook_url) { 'https://discord.com/api/webhooks/123456789/some-token' }
  # Account pre-populated with the scan attributes the notification message reads.
  let(:account) do
    create(
      :account,
      internal_attributes: {
        'last_threat_scan_level' => 'high',
        'last_threat_scan_recommendation' => 'review',
        'illegal_activities_detected' => true,
        'last_threat_scan_summary' => 'Suspicious activity detected'
      }
    )
  end
  # let! — the user must exist before the notification is built (its email is included).
  let!(:user) { create(:user, account: account) }

  before do
    allow(Rails.logger).to receive(:info)
    allow(Rails.logger).to receive(:error)
  end

  describe '#notify_flagged_account' do
    context 'when webhook URL is configured' do
      before do
        create(:installation_config, name: 'ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL', value: webhook_url)
        stub_request(:post, webhook_url).to_return(status: 200)
      end

      it 'sends notification to Discord webhook' do
        service.notify_flagged_account(account)
        # The posted payload's `content` field must mention every key detail of the scan.
        expect(WebMock).to have_requested(:post, webhook_url)
          .with(
            body: hash_including(
              content: include(
                "Account ID: #{account.id}",
                "User Email: #{user.email}",
                'Threat Level: high',
                '**System Recommendation:** review',
                '⚠️ Potential illegal activities detected',
                'Suspicious activity detected'
              )
            )
          )
      end
    end

    context 'when webhook URL is not configured' do
      it 'logs error and does not make HTTP request' do
        service.notify_flagged_account(account)

        expect(Rails.logger).to have_received(:error)
          .with('Cannot send Discord notification: No webhook URL configured')
        expect(WebMock).not_to have_requested(:post, webhook_url)
      end
    end

    context 'when HTTP request fails' do
      before do
        create(:installation_config, name: 'ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL', value: webhook_url)
        stub_request(:post, webhook_url).to_raise(StandardError.new('Connection failed'))
      end

      it 'catches exception and logs error' do
        # Must not raise — failures are swallowed and logged.
        service.notify_flagged_account(account)

        expect(Rails.logger).to have_received(:error)
          .with('Error sending Discord notification: Connection failed')
      end
    end
  end
end
|
||||
@@ -0,0 +1,62 @@
|
||||
# frozen_string_literal: true

require 'rails_helper'

# Specs for Internal::AccountAnalysis::ThreatAnalyserService.
# The service orchestrates: scrape website content -> evaluate it -> persist
# the analysis via AccountUpdaterService. All three collaborators are doubled.
RSpec.describe Internal::AccountAnalysis::ThreatAnalyserService do
  subject { described_class.new(account) }

  let(:account) { create(:account) }
  let(:user) { create(:user, email: 'test@example.com', account: account) }
  let(:website_scraper) { instance_double(Internal::AccountAnalysis::WebsiteScraperService) }
  let(:content_evaluator) { instance_double(Internal::AccountAnalysis::ContentEvaluatorService) }
  let(:account_updater) { instance_double(Internal::AccountAnalysis::AccountUpdaterService) }
  let(:website_content) { 'This is the website content' }
  let(:threat_analysis) { { 'threat_level' => 'medium' } }

  before do
    # Force lazy `let(:user)` to materialize before the service runs.
    user

    # NOTE(review): the scraper is expected to be constructed with 'example.com' —
    # presumably the domain derived from the account/user factory; verify against
    # the factories if this stub stops matching.
    allow(Internal::AccountAnalysis::WebsiteScraperService).to receive(:new).with('example.com').and_return(website_scraper)
    allow(Internal::AccountAnalysis::ContentEvaluatorService).to receive(:new).and_return(content_evaluator)
    allow(Internal::AccountAnalysis::AccountUpdaterService).to receive(:new).with(account).and_return(account_updater)
  end

  describe '#perform' do
    before do
      allow(website_scraper).to receive(:perform).and_return(website_content)
      allow(content_evaluator).to receive(:evaluate).and_return(threat_analysis)
      allow(account_updater).to receive(:update_with_analysis)
      allow(Rails.logger).to receive(:info)
    end

    it 'performs threat analysis and updates the account' do
      # The evaluator receives domain + scraped content combined via a heredoc.
      expected_content = <<~MESSAGE
        Domain: example.com
        Content: This is the website content
      MESSAGE

      expect(website_scraper).to receive(:perform)
      expect(content_evaluator).to receive(:evaluate).with(expected_content)
      expect(account_updater).to receive(:update_with_analysis).with(threat_analysis)
      expect(Rails.logger).to receive(:info).with("Completed threat analysis: level=medium for account-id: #{account.id}")

      result = subject.perform
      expect(result).to eq(threat_analysis)
    end

    context 'when website content is blank' do
      before do
        allow(website_scraper).to receive(:perform).and_return(nil)
      end

      it 'logs info and updates account with error' do
        expect(Rails.logger).to receive(:info).with("Skipping threat analysis for account #{account.id}: No website content found")
        # Error path: analysis is nil, second argument carries the scraping error message.
        expect(account_updater).to receive(:update_with_analysis).with(nil, 'Scraping error: No content found')
        expect(content_evaluator).not_to receive(:evaluate)

        result = subject.perform
        expect(result).to be_nil
      end
    end
  end
end
|
||||
@@ -0,0 +1,45 @@
|
||||
require 'rails_helper'

# Specs for Internal::AccountAnalysis::WebsiteScraperService.
# The service fetches a domain's page via HTTParty and returns its textual
# content, or nil when the domain is missing or the request fails.
RSpec.describe Internal::AccountAnalysis::WebsiteScraperService do
  describe '#perform' do
    let(:service) { described_class.new(domain) }
    let(:html_content) { '<html><body>This is sample website content</body></html>' }

    before do
      allow(Rails.logger).to receive(:info)
      allow(Rails.logger).to receive(:error)
    end

    context 'when domain is nil' do
      let(:domain) { nil }

      it 'returns nil' do
        expect(service.perform).to be_nil
      end
    end

    context 'when domain is present' do
      let(:domain) { 'example.com' }

      before do
        # HTTParty.get is stubbed with a plain String rather than a response object.
        allow(HTTParty).to receive(:get).and_return(html_content)
      end

      # NOTE(review): the description says "stripped and normalized", yet the
      # expectation is the raw stubbed HTML unchanged. Either the service's
      # normalization is a no-op on this input or the description overstates —
      # confirm against the service implementation.
      it 'returns the stripped and normalized content' do
        expect(service.perform).to eq(html_content)
      end
    end

    context 'when an error occurs' do
      let(:domain) { 'example.com' }

      before do
        allow(HTTParty).to receive(:get).and_raise(StandardError.new('Error'))
      end

      it 'returns nil' do
        # Failures are swallowed; perform degrades to nil rather than raising.
        expect(service.perform).to be_nil
      end
    end
  end
end
|
||||
@@ -0,0 +1,134 @@
|
||||
require 'rails_helper'

# Specs for Internal::Accounts::InternalAttributesService.
# The service provides validated get/set access to account.internal_attributes,
# plus a manually_managed_features accessor that sanitizes and validates feature
# names against the business + enterprise plan feature lists.
RSpec.describe Internal::Accounts::InternalAttributesService do
  let!(:account) { create(:account, internal_attributes: { 'test_key' => 'test_value' }) }
  let(:service) { described_class.new(account) }
  let(:business_features) { Enterprise::Billing::HandleStripeEventService::BUSINESS_PLAN_FEATURES }
  let(:enterprise_features) { Enterprise::Billing::HandleStripeEventService::ENTERPRISE_PLAN_FEATURES }

  describe '#initialize' do
    it 'sets the account' do
      expect(service.account).to eq(account)
    end
  end

  describe '#get' do
    it 'returns the value for a valid key' do
      # Manually set the value first since the key needs to be in VALID_KEYS
      allow(service).to receive(:validate_key!).and_return(true)
      account.internal_attributes['manually_managed_features'] = ['test']

      expect(service.get('manually_managed_features')).to eq(['test'])
    end

    it 'raises an error for an invalid key' do
      expect { service.get('invalid_key') }.to raise_error(ArgumentError, 'Invalid internal attribute key: invalid_key')
    end
  end

  describe '#set' do
    it 'sets the value for a valid key' do
      # Stub the validation to allow our test key
      allow(service).to receive(:validate_key!).and_return(true)

      service.set('manually_managed_features', %w[feature1 feature2])
      account.reload

      expect(account.internal_attributes['manually_managed_features']).to eq(%w[feature1 feature2])
    end

    it 'raises an error for an invalid key' do
      expect { service.set('invalid_key', 'value') }.to raise_error(ArgumentError, 'Invalid internal attribute key: invalid_key')
    end

    it 'creates internal_attributes hash if it is empty' do
      account.update(internal_attributes: {})

      # Stub the validation to allow our test key
      allow(service).to receive(:validate_key!).and_return(true)

      service.set('manually_managed_features', ['feature1'])
      account.reload

      expect(account.internal_attributes['manually_managed_features']).to eq(['feature1'])
    end
  end

  describe '#manually_managed_features' do
    it 'returns an empty array when no features are set' do
      expect(service.manually_managed_features).to eq([])
    end

    it 'returns the features when they are set' do
      account.update(internal_attributes: { 'manually_managed_features' => %w[feature1 feature2] })

      expect(service.manually_managed_features).to eq(%w[feature1 feature2])
    end
  end

  # The writer accepts scalar or array input and sanitizes it: validates against
  # valid_feature_list, strips whitespace, drops blanks and duplicates.
  describe '#manually_managed_features=' do
    # Use a real SLA feature which is in the BUSINESS_PLAN_FEATURES
    let(:valid_feature) { 'sla' }

    before do
      # Make sure the feature is allowed through validation
      allow(service).to receive(:valid_feature_list).and_return([valid_feature, 'custom_roles'])
    end

    it 'saves features as an array' do
      service.manually_managed_features = valid_feature
      account.reload

      expect(account.internal_attributes['manually_managed_features']).to eq([valid_feature])
    end

    it 'handles nil input' do
      service.manually_managed_features = nil
      account.reload

      expect(account.internal_attributes['manually_managed_features']).to eq([])
    end

    it 'handles array input' do
      service.manually_managed_features = [valid_feature, 'custom_roles']
      account.reload

      expect(account.internal_attributes['manually_managed_features']).to eq([valid_feature, 'custom_roles'])
    end

    it 'filters out invalid features' do
      service.manually_managed_features = [valid_feature, 'invalid_feature']
      account.reload

      expect(account.internal_attributes['manually_managed_features']).to eq([valid_feature])
    end

    it 'removes duplicates' do
      service.manually_managed_features = [valid_feature, valid_feature]
      account.reload

      expect(account.internal_attributes['manually_managed_features']).to eq([valid_feature])
    end

    it 'removes empty strings' do
      service.manually_managed_features = [valid_feature, '', ' ']
      account.reload

      expect(account.internal_attributes['manually_managed_features']).to eq([valid_feature])
    end

    it 'trims whitespace' do
      service.manually_managed_features = [" #{valid_feature} "]
      account.reload

      expect(account.internal_attributes['manually_managed_features']).to eq([valid_feature])
    end
  end

  describe '#valid_feature_list' do
    it 'returns a combination of business and enterprise features' do
      expect(service.valid_feature_list).to include(*business_features)
      expect(service.valid_feature_list).to include(*enterprise_features)
    end
  end
end
|
||||
@@ -0,0 +1,79 @@
|
||||
require 'rails_helper'

# Specs for Internal::ReconcilePlanConfigService.
# On the community plan the service disables premium account features and
# resets premium installation configs to defaults (setting a Redis warning flag
# when it had to reset anything); on paid plans it leaves everything alone and
# clears the warning flag.
RSpec.describe Internal::ReconcilePlanConfigService do
  describe '#perform' do
    let(:service) { described_class.new }

    context 'when pricing plan is community' do
      before do
        allow(ChatwootHub).to receive(:pricing_plan).and_return('community')
      end

      it 'disables the premium features for accounts' do
        account = create(:account)
        account.enable_features!('disable_branding', 'audit_logs', 'captain_integration')
        account_with_captain = create(:account)
        account_with_captain.enable_features!('captain_integration')
        disable_branding_account = create(:account)
        disable_branding_account.enable_features!('disable_branding')
        service.perform
        expect(account.reload.enabled_features.keys).not_to include('captain_integration', 'disable_branding', 'audit_logs')
        expect(account_with_captain.reload.enabled_features.keys).not_to include('captain_integration')
        expect(disable_branding_account.reload.enabled_features.keys).not_to include('disable_branding')
      end

      it 'creates a premium config reset warning if config was modified' do
        # 'custom-name' differs from the default, so a reset occurs and the flag is set.
        create(:installation_config, name: 'INSTALLATION_NAME', value: 'custom-name')
        service.perform
        expect(Redis::Alfred.get(Redis::Alfred::CHATWOOT_INSTALLATION_CONFIG_RESET_WARNING)).to eq('true')
      end

      it 'will not create a premium config reset warning if config is not modified' do
        # Value already matches the default; nothing to reset, no warning flag.
        create(:installation_config, name: 'INSTALLATION_NAME', value: 'Chatwoot')
        service.perform
        expect(Redis::Alfred.get(Redis::Alfred::CHATWOOT_INSTALLATION_CONFIG_RESET_WARNING)).to be_nil
      end

      it 'updates the premium configs to default' do
        create(:installation_config, name: 'INSTALLATION_NAME', value: 'custom-name')
        create(:installation_config, name: 'LOGO', value: '/custom-path/logo.svg')
        service.perform
        expect(InstallationConfig.find_by(name: 'INSTALLATION_NAME').value).to eq('Chatwoot')
        expect(InstallationConfig.find_by(name: 'LOGO').value).to eq('/brand-assets/logo.svg')
      end
    end

    context 'when pricing plan is not community' do
      before do
        allow(ChatwootHub).to receive(:pricing_plan).and_return('enterprise')
      end

      it 'unset premium config warning on upgrade' do
        Redis::Alfred.set(Redis::Alfred::CHATWOOT_INSTALLATION_CONFIG_RESET_WARNING, true)
        service.perform
        expect(Redis::Alfred.get(Redis::Alfred::CHATWOOT_INSTALLATION_CONFIG_RESET_WARNING)).to be_nil
      end

      it 'does not disable the premium features for accounts' do
        account = create(:account)
        account.enable_features!('disable_branding', 'audit_logs', 'captain_integration')
        account_with_captain = create(:account)
        account_with_captain.enable_features!('captain_integration')
        disable_branding_account = create(:account)
        disable_branding_account.enable_features!('disable_branding')
        service.perform
        expect(account.reload.enabled_features.keys).to include('captain_integration', 'disable_branding', 'audit_logs')
        expect(account_with_captain.reload.enabled_features.keys).to include('captain_integration')
        expect(disable_branding_account.reload.enabled_features.keys).to include('disable_branding')
      end

      it 'does not update the LOGO config' do
        create(:installation_config, name: 'INSTALLATION_NAME', value: 'custom-name')
        create(:installation_config, name: 'LOGO', value: '/custom-path/logo.svg')
        service.perform
        expect(InstallationConfig.find_by(name: 'INSTALLATION_NAME').value).to eq('custom-name')
        expect(InstallationConfig.find_by(name: 'LOGO').value).to eq('/custom-path/logo.svg')
      end
    end
  end
end
|
||||
Reference in New Issue
Block a user