Restructure omni services and add Chatwoot research snapshot

This commit is contained in:
Ruslan Bakiev
2026-02-21 11:11:27 +07:00
parent edea7a0034
commit b73babbbf6
7732 changed files with 978203 additions and 32 deletions

View File

@@ -0,0 +1,65 @@
class Account::ContactsExportJob < ApplicationJob
  queue_as :low

  # Exports the account's contacts to a CSV attached to the account,
  # then emails the requesting user a download link.
  #
  # account_id   - id of the Account to export from
  # user_id      - id of the requesting account user (used for filtering + mail)
  # column_names - requested columns; filtered against Contact.column_names
  # params       - optional filter payload / label
  def perform(account_id, user_id, column_names, params)
    @account = Account.find(account_id)
    # Guard against nil params: the original code dereferenced
    # @params[:label] without a nil check and raised NoMethodError.
    @params = params || {}
    @account_user = @account.users.find(user_id)

    headers = valid_headers(column_names)
    generate_csv(headers)
    send_mail
  end

  private

  # Builds the CSV (header row + one row per contact) and attaches it.
  def generate_csv(headers)
    csv_data = CSV.generate do |csv|
      csv << headers
      contacts.each do |contact|
        csv << headers.map { |header| contact.send(header) }
      end
    end
    attach_export_file(csv_data)
  end

  # Chooses the export scope:
  # - filter payload present -> run the filter service
  # - label present          -> resolved contacts tagged with that label
  # - otherwise              -> all resolved contacts
  def contacts
    # .present? already returns false for an empty array, so the previous
    # extra `.any?` check was redundant.
    if @params[:payload].present?
      result = ::Contacts::FilterService.new(@account, @account_user, @params).perform
      result[:contacts]
    elsif @params[:label].present?
      @account.contacts.resolved_contacts(use_crm_v2: @account.feature_enabled?('crm_v2')).tagged_with(@params[:label], any: true)
    else
      @account.contacts.resolved_contacts(use_crm_v2: @account.feature_enabled?('crm_v2'))
    end
  end

  # Keeps only real Contact columns; falls back to the default set.
  def valid_headers(column_names)
    (column_names.presence || default_columns) & Contact.column_names
  end

  def attach_export_file(csv_data)
    return if csv_data.blank?

    @account.contacts_export.attach(
      io: StringIO.new(csv_data),
      filename: "#{@account.name}_#{@account.id}_contacts.csv",
      content_type: 'text/csv'
    )
  end

  def send_mail
    file_url = account_contact_export_url
    mailer = AdministratorNotifications::AccountNotificationMailer.with(account: @account)
    mailer.contact_export_complete(file_url, @account_user.email)&.deliver_later
  end

  def account_contact_export_url
    Rails.application.routes.url_helpers.rails_blob_url(@account.contacts_export)
  end

  def default_columns
    %w[id name email phone_number]
  end
end

View File

@@ -0,0 +1,10 @@
class Account::ConversationsResolutionSchedulerJob < ApplicationJob
  queue_as :scheduled_jobs

  # Fans out a resolution job per auto-resolve-enabled account,
  # iterating in batches of 100 to keep memory bounded.
  def perform
    Account.with_auto_resolve.find_each(batch_size: 100) do |acct|
      Conversations::ResolutionJob.perform_later(account: acct)
    end
  end
end
Account::ConversationsResolutionSchedulerJob.prepend_mod_with('Account::ConversationsResolutionSchedulerJob')

View File

@@ -0,0 +1,44 @@
class ActionCableBroadcastJob < ApplicationJob
  queue_as :critical
  include Events::Types

  # Events for which the conversation state is re-read before broadcasting.
  CONVERSATION_UPDATE_EVENTS = [
    CONVERSATION_READ,
    CONVERSATION_UPDATED,
    TEAM_CHANGED,
    ASSIGNEE_CHANGED,
    CONVERSATION_STATUS_CHANGED
  ].freeze

  # Broadcasts `event_name` with `data` to each ActionCable stream in `members`.
  def perform(members, event_name, data)
    return if members.blank?

    payload = prepare_broadcast_data(event_name, data)
    broadcast_to_members(members, event_name, payload)
  end

  private

  # Ensures that only the latest available data is sent to prevent UI issues
  # caused by out-of-order events during high-traffic periods. For the
  # conversation-update events we re-read the conversation and send its
  # fresh push payload instead of the (possibly stale) event data.
  def prepare_broadcast_data(event_name, data)
    return data unless CONVERSATION_UPDATE_EVENTS.include?(event_name)

    account = Account.find(data[:account_id])
    conversation = account.conversations.find_by!(display_id: data[:id])
    conversation.push_event_data.merge(account_id: data[:account_id])
  end

  # One broadcast per stream; the message envelope is identical for all.
  def broadcast_to_members(members, event_name, payload)
    message = { event: event_name, data: payload }
    members.each { |member| ActionCable.server.broadcast(member, message) }
  end
end

View File

@@ -0,0 +1,7 @@
class AgentBots::WebhookJob < WebhookJob
  queue_as :high

  # Same delivery mechanics as WebhookJob, but tagged as an agent-bot
  # webhook and run on the high-priority queue.
  def perform(url, payload, webhook_type = :agent_bot_webhook)
    # Bare `super` forwards the exact same arguments to WebhookJob#perform.
    super
  end
end

View File

@@ -0,0 +1,37 @@
class Agents::DestroyJob < ApplicationJob
  queue_as :low

  # Detaches a user from an account: notification settings, team and inbox
  # memberships, and conversation assignments — all in one transaction so a
  # partial detach cannot be left behind.
  def perform(account, user)
    ActiveRecord::Base.transaction do
      destroy_notification_setting(account, user)
      remove_user_from_teams(account, user)
      remove_user_from_inboxes(account, user)
      unassign_conversations(account, user)
    end
  end

  private

  def remove_user_from_inboxes(account, user)
    inbox_ids = account.inboxes.pluck(:id)
    user.inbox_members.where(inbox_id: inbox_ids).destroy_all
  end

  def remove_user_from_teams(account, user)
    team_ids = account.teams.pluck(:id)
    user.team_members.where(team_id: team_ids).destroy_all
  end

  def destroy_notification_setting(account, user)
    user.notification_settings.find_by(account_id: account.id)&.destroy!
  end

  def unassign_conversations(account, user)
    # update_all skips validations/callbacks deliberately: bulk unassign.
    # rubocop:disable Rails/SkipsModelValidations
    user.assigned_conversations.where(account: account).in_batches.update_all(assignee_id: nil)
    # rubocop:enable Rails/SkipsModelValidations
  end
end

View File

@@ -0,0 +1,8 @@
class ApplicationJob < ActiveJob::Base
  # Jobs whose serialized arguments can no longer be deserialized (e.g. the
  # backing record was deleted before the job ran) are dropped with a log line.
  # https://api.rubyonrails.org/v5.2.1/classes/ActiveJob/Exceptions/ClassMethods.html
  discard_on ActiveJob::DeserializationError do |job, error|
    serialized_args = job.instance_variable_get(:@serialized_arguments)
    Rails.logger.info("Skipping #{job.class} with #{serialized_args} because of ActiveJob::DeserializationError (#{error.message})")
  end
end

View File

@@ -0,0 +1,22 @@
class AutoAssignment::AssignmentJob < ApplicationJob
  queue_as :default

  # Runs bulk auto-assignment for a single inbox. Vanished inboxes are
  # ignored; failures are logged (and re-raised only in the test env).
  def perform(inbox_id:)
    inbox = Inbox.find_by(id: inbox_id)
    return if inbox.nil?

    assigned = AutoAssignment::AssignmentService.new(inbox: inbox).perform_bulk_assignment(limit: bulk_assignment_limit)
    Rails.logger.info "Assigned #{assigned} conversations for inbox #{inbox.id}"
  rescue StandardError => e
    Rails.logger.error "Bulk assignment failed for inbox #{inbox_id}: #{e.message}"
    raise e if Rails.env.test?
  end

  private

  # Per-run assignment cap; override via AUTO_ASSIGNMENT_BULK_LIMIT (default 100).
  def bulk_assignment_limit
    ENV.fetch('AUTO_ASSIGNMENT_BULK_LIMIT', 100).to_i
  end
end

View File

@@ -0,0 +1,19 @@
class AutoAssignment::PeriodicAssignmentJob < ApplicationJob
  queue_as :scheduled_jobs

  # Fans out AssignmentJobs for every v2-enabled inbox across all accounts,
  # batching both the account and inbox scans.
  def perform
    Account.find_in_batches do |accounts|
      accounts.each { |account| schedule_for_account(account) }
    end
  end

  private

  # Enqueues one AssignmentJob per inbox that has auto-assignment v2 on,
  # restricted to inboxes with an assignment policy.
  def schedule_for_account(account)
    return unless account.feature_enabled?('assignment_v2')

    account.inboxes.joins(:assignment_policy).find_in_batches do |inboxes|
      inboxes.each do |inbox|
        AutoAssignment::AssignmentJob.perform_later(inbox_id: inbox.id) if inbox.auto_assignment_v2_enabled?
      end
    end
  end
end

View File

@@ -0,0 +1,13 @@
class Avatar::AvatarFromGravatarJob < ApplicationJob
  queue_as :purgable

  # Queues an avatar download from Gravatar for the given record.
  # Skipped when Gravatar is disabled instance-wide, the email is missing,
  # or the record already has an avatar URL. `d=404` makes Gravatar answer
  # 404 (instead of a placeholder image) when no avatar exists.
  def perform(avatarable, email)
    return if GlobalConfigService.load('DISABLE_GRAVATAR', '').present?
    return if email.blank?
    return if avatarable.avatar_url.present?

    email_hash = Digest::MD5.hexdigest(email)
    gravatar_url = "https://www.gravatar.com/avatar/#{email_hash}?d=404"
    Avatar::AvatarFromUrlJob.perform_later(avatarable, gravatar_url)
  end
end

View File

@@ -0,0 +1,86 @@
# Downloads and attaches avatar images from a URL.
# Notes:
# - For contact objects, we use `additional_attributes` to rate limit the
#   job and track state.
# - We save the hash of the synced URL to retrigger downloads only when
#   there is a change in the underlying asset.
# - A 1 minute rate limit window is enforced via `last_avatar_sync_at`.
class Avatar::AvatarFromUrlJob < ApplicationJob
  include UrlHelper
  queue_as :purgable

  MAX_DOWNLOAD_SIZE = 15 * 1024 * 1024 # 15 MB download cap
  RATE_LIMIT_WINDOW = 1.minute

  # Fetches `avatar_url` and attaches it as the record's avatar.
  # Down errors are logged and swallowed — this is a best-effort job.
  # NOTE(review): the `ensure` clause runs even when a guard returns early,
  # so a rate-limited Contact still has `last_avatar_sync_at` refreshed;
  # repeated triggers therefore keep extending the window — confirm intended.
  def perform(avatarable, avatar_url)
    return unless avatarable.respond_to?(:avatar)
    return unless url_valid?(avatar_url)
    return unless should_sync_avatar?(avatarable, avatar_url)

    avatar_file = Down.download(avatar_url, max_size: MAX_DOWNLOAD_SIZE)
    raise Down::Error, 'Invalid file' unless valid_file?(avatar_file)

    avatarable.avatar.attach(
      io: avatar_file,
      filename: avatar_file.original_filename,
      content_type: avatar_file.content_type
    )
  rescue Down::NotFound
    Rails.logger.info "AvatarFromUrlJob: avatar not found at #{avatar_url}"
  rescue Down::Error => e
    Rails.logger.error "AvatarFromUrlJob error for #{avatar_url}: #{e.class} - #{e.message}"
  ensure
    update_avatar_sync_attributes(avatarable, avatar_url)
  end

  private

  def should_sync_avatar?(avatarable, avatar_url)
    # Only Contacts are rate-limited and hash-gated.
    return true unless avatarable.is_a?(Contact)

    attrs = avatarable.additional_attributes || {}
    return false if within_rate_limit?(attrs)
    return false if duplicate_url?(attrs, avatar_url)

    true
  end

  # True when the last sync happened inside the rate-limit window.
  def within_rate_limit?(attrs)
    ts = attrs['last_avatar_sync_at']
    return false if ts.blank?

    Time.zone.parse(ts) > RATE_LIMIT_WINDOW.ago
  end

  # True when the stored hash matches this URL, i.e. nothing changed.
  def duplicate_url?(attrs, avatar_url)
    stored_hash = attrs['avatar_url_hash']
    stored_hash.present? && stored_hash == generate_url_hash(avatar_url)
  end

  def generate_url_hash(url)
    Digest::SHA256.hexdigest(url)
  end

  # Records the sync timestamp + URL hash on the Contact for gating above.
  def update_avatar_sync_attributes(avatarable, avatar_url)
    # Only Contacts have sync attributes persisted
    return unless avatarable.is_a?(Contact)
    return if avatar_url.blank?

    additional_attributes = avatarable.additional_attributes || {}
    additional_attributes['last_avatar_sync_at'] = Time.current.iso8601
    additional_attributes['avatar_url_hash'] = generate_url_hash(avatar_url)
    # Persist without triggering validations that may fail due to avatar file checks
    avatarable.update_columns(additional_attributes: additional_attributes) # rubocop:disable Rails/SkipsModelValidations
  end

  # Down returns a tempfile; reject downloads with no original filename.
  def valid_file?(file)
    return false if file.original_filename.blank?

    true
  end
end

View File

@@ -0,0 +1,66 @@
class BulkActionsJob < ApplicationJob
  include DateRangeHelper
  queue_as :medium

  attr_accessor :records

  # Only conversations support bulk actions today.
  MODEL_TYPE = ['Conversation'].freeze

  # Applies bulk label add/remove, snooze and field updates to the
  # conversations identified by params[:ids], acting as `user`.
  # NOTE(review): the methods below `perform` are not marked `private`,
  # so they are externally callable — presumably a missing `private` marker.
  def perform(account:, params:, user:)
    @account = account
    # Current.user attributes the resulting events/audits to the actor.
    Current.user = user
    @params = params
    @records = records_to_updated(params[:ids])
    bulk_update
  ensure
    # Always clear request-style globals so the worker thread stays clean.
    Current.reset
  end

  def bulk_update
    bulk_remove_labels
    bulk_conversation_update
  end

  def bulk_conversation_update
    params = available_params(@params)
    records.each do |conversation|
      bulk_add_labels(conversation)
      bulk_snoozed_until(conversation)
      # `update` (not `update!`): records failing validation are skipped silently.
      conversation.update(params) if params
    end
  end

  def bulk_remove_labels
    records.each do |conversation|
      remove_labels(conversation)
    end
  end

  # Returns params[:fields] with a nil status entry removed (other nil
  # fields are kept); nil when no fields were supplied.
  def available_params(params)
    return unless params[:fields]

    params[:fields].delete_if { |key, value| value.nil? && key == 'status' }
  end

  def bulk_add_labels(conversation)
    conversation.add_labels(@params[:labels][:add]) if @params[:labels] && @params[:labels][:add]
  end

  # Assigns (does not save) the parsed snooze time; it is persisted by the
  # subsequent `conversation.update(params)` call in bulk_conversation_update.
  def bulk_snoozed_until(conversation)
    conversation.snoozed_until = parse_date_time(@params[:snoozed_until].to_s) if @params[:snoozed_until]
  end

  def remove_labels(conversation)
    return unless @params[:labels] && @params[:labels][:remove]

    labels = conversation.label_list - @params[:labels][:remove]
    conversation.update(label_list: labels)
  end

  # Resolves params[:type] to a whitelisted model class and loads matching
  # records by display_id; returns nil for unsupported types.
  def records_to_updated(ids)
    current_model = @params[:type].camelcase
    return unless MODEL_TYPE.include?(current_model)

    current_model.constantize&.where(account_id: @account.id, display_id: ids)
  end
end

View File

@@ -0,0 +1,7 @@
class Campaigns::TriggerOneoffCampaignJob < ApplicationJob
  queue_as :low

  # Fires a scheduled one-off campaign; `trigger!` performs the delivery.
  def perform(campaign)
    campaign.trigger!
  end
end

View File

@@ -0,0 +1,7 @@
class Channels::Twilio::TemplatesSyncJob < ApplicationJob
  queue_as :low

  # Pulls the latest content templates for a Twilio channel.
  def perform(twilio_channel)
    Twilio::TemplateSyncService.new(channel: twilio_channel).call
  end
end

View File

@@ -0,0 +1,7 @@
class Channels::Whatsapp::TemplatesSyncJob < ApplicationJob
  queue_as :low

  # Refreshes the message templates cached on a WhatsApp channel.
  def perform(whatsapp_channel)
    whatsapp_channel.sync_templates
  end
end

View File

@@ -0,0 +1,12 @@
class Channels::Whatsapp::TemplatesSyncSchedulerJob < ApplicationJob
  queue_as :low

  # Selects WhatsApp channels with stale templates (never synced, or synced
  # more than 3 hours ago), never-synced first, capped to bound the number
  # of external HTTP calls per run, and enqueues a sync job for each.
  def perform
    stale_channels = Channel::Whatsapp
                     .order(Arel.sql('message_templates_last_updated IS NULL DESC, message_templates_last_updated ASC'))
                     .where('message_templates_last_updated <= ? OR message_templates_last_updated IS NULL', 3.hours.ago)
                     .limit(Limits::BULK_EXTERNAL_HTTP_CALLS_LIMIT)
    stale_channels.each { |channel| Channels::Whatsapp::TemplatesSyncJob.perform_later(channel) }
  end
end

View File

@@ -0,0 +1,26 @@
class ContactIpLookupJob < ApplicationJob
  queue_as :default

  # Resolves the contact's last known IP to a location and stores the
  # city/country details on the contact. Lookup timeouts are logged only.
  def perform(contact)
    update_contact_location_from_ip(contact)
  rescue Errno::ETIMEDOUT => e
    Rails.logger.warn "Exception: ip resolution failed : #{e.message}"
  end

  private

  def update_contact_location_from_ip(contact)
    geocoder_result = IpLookupService.new.perform(contact_ip(contact))
    return unless geocoder_result

    contact.additional_attributes = (contact.additional_attributes || {}).merge(
      'city' => geocoder_result.city,
      'country' => geocoder_result.country,
      'country_code' => geocoder_result.country_code
    )
    contact.save!
  end

  # Prefers the most recent IP we captured for the contact.
  def contact_ip(contact)
    contact.additional_attributes&.dig('updated_at_ip') || contact.additional_attributes&.dig('created_at_ip')
  end
end

View File

@@ -0,0 +1,14 @@
class Contacts::BulkActionJob < ApplicationJob
  queue_as :medium

  # Runs the contact bulk-action service on behalf of the given user.
  def perform(account_id, user_id, params)
    Contacts::BulkActionService.new(
      account: Account.find(account_id),
      user: User.find(user_id),
      params: params
    ).perform
  end
end

View File

@@ -0,0 +1,16 @@
class ConversationReplyEmailJob < ApplicationJob
  queue_as :mailers

  # Sends the queued email reply for a conversation, then clears the Redis
  # key that coalesces mailer scheduling for this conversation.
  def perform(conversation_id, last_queued_id)
    conversation = Conversation.find(conversation_id)
    return unless conversation.account.active?

    mailer = ConversationReplyMailer.with(account: conversation.account)
    # Email-originated conversations get a plain reply; others get a summary.
    if conversation.messages.incoming&.last&.content_type == 'incoming_email'
      mailer.reply_without_summary(conversation, last_queued_id).deliver_later
    else
      mailer.reply_with_summary(conversation, last_queued_id).deliver_later
    end

    Redis::Alfred.delete(format(::Redis::Alfred::CONVERSATION_MAILER_KEY, conversation_id: conversation.id))
  end
end

View File

@@ -0,0 +1,7 @@
class Conversations::ActivityMessageJob < ApplicationJob
  queue_as :high

  # Appends an activity message (e.g. a status-change note) to the conversation.
  def perform(conversation, message_params)
    conversation.messages.create!(message_params)
  end
end

View File

@@ -0,0 +1,7 @@
class Conversations::ReopenSnoozedConversationsJob < ApplicationJob
  queue_as :low

  # Reopens snoozed conversations whose snooze time falls within the last
  # three days up to now, processing them in batches of 100.
  def perform
    Conversation.where(status: :snoozed)
                .where(snoozed_until: 3.days.ago..Time.current)
                .find_each(batch_size: 100, &:open!)
  end
end

View File

@@ -0,0 +1,27 @@
class Conversations::ResolutionJob < ApplicationJob
  queue_as :low

  # Auto-resolves eligible conversations for an account, optionally posting
  # a bot message and applying a label first.
  def perform(account:)
    # limiting the number of conversations to be resolved to avoid any performance issues
    resolvable_conversations = conversation_scope(account).limit(Limits::BULK_ACTIONS_LIMIT)
    resolvable_conversations.each do |conversation|
      # send a message from the bot that the conversation has been resolved;
      # do this if account.auto_resolve_message is set
      ::MessageTemplates::Template::AutoResolve.new(conversation: conversation).perform if account.auto_resolve_message.present?
      conversation.add_labels(account.auto_resolve_label) if account.auto_resolve_label.present?
      # NOTE(review): toggle_status flips rather than forces the status —
      # this assumes the scope only yields open conversations; verify.
      conversation.toggle_status
    end
  end

  private

  # Conversations idle past the account's auto-resolve threshold; optionally
  # excludes ones still waiting on the customer, per account settings.
  def conversation_scope(account)
    base_scope = if account.auto_resolve_ignore_waiting
                   account.conversations.resolvable_not_waiting(account.auto_resolve_after)
                 else
                   account.conversations.resolvable_all(account.auto_resolve_after)
                 end

    # Exclude orphan conversations where contact was deleted but conversation cleanup is pending
    base_scope.where.not(contact_id: nil)
  end
end

View File

@@ -0,0 +1,21 @@
class Conversations::UpdateMessageStatusJob < ApplicationJob
  queue_as :deferred

  # This job only supports marking messages as read or delivered; extend
  # this array if more statuses should be supported.
  VALID_STATUSES = %w[read delivered].freeze

  # Marks non-incoming messages created on or before `timestamp` as read
  # or delivered.
  def perform(conversation_id, timestamp, status = :read)
    return unless VALID_STATUSES.include?(status.to_s)

    conversation = Conversation.find_by(id: conversation_id)
    return if conversation.nil?

    eligible_messages(conversation, timestamp).find_each do |message|
      Messages::StatusUpdateService.new(message, status).perform
    end
  end

  private

  # Non-incoming messages still in sent/delivered, created before the cutoff.
  def eligible_messages(conversation, timestamp)
    conversation.messages
                .where(status: %w[sent delivered])
                .where.not(message_type: 'incoming')
                .where('messages.created_at <= ?', timestamp)
  end
end

View File

@@ -0,0 +1,24 @@
class Conversations::UserMentionJob < ApplicationJob
  queue_as :default

  # Records (or refreshes) a mention row per mentioned user on the
  # conversation, stamping mentioned_at with the current time.
  def perform(mentioned_user_ids, conversation_id, account_id)
    mentioned_user_ids.each do |user_id|
      record_mention(user_id, conversation_id, account_id)
    end
  end

  private

  def record_mention(user_id, conversation_id, account_id)
    existing = Mention.find_by(
      user_id: user_id,
      conversation_id: conversation_id,
      account_id: account_id
    )

    if existing
      existing.update(mentioned_at: Time.zone.now)
    else
      Mention.create!(
        user_id: user_id,
        conversation_id: conversation_id,
        mentioned_at: Time.zone.now,
        account_id: account_id
      )
    end
  end
end

View File

@@ -0,0 +1,36 @@
class Crm::SetupJob < ApplicationJob
  queue_as :default

  # Runs the one-time CRM setup for an integration hook. Missing or
  # disabled hooks are ignored; setup failures are tracked and logged
  # rather than retried.
  def perform(hook_id)
    hook = Integrations::Hook.find_by(id: hook_id)
    return if hook.blank? || hook.disabled?

    begin
      create_setup_service(hook)&.setup
    rescue StandardError => e
      ChatwootExceptionTracker.new(e, account: hook.account).capture_exception
      Rails.logger.error "Error in CRM setup for hook ##{hook_id} (#{hook.app_id}): #{e.message}"
    end
  end

  private

  # Maps a hook's app_id to its setup service; returns nil (with a log
  # entry) for CRMs that have no setup service yet.
  def create_setup_service(hook)
    case hook.app_id
    when 'leadsquared'
      Crm::Leadsquared::SetupService.new(hook)
    # Add cases for future CRMs here, e.g.:
    # when 'hubspot' then Crm::Hubspot::SetupService.new(hook)
    # when 'zoho'    then Crm::Zoho::SetupService.new(hook)
    else
      Rails.logger.error "Unsupported CRM app_id: #{hook.app_id}"
      nil
    end
  end
end

View File

@@ -0,0 +1,122 @@
# TODO: logic is written tailored to contact import since its the only import available
# let's break this logic and clean this up in future
class DataImportJob < ApplicationJob
  queue_as :low

  # The attachment may not be available in storage yet when the job first runs.
  retry_on ActiveStorage::FileNotFoundError, wait: 1.minute, attempts: 3

  # Imports contacts from the data_import's attached CSV, records rejected
  # rows in a downloadable CSV, and notifies account admins on completion
  # or failure.
  def perform(data_import)
    @data_import = data_import
    @contact_manager = DataImport::ContactManager.new(@data_import.account)
    begin
      process_import_file
      send_import_notification_to_admin
    rescue CSV::MalformedCSVError => e
      handle_csv_error(e)
    end
  end

  private

  def process_import_file
    @data_import.update!(status: :processing)
    contacts, rejected_contacts = parse_csv_and_build_contacts
    import_contacts(contacts)
    update_data_import_status(contacts.length, rejected_contacts.length)
    save_failed_records_csv(rejected_contacts)
  end

  # Splits CSV rows into buildable contact records and rejected rows
  # (rejected rows get an `errors` column appended for the failure report).
  def parse_csv_and_build_contacts
    contacts = []
    rejected_contacts = []
    with_import_file do |file|
      csv_reader(file).each do |row|
        current_contact = @contact_manager.build_contact(row.to_h.with_indifferent_access)
        if current_contact.valid?
          contacts << current_contact
        else
          append_rejected_contact(row, current_contact, rejected_contacts)
        end
      end
    end
    [contacts, rejected_contacts]
  end

  def append_rejected_contact(row, contact, rejected_contacts)
    row['errors'] = contact.errors.full_messages.join(', ')
    rejected_contacts << row
  end

  # Bulk insert via activerecord-import; rows colliding on a unique key
  # are ignored rather than raising.
  def import_contacts(contacts)
    # <struct ActiveRecord::Import::Result failed_instances=[], num_inserts=1, ids=[444, 445], results=[]>
    Contact.import(contacts, synchronize: contacts, on_duplicate_key_ignore: true, track_validation_failures: true, validate: true, batch_size: 1000)
  end

  def update_data_import_status(processed_records, rejected_records)
    @data_import.update!(status: :completed, processed_records: processed_records, total_records: processed_records + rejected_records)
  end

  # Attaches a CSV of rejected rows so the admin can fix and re-import them.
  def save_failed_records_csv(rejected_contacts)
    csv_data = generate_csv_data(rejected_contacts)
    return if csv_data.blank?

    @data_import.failed_records.attach(io: StringIO.new(csv_data), filename: "#{Time.zone.today.strftime('%Y%m%d')}_contacts.csv",
                                       content_type: 'text/csv')
  end

  # Original headers plus an `errors` column; nil when nothing was rejected.
  def generate_csv_data(rejected_contacts)
    headers = csv_headers
    headers << 'errors'
    return if rejected_contacts.blank?

    CSV.generate do |csv|
      csv << headers
      rejected_contacts.each do |record|
        csv << record
      end
    end
  end

  def handle_csv_error(error) # rubocop:disable Lint/UnusedMethodArgument
    @data_import.update!(status: :failed)
    send_import_failed_notification_to_admin
  end

  def send_import_notification_to_admin
    AdministratorNotifications::AccountNotificationMailer.with(account: @data_import.account).contact_import_complete(@data_import).deliver_later
  end

  def send_import_failed_notification_to_admin
    AdministratorNotifications::AccountNotificationMailer.with(account: @data_import.account).contact_import_failed.deliver_later
  end

  # Reads just the header row from the attached file.
  def csv_headers
    header_row = nil
    with_import_file do |file|
      header_row = csv_reader(file).first
    end
    header_row&.headers || []
  end

  # Builds a CSV reader over the file, scrubbing invalid UTF-8 byte
  # sequences (via a UTF-16 round-trip) so parsing survives bad encodings.
  def csv_reader(file)
    file.rewind
    raw_data = file.read
    utf8_data = raw_data.force_encoding('UTF-8')
    clean_data = utf8_data.valid_encoding? ? utf8_data : utf8_data.encode('UTF-16le', invalid: :replace, replace: '').encode('UTF-8')
    CSV.new(StringIO.new(clean_data), headers: true)
  end

  # Downloads the attached import file into tmp/imports and yields it in
  # binary mode.
  def with_import_file
    temp_dir = Rails.root.join('tmp/imports')
    FileUtils.mkdir_p(temp_dir)
    @data_import.import_file.open(tmpdir: temp_dir) do |file|
      file.binmode
      yield file
    end
  end
end

View File

@@ -0,0 +1,43 @@
class DeleteObjectJob < ApplicationJob
  queue_as :low

  # Batch size used when pre-purging heavy associations.
  BATCH_SIZE = 5_000

  # Destroys `object` after pre-purging its heavy associations, then runs
  # post-deletion tasks (a hook point; enterprise variants prepend here).
  def perform(object, user = nil, ip = nil)
    # Pre-purge heavy associations for large objects to avoid
    # timeouts & race conditions due to destroy_async fan-out.
    purge_heavy_associations(object)
    object.destroy!
    process_post_deletion_tasks(object, user, ip)
  end

  # Intentionally a no-op; overridden/prepended elsewhere.
  def process_post_deletion_tasks(object, user, ip); end

  private

  # Model class => associations that are expensive to destroy via callbacks.
  # Memoized: the previous version rebuilt and re-froze this hash on every
  # call. Kept lazy (not a constant) so the model classes are only resolved
  # when the job actually runs.
  def heavy_associations
    @heavy_associations ||= {
      Account => %i[conversations contacts inboxes reporting_events],
      Inbox => %i[conversations contact_inboxes reporting_events]
    }.freeze
  end

  # Destroys the configured heavy associations for the object's class, if any.
  def purge_heavy_associations(object)
    klass = heavy_associations.keys.find { |k| object.is_a?(k) }
    return unless klass

    heavy_associations[klass].each do |assoc|
      next unless object.respond_to?(assoc)

      batch_destroy(object.public_send(assoc))
    end
  end

  # Destroys records batch-by-batch so callbacks run without loading the
  # whole association into memory at once.
  def batch_destroy(relation)
    relation.find_in_batches(batch_size: BATCH_SIZE) do |batch|
      batch.each(&:destroy!)
    end
  end
end
DeleteObjectJob.prepend_mod_with('DeleteObjectJob')

View File

@@ -0,0 +1,7 @@
class EventDispatcherJob < ApplicationJob
  queue_as :critical

  # Hands the event to the async dispatcher so background listeners
  # receive it off the request cycle.
  def perform(event_name, timestamp, data)
    Rails.configuration.dispatcher.async_dispatcher.publish_event(event_name, timestamp, data)
  end
end

View File

@@ -0,0 +1,83 @@
class HookJob < MutexApplicationJob
  retry_on LockAcquisitionError, wait: 3.seconds, attempts: 3
  queue_as :medium

  # Routes integration hook events to the matching integration processor.
  # Disabled hooks are ignored; unexpected errors are logged and swallowed.
  def perform(hook, event_name, event_data = {})
    return if hook.disabled?

    case hook.app_id
    when 'slack'
      process_slack_integration(hook, event_name, event_data)
    when 'dialogflow'
      process_dialogflow_integration(hook, event_name, event_data)
    when 'google_translate'
      google_translate_integration(hook, event_name, event_data)
    when 'leadsquared'
      process_leadsquared_integration_with_lock(hook, event_name, event_data)
    end
  rescue LockAcquisitionError
    # Re-raise so the class-level `retry_on` can reschedule the job.
    # The blanket StandardError rescue below would otherwise swallow it
    # and silently defeat the retry policy.
    raise
  rescue StandardError => e
    Rails.logger.error e
  end

  private

  # Slack sync: delay delivery slightly when the message has attachments so
  # attachment processing can finish first.
  def process_slack_integration(hook, event_name, event_data)
    return unless ['message.created'].include?(event_name)

    message = event_data[:message]
    if message.attachments.blank?
      ::SendOnSlackJob.perform_later(message, hook)
    else
      ::SendOnSlackJob.set(wait: 2.seconds).perform_later(message, hook)
    end
  end

  def process_dialogflow_integration(hook, event_name, event_data)
    return unless ['message.created', 'message.updated'].include?(event_name)

    Integrations::Dialogflow::ProcessorService.new(event_name: event_name, hook: hook, event_data: event_data).perform
  end

  def google_translate_integration(hook, event_name, event_data)
    return unless ['message.created'].include?(event_name)

    message = event_data[:message]
    Integrations::GoogleTranslate::DetectLanguageService.new(hook: hook, message: message).perform
  end

  def process_leadsquared_integration_with_lock(hook, event_name, event_data)
    # Why do we need a mutex here? glad you asked
    # When a new conversation is created. We get a contact created event, immediately followed by
    # a contact updated event, and then a conversation created event.
    # This all happens within milliseconds of each other.
    # Now each of these subsequent event handlers need to have a leadsquared lead created and the contact to have the ID.
    # If the lead data is not present, we try to search the API and create a new lead if it doesn't exist.
    # This gives us a bad race condition that allows the API to create multiple leads for the same contact.
    #
    # This would have not been a problem if the email and phone number were unique identifiers for contacts at LeadSquared
    # But then this is configurable in the LeadSquared settings, and may or may not be unique.
    valid_event_names = ['contact.updated', 'conversation.created', 'conversation.resolved']
    return unless valid_event_names.include?(event_name)
    return unless hook.feature_allowed?

    key = format(::Redis::Alfred::CRM_PROCESS_MUTEX, hook_id: hook.id)
    with_lock(key) do
      process_leadsquared_integration(hook, event_name, event_data)
    end
  end

  def process_leadsquared_integration(hook, event_name, event_data)
    # Process the event with the processor service
    processor = Crm::Leadsquared::ProcessorService.new(hook)
    case event_name
    when 'contact.updated'
      processor.handle_contact(event_data[:contact])
    when 'conversation.created'
      processor.handle_conversation_created(event_data[:conversation])
    when 'conversation.resolved'
      processor.handle_conversation_resolved(event_data[:conversation])
    end
  end
end

View File

@@ -0,0 +1,47 @@
class Inboxes::BulkAutoAssignmentJob < ApplicationJob
  queue_as :scheduled_jobs
  include BillingHelper

  # For every account with the assignment_v2 feature, bulk-assigns open
  # unassigned conversations in auto-assignment-enabled inboxes.
  def perform
    Account.feature_assignment_v2.find_each do |account|
      if should_skip_auto_assignment?(account)
        Rails.logger.info("Skipping auto assignment for account #{account.id}")
        next
      end

      account.inboxes.where(enable_auto_assignment: true).find_each do |inbox|
        process_assignment(inbox)
      end
    end
  end

  private

  # Assigns conversations only when the inbox has agents with spare capacity.
  def process_assignment(inbox)
    allowed_agent_ids = inbox.member_ids_with_assignment_capacity
    if allowed_agent_ids.blank?
      Rails.logger.info("No agents available to assign conversation to inbox #{inbox.id}")
      return
    end

    assign_conversations(inbox, allowed_agent_ids)
  end

  def assign_conversations(inbox, allowed_agent_ids)
    # NOTE(review): find_each ignores `limit` on some Rails versions —
    # confirm the AUTO_ASSIGNMENT_BULK_LIMIT cap actually applies here.
    unassigned_conversations = inbox.conversations.unassigned.open.limit(Limits::AUTO_ASSIGNMENT_BULK_LIMIT)
    unassigned_conversations.find_each do |conversation|
      ::AutoAssignment::AgentAssignmentService.new(
        conversation: conversation,
        allowed_agent_ids: allowed_agent_ids
      ).perform
      # NOTE(review): the service chooses the agent; logging
      # `allowed_agent_ids.first` may not reflect the actual assignee.
      Rails.logger.info("Assigned conversation #{conversation.id} to agent #{allowed_agent_ids.first}")
    end
  end

  # On cloud, accounts on the default (free) plan are skipped.
  def should_skip_auto_assignment?(account)
    return false unless ChatwootApp.chatwoot_cloud?

    default_plan?(account)
  end
end

View File

@@ -0,0 +1,24 @@
class Inboxes::FetchImapEmailInboxesJob < ApplicationJob
  queue_as :scheduled_jobs
  include BillingHelper

  # Scans all email inboxes and enqueues an IMAP fetch for each eligible one.
  def perform
    Inbox.where(channel_type: 'Channel::Email').find_each(batch_size: 100) do |inbox|
      ::Inboxes::FetchImapEmailsJob.perform_later(inbox.channel) if should_fetch_emails?(inbox)
    end
  end

  private

  # Eligible when the account is not suspended, IMAP is enabled and
  # authorized, and (on cloud) the account is not on the default plan.
  def should_fetch_emails?(inbox)
    channel = inbox.channel
    return false if inbox.account.suspended?
    return false unless channel.imap_enabled
    return false if channel.reauthorization_required?
    return true unless ChatwootApp.chatwoot_cloud?

    !default_plan?(inbox.account)
  end
end

View File

@@ -0,0 +1,54 @@
require 'net/imap'

class Inboxes::FetchImapEmailsJob < MutexApplicationJob
  queue_as :scheduled_jobs

  # Fetches new IMAP emails for a channel under a per-inbox Redis mutex so
  # concurrent runs cannot process the same mailbox twice.
  # `interval` is passed through to the fetch services as the lookback
  # window — units depend on those services; TODO confirm.
  def perform(channel, interval = 1)
    return unless should_fetch_email?(channel)

    key = format(::Redis::Alfred::EMAIL_MESSAGE_MUTEX, inbox_id: channel.inbox.id)
    with_lock(key, 5.minutes) do
      process_email_for_channel(channel, interval)
    end
  rescue *ExceptionList::IMAP_EXCEPTIONS => e
    Rails.logger.error "Authorization error for email channel - #{channel.inbox.id} : #{e.message}"
  rescue EOFError, OpenSSL::SSL::SSLError, Net::IMAP::NoResponseError, Net::IMAP::BadResponseError, Net::IMAP::InvalidResponseError,
         Net::IMAP::ResponseParseError, Net::IMAP::ResponseReadError, Net::IMAP::ResponseTooLargeError => e
    Rails.logger.error "Error for email channel - #{channel.inbox.id} : #{e.message}"
  rescue LockAcquisitionError
    Rails.logger.error "Lock failed for #{channel.inbox.id}"
  rescue StandardError => e
    ChatwootExceptionTracker.new(e, account: channel.account).capture_exception
  end

  private

  def should_fetch_email?(channel)
    channel.imap_enabled? && !channel.reauthorization_required?
  end

  # Picks the provider-specific fetch service, then hands each inbound
  # email to the IMAP mailbox processor. OAuth failures flag the channel
  # for reauthorization.
  def process_email_for_channel(channel, interval)
    inbound_emails = if channel.microsoft?
                       Imap::MicrosoftFetchEmailService.new(channel: channel, interval: interval).perform
                     elsif channel.google?
                       Imap::GoogleFetchEmailService.new(channel: channel, interval: interval).perform
                     else
                       Imap::FetchEmailService.new(channel: channel, interval: interval).perform
                     end

    inbound_emails.map do |inbound_mail|
      process_mail(inbound_mail, channel)
    end
  rescue OAuth2::Error => e
    Rails.logger.error "Error for email channel - #{channel.inbox.id} : #{e.message}"
    channel.authorization_error!
  end

  # Per-mail processing is isolated so one bad email does not abort the batch.
  def process_mail(inbound_mail, channel)
    Imap::ImapMailbox.new.process(inbound_mail, channel)
  rescue StandardError => e
    ChatwootExceptionTracker.new(e, account: channel.account).capture_exception
    Rails.logger.error("
      #{channel.provider} Email dropped: #{inbound_mail.from} and message_source_id: #{inbound_mail.message_id}")
  end
end

View File

@@ -0,0 +1,11 @@
class Inboxes::SyncWidgetPreChatCustomFieldsJob < ApplicationJob
  queue_as :default

  # Removes a deleted custom attribute from every web widget's pre-chat
  # form fields for the account.
  def perform(account, field_name)
    account.web_widgets.all.find_each do |web_widget|
      remaining = web_widget.pre_chat_form_options['pre_chat_fields'].reject { |field| field['name'] == field_name }
      web_widget.pre_chat_form_options['pre_chat_fields'] = remaining
      web_widget.save!
    end
  end
end

View File

@@ -0,0 +1,23 @@
class Inboxes::UpdateWidgetPreChatCustomFieldsJob < ApplicationJob
  queue_as :default

  # Propagates edits to a custom attribute (label, placeholder, values,
  # regex settings) to the matching pre-chat form field on every web
  # widget of the account.
  def perform(account, custom_attribute)
    attribute_key = custom_attribute['attribute_key']
    account.web_widgets.all.find_each do |web_widget|
      pre_chat_fields = web_widget.pre_chat_form_options['pre_chat_fields']
      pre_chat_fields.each_with_index do |pre_chat_field, index|
        # Only the field whose name matches the attribute key is touched.
        next unless pre_chat_field['name'] == attribute_key

        # deep_merge keeps widget-specific keys while overwriting the
        # attribute-derived ones; written back by index into the options.
        web_widget.pre_chat_form_options['pre_chat_fields'][index] =
          pre_chat_field.deep_merge({
                                      'label' => custom_attribute['attribute_display_name'],
                                      'placeholder' => custom_attribute['attribute_display_name'],
                                      'values' => custom_attribute['attribute_values'],
                                      'regex_pattern' => custom_attribute['regex_pattern'],
                                      'regex_cue' => custom_attribute['regex_cue']
                                    })
      end
      web_widget.save!
    end
  end
end

View File

@@ -0,0 +1,20 @@
class Internal::CheckNewVersionsJob < ApplicationJob
  queue_as :scheduled_jobs

  # Syncs instance info with the Chatwoot hub and caches the latest
  # released version in Redis. Runs in production only.
  def perform
    return unless Rails.env.production?

    @instance_info = ChatwootHub.sync_with_hub
    update_version_info
  end

  private

  def update_version_info
    latest_version = @instance_info['version']
    return if latest_version.blank?

    ::Redis::Alfred.set(::Redis::Alfred::LATEST_CHATWOOT_VERSION, latest_version)
  end
end
Internal::CheckNewVersionsJob.prepend_mod_with('Internal::CheckNewVersionsJob')

View File

@@ -0,0 +1,27 @@
class Internal::DeleteAccountsJob < ApplicationJob
  queue_as :scheduled_jobs

  # Permanently deletes accounts whose marked-for-deletion time has passed.
  def perform
    accounts_pending_deletion.each do |account|
      AccountDeletionService.new(account: account).perform
    end
  end

  private

  # Accounts flagged for deletion, filtered in Ruby down to those whose
  # deletion time has already elapsed.
  def accounts_pending_deletion
    flagged = Account.where("custom_attributes->>'marked_for_deletion_at' IS NOT NULL")
    flagged.select { |account| deletion_period_expired?(account) }
  end

  def deletion_period_expired?(account)
    deletion_time = account.custom_attributes['marked_for_deletion_at']
    return false if deletion_time.blank?

    DateTime.parse(deletion_time) <= Time.current
  end
end

View File

@@ -0,0 +1,39 @@
# housekeeping
# remove stale contacts for subset of accounts each day
# - have no identification (email, phone_number, and identifier are NULL)
# - have no conversations
# - are older than 30 days
class Internal::ProcessStaleContactsJob < ApplicationJob
  queue_as :housekeeping

  # Number of day-based groups to split accounts into
  DISTRIBUTION_GROUPS = 5
  # Max accounts to process in one batch
  MAX_ACCOUNTS_PER_BATCH = 20

  # Handles only the accounts whose id falls in today's modulo bucket, so
  # each account is processed roughly once every DISTRIBUTION_GROUPS days
  # and the queue is never flooded. Cloud only.
  def perform
    return unless ChatwootApp.chatwoot_cloud?

    remainder = Date.current.day % DISTRIBUTION_GROUPS
    log_message = "ProcessStaleContactsJob: Processing accounts with ID % #{DISTRIBUTION_GROUPS} = "
    log_message += "#{remainder} (out of #{Account.count} total accounts)"
    Rails.logger.info log_message

    # DISTRIBUTION_GROUPS is a class constant, not user input, so the
    # interpolation into SQL here is safe.
    Account.where("id % #{DISTRIBUTION_GROUPS} = ?", remainder).find_each(batch_size: MAX_ACCOUNTS_PER_BATCH) do |account|
      Rails.logger.info "Enqueuing RemoveStaleContactsJob for account #{account.id}"
      # Random 1-10 minute stagger to further reduce queue pressure.
      Internal::RemoveStaleContactsJob.set(wait: rand(1..10).minutes).perform_later(account)
    end
  end
end

View File

@@ -0,0 +1,12 @@
# housekeeping
# cleans up stale Redis keys for a single account via
# Internal::RemoveStaleRedisKeysService and logs how many keys were removed
class Internal::ProcessStaleRedisKeysJob < ApplicationJob
  queue_as :low

  def perform(account)
    removed = Internal::RemoveStaleRedisKeysService.new(account_id: account.id).perform
    Rails.logger.info "Successfully cleaned up Redis keys for account #{account.id} (removed #{removed} keys)"
  end
end

View File

@@ -0,0 +1,11 @@
# housekeeping
# remove conversations that do not have a contact_id
# orphan conversations without contact cannot be accessed or used
class Internal::RemoveOrphanConversationsJob < ApplicationJob
  queue_as :housekeeping

  # Delegates the cleanup to the dedicated service object.
  def perform
    service = Internal::RemoveOrphanConversationsService.new
    service.perform
  end
end

View File

@@ -0,0 +1,11 @@
# housekeeping
# remove contact inboxes that do not have any conversations
# and are older than 3 months
class Internal::RemoveStaleContactInboxesJob < ApplicationJob
  queue_as :scheduled_jobs

  # Delegates the cleanup to the dedicated service object.
  def perform
    service = Internal::RemoveStaleContactInboxesService.new
    service.perform
  end
end

View File

@@ -0,0 +1,13 @@
# housekeeping
# remove contacts that:
# - have no identification (email, phone_number, and identifier are NULL)
# - have no conversations
# - are older than 30 days
class Internal::RemoveStaleContactsJob < ApplicationJob
  queue_as :housekeeping

  # @param account [Account] account whose stale contacts are purged
  # @param batch_size [Integer] rows handled per batch by the service
  def perform(account, batch_size = 1000)
    service = Internal::RemoveStaleContactsService.new(account: account)
    service.perform(batch_size)
  end
end

View File

@@ -0,0 +1,16 @@
# housekeeping
# ensure stale ONLINE PRESENCE KEYS for contacts are removed periodically
# should result in 50% redis mem size reduction
class Internal::RemoveStaleRedisKeysJob < ApplicationJob
  queue_as :scheduled_jobs

  # Fans out one ProcessStaleRedisKeysJob per account, scanning accounts in
  # batches of 100 to keep memory bounded.
  def perform
    Account.find_each(batch_size: 100) do |account|
      Rails.logger.info "Enqueuing ProcessStaleRedisKeysJob for account #{account.id}"
      Internal::ProcessStaleRedisKeysJob.perform_later(account)
    end
  end
end

View File

@@ -0,0 +1,7 @@
# Populates a freshly created account with seed data.
class Internal::SeedAccountJob < ApplicationJob
  queue_as :low

  def perform(account)
    seeder = Seeders::AccountSeeder.new(account: account)
    seeder.perform!
  end
end

View File

@@ -0,0 +1,11 @@
# Renames a label across an account by delegating to Labels::UpdateService.
class Labels::UpdateJob < ApplicationJob
  queue_as :default

  def perform(new_label_title, old_label_title, account_id)
    service = Labels::UpdateService.new(new_label_title: new_label_title,
                                        old_label_title: old_label_title,
                                        account_id: account_id)
    service.perform
  end
end

View File

@@ -0,0 +1,14 @@
# Runs a macro against each of the given conversations on behalf of a user.
class MacrosExecutionJob < ApplicationJob
  queue_as :medium

  # Conversation ids are the account-scoped display_ids, not primary keys.
  def perform(macro, conversation_ids:, user:)
    scope = macro.account.conversations.where(display_id: conversation_ids.to_a)
    return if scope.blank?

    scope.each { |conversation| ::Macros::ExecutionService.new(macro, conversation, user).perform }
  end
end

View File

@@ -0,0 +1,17 @@
# Delete migration and spec after 2 consecutive releases.
class Migration::AddSearchIndexesJob < ApplicationJob
  queue_as :scheduled_jobs

  # Builds search indexes with algorithm: :concurrently so writes to the
  # messages/contacts tables are not blocked while the indexes are created.
  # NOTE(review): add_index raises if an index already exists, so re-running
  # this job would fail — confirm it is enqueued at most once.
  def perform
    ActiveRecord::Migration[6.1].add_index(:messages, [:account_id, :inbox_id], algorithm: :concurrently)
    # GIN + trigram index to support ILIKE-style content search.
    ActiveRecord::Migration[6.1].add_index(:messages, :content, using: 'gin', opclass: :gin_trgm_ops, algorithm: :concurrently)
    ActiveRecord::Migration[6.1].add_index(
      :contacts,
      [:name, :email, :phone_number, :identifier],
      using: 'gin',
      opclass: :gin_trgm_ops,
      name: 'index_contacts_on_name_email_phone_number_identifier',
      algorithm: :concurrently
    )
  end
end

View File

@@ -0,0 +1,13 @@
# Recomputes the contacts counter cache for every company (enterprise only).
class Migration::BackfillCompaniesContactsCountJob < ApplicationJob
  queue_as :async_database_migration

  def perform
    return unless ChatwootApp.enterprise?

    Company.find_each(batch_size: 100) do |company|
      Company.reset_counters(company.id, :contacts)
    end
  end
end

View File

@@ -0,0 +1,14 @@
class Migration::ConversationBatchCacheLabelJob < ApplicationJob
  queue_as :async_database_migration

  # To cache the label, we simply access it from the object and save it. Anytime the object is
  # saved in the future, ActsAsTaggable will automatically recompute it. This process is done
  # initially when the user has not performed any action.
  # Reference: https://github.com/mbleigh/acts-as-taggable-on/wiki/Caching
  def perform(conversation_batch)
    conversation_batch.each { |conversation| cache_label_list(conversation) }
  end

  private

  # Reading label_list populates the cache column; saving persists it.
  def cache_label_list(conversation)
    conversation.label_list
    conversation.save!
  end
end

View File

@@ -0,0 +1,9 @@
# Fans out label-caching work for an account's conversations in batches of 100.
class Migration::ConversationCacheLabelJob < ApplicationJob
  queue_as :async_database_migration

  def perform(account)
    account.conversations.find_in_batches(batch_size: 100) do |batch|
      Migration::ConversationBatchCacheLabelJob.perform_later(batch)
    end
  end
end

View File

@@ -0,0 +1,17 @@
# Delete migration and spec after 2 consecutive releases.
class Migration::ConversationsFirstReplySchedulerJob < ApplicationJob
  queue_as :scheduled_jobs

  # Backfills conversation.first_reply_created_at with the created_at of the
  # first outgoing non-campaign message, or nil when there is none.
  def perform(account)
    # find_each avoids loading every conversation of the account into memory.
    account.conversations.find_each do |conversation|
      # Run the filter once instead of a COUNT followed by a duplicate fetch;
      # `&.created_at` collapses the original if/else into one assignment.
      first_reply = conversation.messages.outgoing.where("(additional_attributes->'campaign_id') is null").first
      # rubocop:disable Rails/SkipsModelValidations
      conversation.update_columns(first_reply_created_at: first_reply&.created_at)
      # rubocop:enable Rails/SkipsModelValidations
    end
  end
end

View File

@@ -0,0 +1,8 @@
# Delete migration and spec after 2 consecutive releases.
# Purges legacy notifications pointing at Message records, in batches of 100.
class Migration::RemoveMessageNotifications < ApplicationJob
  queue_as :scheduled_jobs

  def perform
    stale_scope = Notification.where(primary_actor_type: 'Message')
    stale_scope.in_batches(of: 100).delete_all
  end
end

View File

@@ -0,0 +1,19 @@
# Delete migration and spec after 2 consecutive releases.
class Migration::RemoveStaleNotificationsJob < ApplicationJob
  queue_as :scheduled_jobs

  def perform
    remove_invalid_messages
  end

  private

  # Destroys messages whose inbox no longer exists. Works through the distinct
  # inbox ids in slices of 1000 to keep each lookup query bounded.
  # (The original kept a useless outer accumulator that was overwritten every
  # slice and then flattened an already-flat array; both removed.)
  def remove_invalid_messages
    Message.unscoped.distinct.pluck(:inbox_id).each_slice(1000) do |id_list|
      orphaned_ids = id_list - Inbox.where(id: id_list).pluck(:id)
      Message.where(inbox_id: orphaned_ids).destroy_all
    end
  end
end

View File

@@ -0,0 +1,69 @@
# Delete migration and spec after 2 consecutive releases.
class Migration::UpdateFirstResponseTimeInReportingEventsJob < ApplicationJob
  include ReportingEventHelper
  queue_as :async_database_migration

  # Recomputes 'first_response' reporting events for an account so the response
  # time is measured from the last bot reply to the first human reply.
  # Conversations that have an explicit bot handoff event are left untouched.
  def perform(account)
    get_conversations_with_bot_handoffs(account)
    account.reporting_events.where(name: 'first_response').find_each do |event|
      conversation = event.conversation
      # if the conversation has a bot handoff event, we don't need to update the response_time
      next if conversation.nil? || @conversations_with_handoffs.include?(conversation.id)

      update_event_data(event, conversation)
    end
  end

  # Memoizes the ids of conversations that already have a handoff event.
  def get_conversations_with_bot_handoffs(account)
    @conversations_with_handoffs = account.reporting_events.where(name: 'conversation_bot_handoff').pluck(:conversation_id)
  end

  # Updates a single event when the conversation has both a bot reply and a
  # later human reply; otherwise leaves the event unchanged.
  def update_event_data(event, conversation)
    last_bot_reply = conversation.messages.where(sender_type: 'AgentBot').order(created_at: :asc).last
    return if last_bot_reply.blank?

    first_human_reply = conversation.messages.where(sender_type: 'User').order(created_at: :asc).first
    return if first_human_reply.blank?

    # accomodate for campaign if required
    # new_value = difference between the first_human_reply and the first_bot_reply if it exists or first_human_reply and created at
    #
    #  conversation     bot                        conversation
    #     start        handoff                       resolved
    #       |             |                             |
    #       |____|___|_________|____|_______|_____|________|
    #          bot reply  ^                 ^ human reply
    #                     |                 |
    #                     |                 |
    #               last_bot_reply     first_human_reply
    #
    #
    # bot handoff happens at the last_bot_reply created time
    # the response time is the time between last bot reply created and the first human reply created
    return if last_bot_reply.created_at.to_i >= first_human_reply.created_at.to_i

    # this means a bot replied existed, so we need to update the event_start_time
    update_event_details(event, last_bot_reply, first_human_reply, conversation.inbox)
  end

  # Rewrites the event window and value columns without running validations.
  def update_event_details(event, last_bot_reply, first_human_reply, inbox)
    # rubocop:disable Rails/SkipsModelValidations
    event.update_columns(event_start_time: last_bot_reply.created_at,
                         event_end_time: first_human_reply.created_at,
                         value: calculate_event_value(last_bot_reply, first_human_reply),
                         value_in_business_hours: calculate_event_value_in_business_hours(inbox, last_bot_reply,
                                                                                         first_human_reply),
                         user_id: event.user_id || first_human_reply.sender_id)
    # rubocop:enable Rails/SkipsModelValidations
  end

  # Wall-clock seconds between the bot's last reply and the human's first reply.
  def calculate_event_value(last_bot_reply, first_human_reply)
    first_human_reply.created_at.to_i - last_bot_reply.created_at.to_i
  end

  # Same interval restricted to the inbox's business hours (ReportingEventHelper).
  def calculate_event_value_in_business_hours(inbox, last_bot_reply, first_human_reply)
    business_hours(inbox, last_bot_reply.created_at, first_human_reply.created_at)
  end
end

View File

@@ -0,0 +1,49 @@
# MutexApplicationJob serves as a base class for jobs that require distributed locking mechanisms.
# It abstracts the locking logic using Redis and ensures that a block of code can be executed with
# mutual exclusion.
#
# The primary mechanism provided is the `with_lock` method, which accepts a key format and associated
# arguments. This method attempts to acquire a lock using the generated key, and if successful, it
# executes the provided block of code. If the lock cannot be acquired, it raises a LockAcquisitionError.
#
# To use this class, inherit from MutexApplicationJob and make use of the `with_lock` method in the
# `perform` method of the derived job class.
#
# Also see, retry mechanism here: https://edgeapi.rubyonrails.org/classes/ActiveJob/Exceptions/ClassMethods.html#method-i-retry_on
#
class MutexApplicationJob < ApplicationJob
  # Raised when the lock is already held; subclasses pair it with `retry_on`.
  class LockAcquisitionError < StandardError; end

  # Acquires `lock_key` (for at most `timeout`), yields, then releases the lock.
  def with_lock(lock_key, timeout = Redis::LockManager::LOCK_TIMEOUT)
    lock_manager = Redis::LockManager.new
    begin
      if lock_manager.lock(lock_key, timeout)
        log_attempt(lock_key, executions)
        yield
        # release the lock after the block has been executed
        lock_manager.unlock(lock_key)
      else
        handle_failed_lock_acquisition(lock_key)
      end
    rescue StandardError => e
      handle_error(e, lock_manager, lock_key)
    end
  end

  private

  # `executions` is ActiveJob's attempt counter for this job instance.
  def log_attempt(lock_key, executions)
    Rails.logger.info "[#{self.class.name}] Acquired lock for: #{lock_key} on attempt #{executions}"
  end

  # Releases the lock on unexpected errors (but not on LockAcquisitionError,
  # where another worker owns it) and re-raises so retry/discard policies apply.
  def handle_error(err, lock_manager, lock_key)
    lock_manager.unlock(lock_key) unless err.is_a?(LockAcquisitionError)
    raise err
  end

  # Logs and raises so the job is retried by the subclass's `retry_on`.
  def handle_failed_lock_acquisition(lock_key)
    Rails.logger.warn "[#{self.class.name}] Failed to acquire lock on attempt #{executions}: #{lock_key}"
    raise LockAcquisitionError, "Failed to acquire lock for key: #{lock_key}"
  end
end

View File

@@ -0,0 +1,15 @@
# Deletes a user's notifications — either everything or only the read ones —
# inside a single transaction.
class Notification::DeleteNotificationJob < ApplicationJob
  queue_as :low

  def perform(user, type: :all)
    ActiveRecord::Base.transaction do
      case type
      when :all
        # Delete all notifications
        user.notifications.destroy_all
      when :read
        # Delete only read notifications
        user.notifications.where.not(read_at: nil).destroy_all
      end
    end
  end
end

View File

@@ -0,0 +1,10 @@
class Notification::EmailNotificationJob < ApplicationJob
  queue_as :default

  def perform(notification)
    # no need to send email if notification has been read already
    return if notification.read_at.present?

    service = Notification::EmailNotificationService.new(notification: notification)
    service.perform
  end
end

View File

@@ -0,0 +1,7 @@
# Delivers a push notification via the push notification service.
class Notification::PushNotificationJob < ApplicationJob
  queue_as :default

  def perform(notification)
    service = Notification::PushNotificationService.new(notification: notification)
    service.perform
  end
end

View File

@@ -0,0 +1,17 @@
# Collapses duplicate notifications (same user, same primary actor) down to
# the single most recent one.
class Notification::RemoveDuplicateNotificationJob < ApplicationJob
  queue_as :default

  def perform(notification)
    return unless notification.is_a?(Notification)

    # All notifications sharing this user and primary actor, newest first.
    siblings = Notification.where(
      user_id: notification.user_id,
      primary_actor_id: notification.primary_actor_id
    ).order(created_at: :desc)

    # Keep the most recent record; destroy every older duplicate.
    siblings.offset(1).each(&:destroy)
  end
end

View File

@@ -0,0 +1,54 @@
# Keeps the notifications table bounded: removes notifications older than
# OLD_NOTIFICATION_THRESHOLD and caps each user at NOTIFICATION_LIMIT entries.
class Notification::RemoveOldNotificationJob < ApplicationJob
  queue_as :purgable

  # Max notifications retained per user after trimming.
  NOTIFICATION_LIMIT = 300
  # Age beyond which notifications are purged for everyone.
  OLD_NOTIFICATION_THRESHOLD = 1.month

  def perform
    remove_old_notifications
    trim_user_notifications
  end

  private

  # Hard-deletes (no callbacks) everything past the age threshold.
  def remove_old_notifications
    Notification.where('created_at < ?', OLD_NOTIFICATION_THRESHOLD.ago)
                .delete_all
  end

  def trim_user_notifications
    # Find users with more than NOTIFICATION_LIMIT notifications
    user_ids_exceeding_limit.each do |user_id|
      trim_notifications_for_user(user_id)
    end
  end

  # Users whose notification count exceeds the retention limit.
  def user_ids_exceeding_limit
    Notification.group(:user_id)
                .having('COUNT(*) > ?', NOTIFICATION_LIMIT)
                .pluck(:user_id)
  end

  def trim_notifications_for_user(user_id)
    # Find the cutoff notification (the 301st when we want to keep top 300)
    # Order by created_at DESC, then id DESC for deterministic ordering
    cutoff = Notification.where(user_id: user_id)
                         .order(created_at: :desc, id: :desc)
                         .offset(NOTIFICATION_LIMIT)
                         .limit(1)
                         .pick(:created_at, :id)
    return unless cutoff

    cutoff_time, cutoff_id = cutoff
    # Delete notifications older than cutoff, or same timestamp but lower/equal ID
    # Since we order by id DESC, higher IDs are kept (come first), lower IDs deleted
    # This avoids race conditions: notifications created after finding the cutoff
    # will have timestamps > cutoff_time and won't be incorrectly deleted
    Notification.where(user_id: user_id)
                .where('created_at < ? OR (created_at = ? AND id <= ?)',
                       cutoff_time, cutoff_time, cutoff_id)
                .delete_all
  end
end

View File

@@ -0,0 +1,25 @@
# Re-surfaces notifications whose snooze expired within the last 3 days.
class Notification::ReopenSnoozedNotificationsJob < ApplicationJob
  queue_as :low

  def perform
    due = Notification.where(snoozed_until: 3.days.ago..Time.current)
    due.find_in_batches(batch_size: 100) do |batch|
      batch.each { |notification| update_notification(notification) }
    end
  end

  private

  # Clears the snooze and read state, bumps activity timestamps, and records
  # the previous snooze deadline in meta['last_snoozed_at'].
  def update_notification(notification)
    meta = (notification.meta || {}).merge('last_snoozed_at' => notification.snoozed_until)
    notification.update!(
      snoozed_until: nil,
      updated_at: Time.current,
      last_activity_at: Time.current,
      meta: meta,
      read_at: nil
    )
  end
end

View File

@@ -0,0 +1,11 @@
# Relays a Chatwoot message to Slack, serialized per conversation/hook pair
# via a Redis mutex so Slack threads stay in order.
class SendOnSlackJob < MutexApplicationJob
  queue_as :medium
  retry_on LockAcquisitionError, wait: 1.second, attempts: 8

  def perform(message, hook)
    lock_key = format(
      ::Redis::Alfred::SLACK_MESSAGE_MUTEX,
      conversation_id: message.conversation_id,
      reference_id: hook.reference_id
    )
    with_lock(lock_key) do
      Integrations::Slack::SendOnSlackService.new(message: message, hook: hook).perform
    end
  end
end

View File

@@ -0,0 +1,39 @@
class SendReplyJob < ApplicationJob
  queue_as :high

  # Maps a channel class name to the service that delivers outgoing messages
  # on that channel. Facebook pages are special-cased in #perform.
  CHANNEL_SERVICES = {
    'Channel::TwitterProfile' => ::Twitter::SendOnTwitterService,
    'Channel::TwilioSms' => ::Twilio::SendOnTwilioService,
    'Channel::Line' => ::Line::SendOnLineService,
    'Channel::Telegram' => ::Telegram::SendOnTelegramService,
    'Channel::Whatsapp' => ::Whatsapp::SendOnWhatsappService,
    'Channel::Sms' => ::Sms::SendOnSmsService,
    'Channel::Instagram' => ::Instagram::SendOnInstagramService,
    'Channel::Tiktok' => ::Tiktok::SendOnTiktokService,
    'Channel::Email' => ::Email::SendOnEmailService,
    'Channel::WebWidget' => ::Messages::SendEmailNotificationService,
    'Channel::Api' => ::Messages::SendEmailNotificationService
  }.freeze

  # Looks up the delivery service for the message's channel and runs it;
  # unknown channel types are ignored.
  def perform(message_id)
    message = Message.find(message_id)
    channel_name = message.conversation.inbox.channel.class.to_s
    return send_on_facebook_page(message) if channel_name == 'Channel::FacebookPage'

    service_class = CHANNEL_SERVICES[channel_name]
    service_class.new(message: message).perform if service_class
  end

  private

  # Instagram DMs arriving through a Facebook page use the Messenger service;
  # everything else on the page goes through the Facebook service.
  def send_on_facebook_page(message)
    service = if message.conversation.additional_attributes['type'] == 'instagram_direct_message'
                ::Instagram::Messenger::SendOnInstagramService
              else
                ::Facebook::SendOnFacebookService
              end
    service.new(message: message).perform
  end
end

View File

@@ -0,0 +1,49 @@
# Unfurls shared links in Slack, provided a matching integration hook exists
# and the Slack bot is a member of the channel the link was posted in.
class SlackUnfurlJob < ApplicationJob
  queue_as :low

  def perform(params)
    @params = params
    set_integration_hook
    return unless channel_has_access

    Integrations::Slack::SlackLinkUnfurlService.new(params: @params, integration_hook: @integration_hook).perform
  end

  private

  # Find the integration hook by taking first link from array of links
  # Assume that all the links are from the same account, how ever there is a possibility that the links are from different accounts.
  # TODO: Fix this edge case later
  def set_integration_hook
    url = extract_url
    return unless url

    account_id = extract_account_id(url)
    @integration_hook = Integrations::Hook.find_by(account_id: account_id, app_id: 'slack')
  end

  # First shared link URL from the Slack event payload, if any.
  def extract_url
    @params.dig(:event, :links)&.first&.[](:url)
  end

  # Pulls the account id out of a URL path segment like "/accounts/123".
  def extract_account_id(url)
    account_id_regex = %r{/accounts/(\d+)}
    match_data = url.match(account_id_regex)
    match_data[1] if match_data
  end

  # Check the channel has access to the bot to unfurl the links
  def channel_has_access
    return if @integration_hook.blank?

    slack_client = Slack::Web::Client.new(token: @integration_hook.access_token)
    response = slack_client.conversations_members(channel: @params.dig(:event, :channel))
    response['ok']
  rescue Slack::Web::Api::Errors::ChannelNotFound => e
    # The link unfurl event will not work for private channels and other accounts channels
    # So we can ignore the error
    Rails.logger.error "Exception in SlackUnfurlJob: #{e.message}"
    false
  end
end

View File

@@ -0,0 +1,25 @@
# Central cron fan-out: enqueues due one-off campaigns plus the recurring
# snooze/resolution/template-sync maintenance jobs.
class TriggerScheduledItemsJob < ApplicationJob
  queue_as :scheduled_jobs

  def perform
    trigger_due_campaigns

    # Job to reopen snoozed conversations
    Conversations::ReopenSnoozedConversationsJob.perform_later
    # Job to reopen snoozed notifications
    Notification::ReopenSnoozedNotificationsJob.perform_later
    # Job to auto-resolve conversations
    Account::ConversationsResolutionSchedulerJob.perform_later
    # Job to sync whatsapp templates
    Channels::Whatsapp::TemplatesSyncSchedulerJob.perform_later
  end

  private

  # Active one-off campaigns scheduled within the last 3 days get triggered.
  def trigger_due_campaigns
    Campaign.where(campaign_type: :one_off, campaign_status: :active)
            .where(scheduled_at: 3.days.ago..Time.current)
            .find_each(batch_size: 100) do |campaign|
      Campaigns::TriggerOneoffCampaignJob.perform_later(campaign)
    end
  end
end

TriggerScheduledItemsJob.prepend_mod_with('TriggerScheduledItemsJob')

View File

@@ -0,0 +1,7 @@
# Delivers a webhook payload to a subscriber URL via Webhooks::Trigger.
class WebhookJob < ApplicationJob
  queue_as :medium

  # There are 3 types of webhooks, account, inbox and agent_bot
  def perform(url, payload, webhook_type = :account_webhook)
    Webhooks::Trigger.execute(url, payload, webhook_type)
  end
end

View File

@@ -0,0 +1,8 @@
# Records Facebook message delivery receipts from webhook payloads.
class Webhooks::FacebookDeliveryJob < ApplicationJob
  queue_as :low

  def perform(message)
    parsed = ::Integrations::Facebook::MessageParser.new(message)
    Integrations::Facebook::DeliveryStatus.new(params: parsed).perform
  end
end

View File

@@ -0,0 +1,17 @@
class Webhooks::FacebookEventsJob < MutexApplicationJob
  queue_as :default
  retry_on LockAcquisitionError, wait: 1.second, attempts: 8

  # Parses the raw webhook body and processes it under a mutex keyed on the
  # sender/recipient pair, so events for one conversation are serialized.
  def perform(message)
    parsed = ::Integrations::Facebook::MessageParser.new(message)
    lock_key = format(::Redis::Alfred::FACEBOOK_MESSAGE_MUTEX, sender_id: parsed.sender_id, recipient_id: parsed.recipient_id)
    with_lock(lock_key) { process_message(parsed) }
  end

  # Creates the Chatwoot message from the parsed payload.
  def process_message(response)
    ::Integrations::Facebook::MessageCreator.new(response).perform
  end
end

View File

@@ -0,0 +1,213 @@
class Webhooks::InstagramEventsJob < MutexApplicationJob
  queue_as :default
  retry_on LockAcquisitionError, wait: 1.second, attempts: 8

  # @return [Array] We will support further events like reaction or seen in future
  SUPPORTED_EVENTS = [:message, :read].freeze

  # Processes a batch of Instagram webhook entries under a mutex keyed on
  # (contact instagram id, IG account id) so events are handled serially.
  def perform(entries)
    @entries = entries
    key = format(::Redis::Alfred::IG_MESSAGE_MUTEX, sender_id: contact_instagram_id, ig_account_id: ig_account_id)
    with_lock(key) do
      process_entries(entries)
    end
  end

  # https://developers.facebook.com/docs/messenger-platform/instagram/features/webhook
  def process_entries(entries)
    entries.each do |entry|
      process_single_entry(entry.with_indifferent_access)
    end
  end

  private

  # Test payloads arrive in a "changes" shape; real DMs use a "messaging"
  # array — see the payload examples below this class.
  def process_single_entry(entry)
    if test_event?(entry)
      process_test_event(entry)
      return
    end
    process_messages(entry)
  end

  # Dispatches each messaging event to its handler (#message / #read) on the
  # channel owning the Instagram id in the payload; unknown channels skipped.
  def process_messages(entry)
    messages(entry).each do |messaging|
      Rails.logger.info("Instagram Events Job Messaging: #{messaging}")
      instagram_id = instagram_id(messaging)
      channel = find_channel(instagram_id)
      next if channel.blank?

      if (event_name = event_name(messaging))
        send(event_name, messaging, channel)
      end
    end
  end

  # Echo events are messages sent by our own connected account.
  def agent_message_via_echo?(messaging)
    messaging[:message].present? && messaging[:message][:is_echo].present?
  end

  def test_event?(entry)
    entry[:changes].present?
  end

  def process_test_event(entry)
    messaging = extract_messaging_from_test_event(entry)
    Instagram::TestEventService.new(messaging).perform if messaging.present?
  end

  def extract_messaging_from_test_event(entry)
    entry[:changes].first&.dig(:value) if entry[:changes].present?
  end

  # The id that identifies OUR account: sender for echoes, recipient otherwise.
  def instagram_id(messaging)
    if agent_message_via_echo?(messaging)
      messaging[:sender][:id]
    else
      messaging[:recipient][:id]
    end
  end

  def ig_account_id
    @entries&.first&.dig(:id)
  end

  def contact_instagram_id
    entry = @entries&.first
    return nil unless entry

    # Handle both messaging and standby arrays
    messaging = (entry[:messaging].presence || entry[:standby] || []).first
    return nil unless messaging

    # For echo messages (outgoing from our account), use recipient's ID (the contact)
    # For incoming messages (from contact), use sender's ID (the contact)
    if messaging.dig(:message, :is_echo)
      messaging.dig(:recipient, :id)
    else
      messaging.dig(:sender, :id)
    end
  end

  def sender_id
    @entries&.dig(0, :messaging, 0, :sender, :id)
  end

  def find_channel(instagram_id)
    # There will be chances for the instagram account to be connected to a facebook page,
    # so we need to check for both instagram and facebook page channels
    # priority is for instagram channel which created via instagram login
    channel = Channel::Instagram.find_by(instagram_id: instagram_id)
    # If not found, fallback to the facebook page channel
    channel ||= Channel::FacebookPage.find_by(instagram_id: instagram_id)
    channel
  end

  # First supported event key present in the messaging hash (memoized).
  def event_name(messaging)
    @event_name ||= SUPPORTED_EVENTS.find { |key| messaging.key?(key) }
  end

  def message(messaging, channel)
    if channel.is_a?(Channel::Instagram)
      ::Instagram::MessageText.new(messaging, channel).perform
    else
      ::Instagram::Messenger::MessageText.new(messaging, channel).perform
    end
  end

  def read(messaging, channel)
    # Use a single service to handle read status for both channel types since the params are same
    ::Instagram::ReadStatusService.new(params: messaging, channel: channel).perform
  end

  def messages(entry)
    (entry[:messaging].presence || entry[:standby] || [])
  end
end
# Actual response from Instagram webhook (both via Facebook page and Instagram direct)
# [
# {
# "time": <timestamp>,
# "id": <INSTAGRAM_USER_ID>,
# "messaging": [
# {
# "sender": {
# "id": <INSTAGRAM_USER_ID>
# },
# "recipient": {
# "id": <INSTAGRAM_USER_ID>
# },
# "timestamp": <timestamp>,
# "message": {
# "mid": <MESSAGE_ID>,
# "text": <MESSAGE_TEXT>
# }
# }
# ]
# }
# ]
# Instagram's webhook via Instagram direct testing quirk: Test payloads vs Actual payloads
# When testing in Facebook's developer dashboard, you'll get a Page-style
# payload with a "changes" object. But don't be fooled! Real Instagram DMs
# arrive in the familiar Messenger format with a "messaging" array.
# This apparent inconsistency is actually by design - Instagram's webhooks
# use different formats for testing vs production to maintain compatibility
# with both Instagram Direct and Facebook Page integrations.
# See: https://developers.facebook.com/docs/instagram-platform/webhooks#event-notifications
# Test response from via Instagram direct
# [
# {
# "id": "0",
# "time": <timestamp>,
# "changes": [
# {
# "field": "messages",
# "value": {
# "sender": {
# "id": "12334"
# },
# "recipient": {
# "id": "23245"
# },
# "timestamp": "1527459824",
# "message": {
# "mid": "random_mid",
# "text": "random_text"
# }
# }
# }
# ]
# }
# ]
# Test response via Facebook page
# [
# {
# "time": <timestamp>,,
# "id": "0",
# "messaging": [
# {
# "sender": {
# "id": "12334"
# },
# "recipient": {
# "id": "23245"
# },
# "timestamp": <timestamp>,
# "message": {
# "mid": "random_mid",
# "text": "random_text"
# }
# }
# ]
# }
# ]

View File

@@ -0,0 +1,24 @@
class Webhooks::LineEventsJob < ApplicationJob
  queue_as :default

  # Validates and processes a LINE webhook delivery.
  #
  # @param params [Hash] request params; must carry :line_channel_id and a 'line' payload
  # @param signature [String] value of the X-Line-Signature request header
  # @param post_body [String] raw request body used for HMAC verification
  def perform(params: {}, signature: '', post_body: '')
    @params = params
    return unless valid_event_payload?
    return unless valid_post_body?(post_body, signature)

    Line::IncomingMessageService.new(inbox: @channel.inbox, params: @params['line'].with_indifferent_access).perform
  end

  private

  # Looks up and memoizes the channel; truthy only when a channel exists.
  def valid_event_payload?
    @channel = Channel::Line.find_by(line_channel_id: @params[:line_channel_id]) if @params[:line_channel_id]
  end

  # https://developers.line.biz/en/reference/messaging-api/#signature-validation
  # validate the line payload
  def valid_post_body?(post_body, signature)
    digest = OpenSSL::HMAC.digest(OpenSSL::Digest.new('SHA256'), @channel.line_channel_secret, post_body)
    expected_signature = Base64.strict_encode64(digest)
    # Compare in constant time: a plain `==` on an attacker-supplied signature
    # leaks match-prefix length via response timing.
    ActiveSupport::SecurityUtils.secure_compare(expected_signature, signature)
  end
end

View File

@@ -0,0 +1,28 @@
class Webhooks::SmsEventsJob < ApplicationJob
  queue_as :default

  # Webhook event types this job knows how to process.
  SUPPORTED_EVENTS = %w[message-received message-delivered message-failed].freeze

  # Finds the SMS channel for the destination number and routes the payload to
  # either the delivery-status or the incoming-message service.
  def perform(params = {})
    return unless SUPPORTED_EVENTS.include?(params[:type])

    channel = Channel::Sms.find_by(phone_number: params[:to])
    process_event_params(channel, params) if channel
  end

  private

  def process_event_params(channel, params)
    message_params = params[:message].with_indifferent_access
    if delivery_event?(params)
      Sms::DeliveryStatusService.new(channel: channel, params: message_params).perform
    else
      Sms::IncomingMessageService.new(inbox: channel.inbox, params: message_params).perform
    end
  end

  # Delivery events report the status of a message we sent.
  def delivery_event?(params)
    %w[message-delivered message-failed].include?(params[:type])
  end
end

View File

@@ -0,0 +1,44 @@
# Routes an incoming Telegram webhook payload to the service matching the
# channel identified by bot_token; missing/inactive channels are logged.
class Webhooks::TelegramEventsJob < ApplicationJob
  queue_as :default

  def perform(params = {})
    return unless params[:bot_token]

    channel = Channel::Telegram.find_by(bot_token: params[:bot_token])
    if channel_is_inactive?(channel)
      log_inactive_channel(channel, params)
      return
    end
    process_event_params(channel, params)
  end

  private

  # Inactive when no channel matched or its account is disabled.
  def channel_is_inactive?(channel)
    channel.blank? || !channel.account.active?
  end

  def log_inactive_channel(channel, params)
    reason = if channel&.id
               "Account #{channel.account.id} is not active for channel #{channel.id}"
             else
               "Channel not found for bot_token: #{params[:bot_token]}"
             end
    Rails.logger.warn("Telegram event discarded: #{reason}")
  end

  # Edited messages go through the update service; everything else is treated
  # as an incoming message.
  def process_event_params(channel, params)
    return unless params[:telegram]

    payload = params['telegram'].with_indifferent_access
    edited = params.dig(:telegram, :edited_message).present? || params.dig(:telegram, :edited_business_message).present?
    service = edited ? Telegram::UpdateMessageService : Telegram::IncomingMessageService
    service.new(inbox: channel.inbox, params: payload).perform
  end
end

View File

@@ -0,0 +1,69 @@
# https://business-api.tiktok.com/portal/docs?id=1832190670631937
class Webhooks::TiktokEventsJob < MutexApplicationJob
  queue_as :default
  retry_on LockAcquisitionError, wait: 2.seconds, attempts: 8

  # Event names this job dispatches to (one handler method per name).
  SUPPORTED_EVENTS = [:im_send_msg, :im_receive_msg, :im_mark_read_msg].freeze

  # Processes one TikTok webhook event under a mutex keyed on the
  # business/conversation pair, so events per conversation are serialized.
  def perform(event)
    @event = event.with_indifferent_access
    return if channel_is_inactive?

    key = format(::Redis::Alfred::TIKTOK_MESSAGE_MUTEX, business_id: business_id, conversation_id: conversation_id)
    with_lock(key, 10.seconds) do
      process_event
    end
  end

  private

  # True when no matching channel exists or its account is disabled.
  def channel_is_inactive?
    return true if channel.blank?
    return true unless channel.account.active?

    false
  end

  def process_event
    return if event_name.blank? || channel.blank?

    send(event_name)
  end

  # Whitelists the event name so #send can only reach a supported handler.
  def event_name
    @event_name ||= SUPPORTED_EVENTS.include?(@event[:event].to_sym) ? @event[:event] : nil
  end

  def business_id
    @business_id ||= @event[:user_openid]
  end

  # The webhook delivers `content` as a JSON string; parse and memoize it.
  def content
    @content ||= JSON.parse(@event[:content]).deep_symbolize_keys
  end

  def conversation_id
    @conversation_id ||= content[:conversation_id]
  end

  def channel
    @channel ||= Channel::Tiktok.find_by(business_id: business_id)
  end

  # Receive real-time notifications if you send a message to a user.
  def im_send_msg
    # This can be either an echo message or a message sent directly via tiktok application
    ::Tiktok::MessageService.new(channel: channel, content: content, outgoing_echo: true).perform
  end

  # Receive real-time notifications if a user outside the European Economic Area (EEA), Switzerland, or the UK sends a message to you.
  def im_receive_msg
    ::Tiktok::MessageService.new(channel: channel, content: content).perform
  end

  # Receive real-time notifications when a Personal Account user marks all messages in a session as read.
  def im_mark_read_msg
    ::Tiktok::ReadStatusService.new(channel: channel, content: content).perform
  end
end

View File

@@ -0,0 +1,7 @@
# Records Twilio delivery-status callbacks.
class Webhooks::TwilioDeliveryStatusJob < ApplicationJob
  queue_as :low

  def perform(params = {})
    service = ::Twilio::DeliveryStatusService.new(params: params)
    service.perform
  end
end

View File

@@ -0,0 +1,17 @@
class Webhooks::TwilioEventsJob < ApplicationJob
  queue_as :low

  # Skip processing if Body parameter, MediaUrl0, or location data is not present
  # This is to skip processing delivery events being delivered to this endpoint
  def perform(params = {})
    has_content = params[:Body].present? || params[:MediaUrl0].present? || valid_location_message?(params)
    return unless has_content

    ::Twilio::IncomingMessageService.new(params: params).perform
  end

  private

  # Location messages carry coordinates instead of a text body.
  def valid_location_message?(params)
    params[:MessageType] == 'location' && params[:Latitude].present? && params[:Longitude].present?
  end
end

View File

@@ -0,0 +1,102 @@
# Resolves the WhatsApp channel for an incoming webhook and routes the payload
# to either the echo pipeline or the regular incoming-message pipeline.
class Webhooks::WhatsappEventsJob < ApplicationJob
  queue_as :low

  def perform(params = {})
    channel = find_channel_from_whatsapp_business_payload(params)

    if channel_is_inactive?(channel)
      Rails.logger.warn("Inactive WhatsApp channel: #{channel&.phone_number || "unknown - #{params[:phone_number]}"}")
      return
    end

    if message_echo_event?(params)
      handle_message_echo(channel, params)
    else
      handle_message_events(channel, params)
    end
  end

  # Detects if the webhook is an SMB message echo event (message sent from WhatsApp Business app)
  # This is part of WhatsApp coexistence feature where businesses can respond from both
  # Chatwoot and the WhatsApp Business app, with messages synced to Chatwoot.
  #
  # Regular message payload (field: "messages"):
  # {
  #   "entry": [{
  #     "changes": [{
  #       "field": "messages",
  #       "value": {
  #         "contacts": [{ "wa_id": "919745786257", "profile": { "name": "Customer" } }],
  #         "messages": [{ "from": "919745786257", "id": "wamid...", "text": { "body": "Hello" } }]
  #       }
  #     }]
  #   }]
  # }
  #
  # Echo message payload (field: "smb_message_echoes"):
  # {
  #   "entry": [{
  #     "changes": [{
  #       "field": "smb_message_echoes",
  #       "value": {
  #         "message_echoes": [{ "from": "971545296927", "to": "919745786257", "id": "wamid...", "text": { "body": "Hi" } }]
  #       }
  #     }]
  #   }]
  # }
  #
  # Key differences:
  # - field: "smb_message_echoes" instead of "messages"
  # - message_echoes[] instead of messages[]
  # - "from" is the business number, "to" is the contact (reversed from regular messages)
  # - No "contacts" array in echo payload
  def message_echo_event?(params)
    params.dig(:entry, 0, :changes, 0, :field) == 'smb_message_echoes'
  end

  # Echoes always go through the Cloud service, flagged as outgoing.
  def handle_message_echo(channel, params)
    Whatsapp::IncomingMessageWhatsappCloudService.new(inbox: channel.inbox, params: params, outgoing_echo: true).perform
  end

  # Picks the incoming-message service based on the channel's provider.
  def handle_message_events(channel, params)
    case channel.provider
    when 'whatsapp_cloud'
      Whatsapp::IncomingMessageWhatsappCloudService.new(inbox: channel.inbox, params: params).perform
    else
      Whatsapp::IncomingMessageService.new(inbox: channel.inbox, params: params).perform
    end
  end

  private

  # Inactive when the channel is missing, needs reauthorization, or its
  # account is disabled.
  def channel_is_inactive?(channel)
    return true if channel.blank?
    return true if channel.reauthorization_required?
    return true unless channel.account.active?

    false
  end

  def find_channel_by_url_param(params)
    return unless params[:phone_number]

    Channel::Whatsapp.find_by(phone_number: params[:phone_number])
  end

  def find_channel_from_whatsapp_business_payload(params)
    # for the case where facebook cloud api support multiple numbers for a single app
    # https://github.com/chatwoot/chatwoot/issues/4712#issuecomment-1173838350
    # we will give priority to the phone_number in the payload
    return get_channel_from_wb_payload(params) if params[:object] == 'whatsapp_business_account'

    find_channel_by_url_param(params)
  end

  # Extracts the display phone number and phone_number_id from the payload
  # metadata and returns the matching channel only when both agree.
  def get_channel_from_wb_payload(wb_params)
    phone_number = "+#{wb_params[:entry].first[:changes].first.dig(:value, :metadata, :display_phone_number)}"
    phone_number_id = wb_params[:entry].first[:changes].first.dig(:value, :metadata, :phone_number_id)
    channel = Channel::Whatsapp.find_by(phone_number: phone_number)
    # validate to ensure the phone number id matches the whatsapp channel
    return channel if channel && channel.provider_config['phone_number_id'] == phone_number_id
  end
end
end