Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
91 changes: 91 additions & 0 deletions app/jobs/runtime/delayed_jobs_recover.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
module VCAP::CloudController
  module Jobs
    module Runtime
      # Clock job that re-enqueues polling for service instance create operations
      # whose backing delayed_job died permanently (e.g. after a transient DB
      # connection flip) while the broker still reports the operation in progress.
      class DelayedJobsRecover < VCAP::CloudController::Jobs::CCJob
        def perform
          logger.info('Recover halted delayed jobs')
          recover
        end

        # No retries: the clock re-runs this job on its own schedule, so a
        # failed pass is simply picked up again on the next tick.
        def max_attempts
          1
        end

        private

        def recover
          # Find stuck service instance create operations where the broker is still working
          # but CC's polling job has permanently failed due to a transient error (e.g. brief db connection flip).
          # Join path: service_instance_operations → service_instances → jobs → delayed_jobs.
          #
          # Filters:
          # - service_instance_operations.state='in progress': the broker has not yet reported a final state
          #   (succeeded or failed) that CC could successfully persist; if CC had received and saved a final
          #   state from the broker, this column would already be 'succeeded' or 'failed' — not 'in progress'
          # - service_instance_operations.type='create': scope to create operations only
          # - service_instance_operations.created_at > cutoff: operations beyond the max async polling window
          #   are intentionally excluded — the broker has given up on them too, so re-enqueuing is pointless
          # - jobs.state IN (POLLING, FAILED): the pollable job has not reached a terminal success state;
          #   POLLING covers the case where the failure hook itself couldn't write FAILED due to the DB flip
          # - jobs.operation='service_instance.create': prevents matching update/delete jobs for the same
          #   service instance that happen to share the same resource_guid
          # - delayed_jobs.failed_at IS NOT NULL: the delayed job permanently failed (exhausted max_attempts);
          #   jobs still alive or locked have failed_at=NULL and must not be touched
          cutoff_time = Time.now - default_maximum_duration_seconds
          stuck = ServiceInstanceOperation.
                  join(:service_instances, id: Sequel[:service_instance_operations][:service_instance_id]).
                  join(:jobs, resource_guid: Sequel[:service_instances][:guid]).
                  join(:delayed_jobs, guid: Sequel[:jobs][:delayed_job_guid]).
                  where(Sequel[:service_instance_operations][:state] => 'in progress').
                  where(Sequel[:service_instance_operations][:type] => 'create').
                  where { Sequel[:service_instance_operations][:created_at] > cutoff_time }.
                  where(Sequel[:jobs][:state] => [PollableJobModel::POLLING_STATE, PollableJobModel::FAILED_STATE]).
                  where(Sequel[:jobs][:operation] => 'service_instance.create').
                  exclude(Sequel[:delayed_jobs][:failed_at] => nil).
                  select(Sequel[:jobs][:guid].as(:pollable_guid), Sequel[:delayed_jobs][:guid].as(:dj_guid)).
                  order(Sequel[:service_instance_operations][:created_at]).
                  limit(batch_size)

          stuck.each do |row|
            delayed = Delayed::Job.first(guid: row[:dj_guid])
            # the dead delayed_job may have been cleaned up concurrently; skip it
            next unless delayed

            reenqueue(row[:pollable_guid], delayed)
          end
        end

        def reenqueue(pollable_guid, delayed)
          # re-verify atomically that the pollable job still points to this dead delayed_job.
          # if another process already re-enqueued a new job, pollable.delayed_job_guid was
          # updated to the new delayed_job's guid, so where clause returns nil and we skip safely.
          PollableJobModel.db.transaction do
            pjob = PollableJobModel.where(guid: pollable_guid,
                                          delayed_job_guid: delayed.guid,
                                          state: [PollableJobModel::POLLING_STATE, PollableJobModel::FAILED_STATE]).
                   for_update.first
            # Use `next` (not `return`) to leave the transaction block: a non-local
            # return from inside a DB transaction block is a classic footgun —
            # Sequel commits on it, but it hides the commit point and behaves
            # differently in other ORMs (ActiveRecord raises/rolls back).
            next unless pjob

            # bring the pollable job into the clean polling state
            pjob.update(cf_api_error: nil, state: PollableJobModel::POLLING_STATE)

            # unwrap the serialized handler and re-enqueue via the reoccurring job's enqueue_next_job method
            inner_job = Jobs::Enqueuer.unwrap_job(delayed.payload_object)
            inner_job.send(:enqueue_next_job, pjob)
          end
        end

        # Maximum age of recoverable operations, as an ActiveSupport duration
        # derived from the configured broker async polling window (minutes).
        def default_maximum_duration_seconds
          Config.config.get(:broker_client_max_async_poll_duration_minutes).minutes
        end

        def logger
          @logger ||= Steno.logger('cc.background')
        end

        # Cap work per clock tick; anything left over is handled on the next run.
        def batch_size
          10
        end
      end
    end
  end
end
3 changes: 2 additions & 1 deletion config/cloud_controller.yml
Original file line number Diff line number Diff line change
Expand Up @@ -369,7 +369,8 @@ diego_sync:
pending_droplets:
frequency_in_seconds: 300
expiration_in_seconds: 42

delayed_jobs_recover:
frequency_in_seconds: 600
pending_builds:
expiration_in_seconds: 42
frequency_in_seconds: 300
Expand Down
38 changes: 38 additions & 0 deletions db/migrations/20260505071445_add_jobs_operation_state_index.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# Adds a composite (operation, state) index on jobs to speed up the
# DelayedJobsRecover lookup of non-terminal pollable jobs.
Sequel.migration do
  no_transaction # required for concurrently option on postgres

  up do
    if database_type == :postgres
      VCAP::Migration.with_concurrent_timeout(self) do
        # partial index: the recover query only ever filters on these two states
        add_index :jobs, %i[operation state],
                  name: :jobs_operation_state_index,
                  where: "state IN ('POLLING', 'FAILED')",
                  if_not_exists: true,
                  concurrently: true
      end
    elsif database_type == :mysql
      # MySQL supports neither IF NOT EXISTS for indexes nor concurrent index
      # builds, so check idempotency up front. The check runs outside the
      # alter_table block where `self` is the Database — reading `@db` inside
      # the block would depend on AlterTableGenerator internals.
      unless indexes(:jobs).key?(:jobs_operation_state_index)
        alter_table(:jobs) do
          # rubocop:disable Sequel/ConcurrentIndex -- MySQL does not support concurrent index operations
          add_index %i[operation state], name: :jobs_operation_state_index
          # rubocop:enable Sequel/ConcurrentIndex
        end
      end
    end
  end

  down do
    if database_type == :postgres
      VCAP::Migration.with_concurrent_timeout(self) do
        drop_index :jobs, %i[operation state],
                   name: :jobs_operation_state_index,
                   if_exists: true,
                   concurrently: true
      end
    elsif database_type == :mysql
      # mirror of the up branch: only issue the DROP when the index exists
      if indexes(:jobs).key?(:jobs_operation_state_index)
        alter_table(:jobs) do
          # rubocop:disable Sequel/ConcurrentIndex
          drop_index %i[operation state], name: :jobs_operation_state_index
          # rubocop:enable Sequel/ConcurrentIndex
        end
      end
    end
  end
end
3 changes: 2 additions & 1 deletion lib/cloud_controller/clock/scheduler.rb
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,8 @@ class Scheduler
{ name: 'pending_droplets', class: Jobs::Runtime::PendingDropletCleanup },
{ name: 'pending_builds', class: Jobs::Runtime::PendingBuildCleanup },
{ name: 'failed_jobs', class: Jobs::Runtime::FailedJobsCleanup },
{ name: 'service_operations_initial_cleanup', class: Jobs::Runtime::ServiceOperationsInitialCleanup }
{ name: 'service_operations_initial_cleanup', class: Jobs::Runtime::ServiceOperationsInitialCleanup },
{ name: 'delayed_jobs_recover', class: Jobs::Runtime::DelayedJobsRecover }
].freeze

def initialize(config)
Expand Down
3 changes: 3 additions & 0 deletions lib/cloud_controller/config_schemas/clock_schema.rb
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,9 @@ class ClockSchema < VCAP::Config
completed_tasks: {
cutoff_age_in_days: Integer
},
delayed_jobs_recover: {
frequency_in_seconds: Integer
},
default_health_check_timeout: Integer,

uaa: {
Expand Down
1 change: 1 addition & 0 deletions lib/cloud_controller/jobs.rb
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
require 'jobs/runtime/expired_blob_cleanup'
require 'jobs/runtime/expired_orphaned_blob_cleanup'
require 'jobs/runtime/expired_resource_cleanup'
require 'jobs/runtime/delayed_jobs_recover'
require 'jobs/runtime/failed_jobs_cleanup'
require 'jobs/runtime/service_operations_initial_cleanup'
require 'jobs/runtime/legacy_jobs'
Expand Down
1 change: 1 addition & 0 deletions lib/tasks/jobs.rake
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ namespace :jobs do
'audit_events',
'failed_jobs',
'service_operations_initial_cleanup',
'delayed_jobs_recover',
'service_usage_events',
'completed_tasks',
'expired_blob_cleanup',
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
# rubocop:disable Migration/TooManyMigrationRuns
require 'spec_helper'
require 'migrations/helpers/migration_shared_context'

# Returns true when the partial index created by the migration exists.
# partial indexes are not returned in `db.indexes`. That's why we have to query this information manually.
def operation_state_partial_index_present
  partial_indexes = db.fetch("SELECT * FROM pg_indexes WHERE tablename = 'jobs' AND indexname = 'jobs_operation_state_index';")

  # Enumerable#any? replaces the manual boolean-flag loop: true iff the
  # query yields at least one row. (Plain enumeration only — custom-SQL
  # datasets reject query-modifying methods like `limit`/`first`.)
  partial_indexes.any?
end

RSpec.describe 'migration to add operation_state_index on jobs table', isolation: :truncation, type: :migration do
  include_context 'migration' do
    let(:migration_filename) { '20260505071445_add_jobs_operation_state_index.rb' }
  end

  describe 'jobs table' do
    it 'adds index and handles idempotency gracefully' do
      # Run the migrator forward to this migration / back one step.
      migrate_up = lambda do
        Sequel::Migrator.run(db, migrations_path, target: current_migration_index, allow_missing_migration_files: true)
      end
      migrate_down = lambda do
        Sequel::Migrator.run(db, migrations_path, target: current_migration_index - 1, allow_missing_migration_files: true)
      end

      case db.database_type
      when :postgres
        expect(operation_state_partial_index_present).to be_falsey

        # up migration, then once more to prove idempotency
        2.times do
          expect { migrate_up.call }.not_to raise_error
          expect(operation_state_partial_index_present).to be_truthy
        end

        # down migration, then once more to prove idempotency
        2.times do
          expect { migrate_down.call }.not_to raise_error
          expect(operation_state_partial_index_present).to be_falsey
        end
      when :mysql
        expect(db.indexes(:jobs)).not_to include(:jobs_operation_state_index)

        # up migration, then once more to prove idempotency
        2.times do
          expect { migrate_up.call }.not_to raise_error
          expect(db.indexes(:jobs)).to include(:jobs_operation_state_index)
        end

        # down migration, then once more to prove idempotency
        2.times do
          expect { migrate_down.call }.not_to raise_error
          expect(db.indexes(:jobs)).not_to include(:jobs_operation_state_index)
        end
      end
    end
  end
end
# rubocop:enable Migration/TooManyMigrationRuns
Loading
Loading