diff --git a/jstests/sharding/range_deleter_upgrade_rd_task_removed.js b/jstests/sharding/range_deleter_upgrade_rd_task_removed.js
new file mode 100644
index 00000000000..f76addb6f06
--- /dev/null
+++ b/jstests/sharding/range_deleter_upgrade_rd_task_removed.js
@@ -0,0 +1,58 @@
+/**
+ * Tests that setting orphan counters on upgrade is resilient to range deletions completing.
+ *
+ * @tags: [
+ *   requires_fcv_60
+ * ]
+ */
+
+(function() {
+'use strict';
+load("jstests/libs/fail_point_util.js");
+load('jstests/libs/parallel_shell_helpers.js');
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 1}, other: {enableBalancer: false}});
+
+// Setup database and collection for test
+const dbName = 'db';
+const db = st.getDB(dbName);
+assert.commandWorked(
+    st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName}));
+const coll = db['test'];
+const nss = coll.getFullName();
+assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {_id: 1}}));
+
+assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastContinuousFCV}));
+
+let rangeDeletionFP = configureFailPoint(st.shard0, "pauseBeforeRemovingRangeDeletionTask");
+let orphanCountSetterFP = configureFailPoint(st.shard0, "pauseBeforeSettingOrphanCountOnDocument");
+
+jsTest.log("Move chunk to create range deletion on shard 0");
+assert.commandWorked(st.s.adminCommand({moveChunk: nss, find: {_id: 0}, to: st.shard1.shardName}));
+
+jsTest.log("Wait for range deletion to complete but pause before removing the document");
+rangeDeletionFP.wait();
+
+jsTest.log("Begin FCV upgrade to 6.0 where orphan counts must be set");
+const FCVUpgrade = startParallelShell(
+    funWithArgs(function(fcv) {
+        assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: fcv}));
+    }, latestFCV), st.s.port);
+
+jsTest.log(
+    "Wait for us to find the range deletion document but pause before performing the update");
+orphanCountSetterFP.wait();
+
+jsTest.log("Release the range deletion failpoint and wait for it to be removed");
+rangeDeletionFP.off();
+assert.soon(() => {
+    let rangeDeletionTasks = st.rs0.getPrimary().getDB('config').rangeDeletions.find().toArray();
+    return rangeDeletionTasks.length === 0;
+});
+
+jsTest.log("Release the upgrade task");
+orphanCountSetterFP.off();
+FCVUpgrade();
+
+st.stop();
+})();
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index 9ca0a8f2518..22ad18d9046 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -72,6 +72,8 @@ MONGO_FAIL_POINT_DEFINE(hangAfterDoingDeletion);
 MONGO_FAIL_POINT_DEFINE(suspendRangeDeletion);
 MONGO_FAIL_POINT_DEFINE(throwWriteConflictExceptionInDeleteRange);
 MONGO_FAIL_POINT_DEFINE(throwInternalErrorInDeleteRange);
+MONGO_FAIL_POINT_DEFINE(pauseBeforeRemovingRangeDeletionTask);
+MONGO_FAIL_POINT_DEFINE(pauseBeforeSettingOrphanCountOnDocument);
 
 /**
  * Returns whether the currentCollection has the same UUID as the expectedCollectionUuid. Used to
@@ -624,6 +626,7 @@ SharedSemiFuture<void> removeDocumentsInRange(
                 return s;
             }
 
+            pauseBeforeRemovingRangeDeletionTask.pauseWhileSet();
             try {
                 removePersistentRangeDeletionTask(nss, migrationId);
             } catch (const DBException& e) {
@@ -668,6 +671,7 @@ void setOrphanCountersOnRangeDeletionTasks(OperationContext* opCtx) {
         opCtx,
         BSONObj(),
         [opCtx, &store, &setNumOrphansOnTask](const RangeDeletionTask& deletionTask) {
+            pauseBeforeSettingOrphanCountOnDocument.pauseWhileSet();
             AutoGetCollection collection(opCtx, deletionTask.getNss(), MODE_IX);
             if (!collection || collection->uuid() != deletionTask.getCollectionUuid()) {
                 // The deletion task is referring to a collection that has been dropped