diff --git a/jstests/sharding/removeQueryAnalyzerRepro.js b/jstests/sharding/removeQueryAnalyzerRepro.js new file mode 100644 index 00000000000..5fbe86c5be1 --- /dev/null +++ b/jstests/sharding/removeQueryAnalyzerRepro.js @@ -0,0 +1,52 @@ +import {configureFailPoint} from "jstests/libs/fail_point_util.js"; +import {funWithArgs} from "jstests/libs/parallel_shell_helpers.js"; +import {ShardingTest} from "jstests/libs/shardingtest.js"; + +const st = new ShardingTest({shards: {rs0: {nodes: 2}}}); + +const dbName = "test"; +const collName = "foo"; +const ns = dbName + '.' + collName; + +jsTest.log("Create collection initially"); +assert.commandWorked(st.s.getDB(dbName).createCollection(collName)); +assert.commandWorked( + st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 0.1})); + +let beforeCollections = st.s.getDB("config").getCollection("queryAnalyzers").find().toArray(); +jsTest.log("Found before collections: " + tojson(beforeCollections)); + +let dropFP = configureFailPoint(st.rs0.getPrimary(), "pauseBeforeRemovingQueryAnalyzerMetadata"); +let queryAnalyzerFP = + configureFailPoint(st.configRS.getPrimary(), "pauseBeforeDeletingQueryAnalyzer"); + +jsTest.log("Issue drop command"); +const awaitResult = startParallelShell(funWithArgs(function(dbName, collName) { + db.getSiblingDB(dbName).getCollection(collName).drop(); + }, dbName, collName), st.s.port); +jsTest.log("Wait until we are about to drop the query analyzer metadata"); +dropFP.wait(); + +jsTest.log("Release the FP and wait a bit so that we have issued the drop"); +dropFP.off(); +sleep(500); + +jsTest.log("Step down the primary"); +assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 60, force: true})); + +jsTest.log("Wait for the drop to complete on the new primary"); +awaitResult(); + +jsTest.log("Recreate the collection"); +assert.commandWorked(st.s.getDB(dbName).createCollection(collName)); +assert.commandWorked( + st.s.adminCommand({configureQueryAnalyzer: 
ns, mode: "full", samplesPerSecond: 0.1})); + +jsTest.log("Release the initial query analyzer drop"); +queryAnalyzerFP.off(); +sleep(1000); + +let afterCollections = st.s.getDB("config").getCollection("queryAnalyzers").find().toArray(); +jsTest.log("Found after collections: " + tojson(afterCollections)); + +st.stop(); diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp index 1f946907046..dbcc2247eeb 100644 --- a/src/mongo/db/ops/write_ops_exec.cpp +++ b/src/mongo/db/ops/write_ops_exec.cpp @@ -209,6 +209,8 @@ MONGO_FAIL_POINT_DEFINE(hangTimeseriesInsertBeforeCommit); MONGO_FAIL_POINT_DEFINE(hangTimeseriesInsertBeforeWrite); MONGO_FAIL_POINT_DEFINE(failUnorderedTimeseriesInsert); +MONGO_FAIL_POINT_DEFINE(pauseBeforeDeletingQueryAnalyzer); + /** * Metrics group for the `updateMany` and `deleteMany` operations. For each * operation, the `duration` and `numDocs` will contribute to aggregated total @@ -1898,6 +1900,14 @@ WriteResult performDeletes(OperationContext* opCtx, const write_ops::DeleteCommandRequest& wholeOp, OperationSource source) { auto ns = wholeOp.getNamespace(); + if (MONGO_unlikely(ns == NamespaceString::kConfigQueryAnalyzersNamespace && + pauseBeforeDeletingQueryAnalyzer.shouldFail())) { + auto obj = pauseBeforeDeletingQueryAnalyzer.toBSON(); + logd("Count = {}", obj.getField("timesEntered")); + if (obj.getIntField("timesEntered"_sd) < 2) { + pauseBeforeDeletingQueryAnalyzer.pauseWhileSet(); + } + } if (source == OperationSource::kTimeseriesDelete) { if (!ns.isTimeseriesBucketsCollection()) { ns = ns.makeTimeseriesBucketsNamespace(); diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp index 7b0edeb808c..3bfc3ac1a2b 100644 --- a/src/mongo/db/s/drop_collection_coordinator.cpp +++ b/src/mongo/db/s/drop_collection_coordinator.cpp @@ -88,6 +88,8 @@ namespace mongo { +MONGO_FAIL_POINT_DEFINE(pauseBeforeRemovingQueryAnalyzerMetadata); + void
DropCollectionCoordinator::DropCollectionCoordinatorDependencyDeconstructor::init( DropCollectionCoordinator* coordinator) { _coordinator = coordinator; @@ -387,6 +389,8 @@ void DropCollectionCoordinator::_commitDropCollection( LOGV2_DEBUG(5390504, 2, "Dropping collection", logAttrs(nss()), "sharded"_attr = collIsSharded); + pauseBeforeRemovingQueryAnalyzerMetadata.pauseWhileSet(); + // Remove the query sampling configuration document for this collection, if it exists. _dependencyDeconstructor->getShardingDDLUtilsProxy()->removeQueryAnalyzerMetadataFromConfig( opCtx,