diff --git a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
index b74832e..1ba4ec4 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
@@ -44,14 +44,14 @@

// Propagate 'clusterTime' to shard 1. This ensures that its next write will be at time >=
// 'clusterTime'.
- testDB1.coll1.find().itcount();
+ st.s.getDB("config").coll1.find().itcount();

// Attempt a snapshot read at 'clusterTime' on shard 1. Test that it performs a noop write to
// advance its lastApplied optime past 'clusterTime'. The snapshot read itself may fail if the
// noop write advances the node's majority commit point past 'clusterTime' and it releases that
// snapshot. Test reading from the primary.
const shard1Session =
- st.rs1.getPrimary().getDB("test1").getMongo().startSession({causalConsistency: false});
+ st.configRS.getPrimary().getDB("test1").getMongo().startSession({causalConsistency: false});
shard1Session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
res = shard1Session.getDatabase("test1").runCommand({find: "coll1"});
if (res.ok === 0) {
diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp
index 0967968..f45a835 100644
--- a/src/mongo/db/read_concern_mongod.cpp
+++ b/src/mongo/db/read_concern_mongod.cpp
@@ -134,6 +134,8 @@ Status makeNoopWriteIfNeeded(OperationContext* opCtx, LogicalTime clusterTime) {
auto shardingState = ShardingState::get(opCtx);
// standalone replica set, so there is no need to advance the OpLog on the primary.
if (!shardingState->enabled()) {
+ log() << "xxx not executing noop write to advance last applied op time because "
+ "sharding state not enabled";
return Status::OK();
}

diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index c66445f..f14fe0b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -1313,6 +1313,7 @@ Status ReplicationCoordinatorImpl::waitUntilOpTimeForReadUntil(OperationContext*
}

if (readConcern.getArgsAfterClusterTime() || readConcern.getArgsAtClusterTime()) {
+ log() << "xxx waiting until cluster time for read";
return _waitUntilClusterTimeForRead(opCtx, readConcern, deadline);
} else {
return _waitUntilOpTimeForReadDeprecated(opCtx, readConcern);
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index 3a960b6..b708b94 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -87,9 +87,9 @@ ReplSetDistLockManager::ReplSetDistLockManager(ServiceContext* globalContext,
ReplSetDistLockManager::~ReplSetDistLockManager() = default;

void ReplSetDistLockManager::startUp() {
- if (!_execThread) {
- _execThread = stdx::make_unique<stdx::thread>(&ReplSetDistLockManager::doTask, this);
- }
+ // if (!_execThread) {
+ // _execThread = stdx::make_unique<stdx::thread>(&ReplSetDistLockManager::doTask, this);
+ // }
}

void ReplSetDistLockManager::shutDown(OperationContext* opCtx) {
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 5558bed..3789aa1 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -420,8 +420,8 @@ ExitCode runMongosServer(ServiceContext* serviceContext) {

// Construct the sharding uptime reporter after the startup parameters have been parsed in order
// to ensure that it picks up the server port instead of reporting the default value.
- shardingUptimeReporter.emplace();
- shardingUptimeReporter->startPeriodicThread();
+ // shardingUptimeReporter.emplace();
+ // shardingUptimeReporter->startPeriodicThread();

clusterCursorCleanupJob.go();