- Spin up a cluster with featureFlagCatalogShard enabled (a minimal setup sketch follows this list)
- Spin up another mongod to act as a second shard; there are now two shards (the CSRS also acts as one)
- Enable sharding and shard a collection. However, the collection doesn't show up in sh.status():
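For reference, a minimal sketch of the setup above, reconstructed from the output below. The dbpaths are placeholders, and the admin command this build uses to register the config server as a shard is omitted; the replica set names and ports come from the sh.status() output.

    # Config server replica set that also acts as a shard ("catalog shard")
    mongod --configsvr --replSet csshard --port 27019 --dbpath /data/cs \
        --setParameter featureFlagCatalogShard=true
    # Second shard
    mongod --shardsvr --replSet shard2 --port 27021 --dbpath /data/shard2
    # Router
    mongos --configdb csshard/localhost:27019

    // Then, in a shell connected to mongos (after rs.initiate() on each
    // replica set, and after registering the config server as a shard):
    sh.addShard("shard2/localhost:27021")
    sh.enableSharding("test")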
mongos> sh.shardCollection("test.foo", {a: "hashed"})
{
        "collectionsharded" : "test.foo",
        "ok" : 1,
        "$clusterTime" : {
                "clusterTime" : Timestamp(1678168344, 14),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        },
        "operationTime" : Timestamp(1678168344, 14)
}
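As a side note, sharding on a hashed key also builds the supporting index, so one sanity check (not part of the original session) would be:

    mongos> db.getSiblingDB("test").foo.getIndexes()   // expect { "a" : "hashed" } alongside the _id index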
mongos> sh.status()
--- Sharding Status ---
  sharding version: { "_id" : 1, "clusterId" : ObjectId("64055a5d5956ee0f6d92edea") }
  shards:
        { "_id" : "config", "host" : "csshard/localhost:27019", "state" : 1, "topologyTime" : Timestamp(1678072517, 2) }
        { "_id" : "shard2", "host" : "shard2/localhost:27021", "state" : 1, "topologyTime" : Timestamp(1678168305, 2) }
  active mongoses:
        "7.0.0-alpha-538-g7cec1b7" : 1
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled: yes
        Currently running: no
  databases:
        { "_id" : "config", "primary" : "config", "partitioned" : true }
                config.system.sessions
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                config  1024
                        too many chunks to print, use verbose if you want to force print
        { "_id" : "test", "primary" : "shard2", "partitioned" : false, "version" : { "uuid" : UUID("bc946974-b27c-4441-8257-b9303c2d68bb"), "timestamp" : Timestamp(1678168331, 1), "lastMod" : 1 } }
Yet config.collections shows the collection is sharded (these queries are run against the config database):
mongos> db.collections.find()
{ "_id" : "config.system.sessions", "lastmodEpoch" : ObjectId("64055b985956ee0f6d92f14d"), "lastmod" : ISODate("2023-03-06T03:18:48.335Z"), "timestamp" : Timestamp(1678072728, 8), "uuid" : UUID("8a05a058-a950-4c6a-8da6-843213083109"), "key" : { "_id" : 1 }, "unique" : false, "noBalance" : false, "maxChunkSizeBytes" : NumberLong(200000), "noAutoSplit" : true }
{ "_id" : "test.foo", "lastmodEpoch" : ObjectId("6406d117f974587dc7641c70"), "lastmod" : ISODate("2023-03-07T05:52:23.895Z"), "timestamp" : Timestamp(1678168343, 10), "uuid" : UUID("ae6c58f4-549e-408e-afdc-f363959ecdae"), "key" : { "a" : "hashed" }, "unique" : false, "noBalance" : false }
config.chunks likewise shows two chunks for test.foo, both on "shard2":
mongos> db.chunks.find({shard: "shard2"})
{ "_id" : ObjectId("6406d1175956ee0f6d962fea"), "uuid" : UUID("ae6c58f4-549e-408e-afdc-f363959ecdae"), "min" : { "a" : NumberLong(0) }, "max" : { "a" : NumberLong("4611686018427387902") }, "shard" : "shard2", "lastmod" : Timestamp(1, 2), "onCurrentShardSince" : Timestamp(1678168343, 10), "history" : [ { "validAfter" : Timestamp(1678168343, 10), "shard" : "shard2" } ] }
{ "_id" : ObjectId("6406d1175956ee0f6d962feb"), "uuid" : UUID("ae6c58f4-549e-408e-afdc-f363959ecdae"), "min" : { "a" : NumberLong("4611686018427387902") }, "max" : { "a" : { "$maxKey" : 1 } }, "shard" : "shard2", "lastmod" : Timestamp(1, 3), "onCurrentShardSince" : Timestamp(1678168343, 10), "history" : [ { "validAfter" : Timestamp(1678168343, 10), "shard" : "shard2" } ] }
Insert a document in case that helps. (It doesn't.)
mongos> db.foo.insert({a:1})
WriteResult({ "nInserted" : 1 })
mongos> sh.status()
--- Sharding Status ---
  sharding version: { "_id" : 1, "clusterId" : ObjectId("64055a5d5956ee0f6d92edea") }
  shards:
        { "_id" : "config", "host" : "csshard/localhost:27019", "state" : 1, "topologyTime" : Timestamp(1678072517, 2) }
        { "_id" : "shard2", "host" : "shard2/localhost:27021", "state" : 1, "topologyTime" : Timestamp(1678168305, 2) }
  active mongoses:
        "7.0.0-alpha-538-g7cec1b7" : 1
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled: yes
        Currently running: no
  databases:
        { "_id" : "config", "primary" : "config", "partitioned" : true }
                config.system.sessions
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                config  1024
                        too many chunks to print, use verbose if you want to force print
        { "_id" : "test", "primary" : "shard2", "partitioned" : false, "version" : { "uuid" : UUID("bc946974-b27c-4441-8257-b9303c2d68bb"), "timestamp" : Timestamp(1678168331, 1), "lastMod" : 1 } }
Manually move a chunk to the other shard ("config"):
mongos> sh.moveChunk("test.foo", {a:1}, "config")
{
        "millis" : 209,
        "ok" : 1,
        "$clusterTime" : {
                "clusterTime" : Timestamp(1678169435, 33),
                "signature" : {
                        "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                        "keyId" : NumberLong(0)
                }
        },
        "operationTime" : Timestamp(1678169435, 33)
}
Now that there are chunks on both shards, does that help? (Nope.)
mongos> sh.status()
--- Sharding Status ---
  sharding version: { "_id" : 1, "clusterId" : ObjectId("64055a5d5956ee0f6d92edea") }
  shards:
        { "_id" : "config", "host" : "csshard/localhost:27019", "state" : 1, "topologyTime" : Timestamp(1678072517, 2) }
        { "_id" : "shard2", "host" : "shard2/localhost:27021", "state" : 1, "topologyTime" : Timestamp(1678168305, 2) }
  active mongoses:
        "7.0.0-alpha-538-g7cec1b7" : 1
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled: yes
        Currently running: no
  databases:
        { "_id" : "config", "primary" : "config", "partitioned" : true }
                config.system.sessions
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                config  1024
                        too many chunks to print, use verbose if you want to force print
        { "_id" : "test", "primary" : "shard2", "partitioned" : false, "version" : { "uuid" : UUID("bc946974-b27c-4441-8257-b9303c2d68bb"), "timestamp" : Timestamp(1678168331, 1), "lastMod" : 1 } }
The chunk has indeed been moved; only one chunk remains on shard2:
mongos> db.chunks.find({shard: "shard2"})
{ "_id" : ObjectId("6406d1175956ee0f6d962fea"), "uuid" : UUID("ae6c58f4-549e-408e-afdc-f363959ecdae"), "min" : { "a" : NumberLong(0) }, "max" : { "a" : NumberLong("4611686018427387902") }, "shard" : "shard2", "lastmod" : Timestamp(2, 1), "onCurrentShardSince" : Timestamp(1678168343, 10), "history" : [ { "validAfter" : Timestamp(1678168343, 10), "shard" : "shard2" } ] }
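The other chunk should correspondingly now be owned by the "config" shard; a quick cross-check (not in the original session) would be:

    mongos> db.chunks.find({shard: "config", uuid: UUID("ae6c58f4-549e-408e-afdc-f363959ecdae")})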
Why is sh.status() not acknowledging my sharded collection?
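One avenue worth checking (a guess on my part, not a confirmed diagnosis): the legacy shell's printShardingStatus() only lists collections under databases whose config.databases entry has "partitioned" : true, and every sh.status() output above shows "partitioned" : false for test. The entry can be inspected directly:

    mongos> use config
    mongos> db.databases.find({_id: "test"})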