[SERVER-60594] mongos 4.4.8 slow on mongos, but fast on mongod Created: 11/Oct/21  Updated: 14/May/22  Resolved: 28/Jan/22

Status: Closed
Project: Core Server
Component/s: None
Affects Version/s: None
Fix Version/s: None

Type: Bug Priority: Major - P3
Reporter: jing xu Assignee: Edwin Zhou
Resolution: Done Votes: 1
Labels: None
Remaining Estimate: Not Specified
Time Spent: Not Specified
Original Estimate: Not Specified

Attachments: PNG File Screen Shot 2021-12-22 at 11.18.43 AM.png    
Operating System: ALL
Participants:

 Description   

I upgraded the cluster from 4.4.4 to 4.4.8.
The cluster has three mongos routers, three data shards, and one config server replica set; each replica set has three nodes.
Recently I have been finding many slow query entries in the mongos logs, with op_msg operations taking more than 1000 ms, while the data nodes show nothing over 100 ms.

I checked I/O, memory, and CPU; none of them is under pressure, so I suspect the problem is on the mongos side, but I can't find it.

mongos slow log:
{"t":{"$date":"2021-10-11T21:15:25.154+08:00"},"s":"I", "c":"COMMAND", "id":51803, "ctx":"conn3042892","msg":"Slow query","attr":{"type":"command","ns":"wzjMonitor.expWzjMonitor","appName":"wzj_op","command":{"find":"expWzjMonitor","filter":{"no":"YT3224052005840"},"projection":{"no":1,"del":1,"curT":1,"oType":1,"matOrg":1,"_id":0},"$db":"wzjMonitor","$clusterTime":{"clusterTime":{"$timestamp":{"t":1633958124,"i":152}},"signature":{"hash":{"$binary":{"base64":"XbmmyAeBvyjB0IIpxpqKars52yg=","subType":"0"}},"keyId":6958985885933109266}},"lsid":{"id":{"$uuid":"5b593b65-b82d-40e8-8c94-682d9e4abc3d"}}},"nShards":1,"cursorExhausted":true,"numYields":0,"nreturned":1,"reslen":323,"protocol":"op_msg","durationMillis":1080}}

But when I execute the query myself, it takes just 2 ms:
mongos> db.expWzjMonitor.find({"no":"YT3224052005840"}).projection({"no":1,"del":1,"curT":1,"oType":1,"matOrg":1,"_id":0}).explain("executionStats")
{
"queryPlanner" : {
"mongosPlannerVersion" : 1,
"winningPlan" : {
"stage" : "SINGLE_SHARD",
"shards" : [
{
"shardName" : "shard2",
"connectionString" : "shard2/srvdb303.yto.cloud:21002,srvdb305.yto.cloud:21002,srvdb307.yto.cloud:21002",
"serverInfo" : { "host" : "srvdb305.yto.cloud", "port" : 21002, "version" : "4.4.8", "gitVersion" : "83b8bb8b6b325d8d8d3dfd2ad9f744bdad7d6ca0" },
"plannerVersion" : 1,
"namespace" : "wzjMonitor.expWzjMonitor",
"indexFilterSet" : false,
"parsedQuery" : {
"no" : { "$eq" : "YT3224052005840" }
},
"winningPlan" : {
"stage" : "PROJECTION_SIMPLE",
"transformBy" : { "no" : 1, "del" : 1, "curT" : 1, "oType" : 1, "matOrg" : 1, "_id" : 0 },
"inputStage" : {
"stage" : "SHARDING_FILTER",
"inputStage" : {
"stage" : "FETCH",
"filter" : {
"no" : { "$eq" : "YT3224052005840" }
},
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : { "no" : "hashed" },
"indexName" : "no_hashed",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : { "no" : [ "[-857763331883698335, -857763331883698335]" ] }
}
}
}
},
"rejectedPlans" : [ ]
}
]
}
},
"executionStats" : {
"nReturned" : 1,
"executionTimeMillis" : 2,
"totalKeysExamined" : 1,
"totalDocsExamined" : 1,
"executionStages" : {
"stage" : "SINGLE_SHARD",
"nReturned" : 1,
"executionTimeMillis" : 2,
"totalKeysExamined" : 1,
"totalDocsExamined" : 1,
"totalChildMillis" : NumberLong(0),
"shards" : [
{
"shardName" : "shard2",
"executionSuccess" : true,
"nReturned" : 1,
"executionTimeMillis" : 0,
"totalKeysExamined" : 1,
"totalDocsExamined" : 1,
"executionStages" : {
"stage" : "PROJECTION_SIMPLE",
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"transformBy" : { "no" : 1, "del" : 1, "curT" : 1, "oType" : 1, "matOrg" : 1, "_id" : 0 },
"inputStage" : {
"stage" : "SHARDING_FILTER",
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"chunkSkips" : 0,
"inputStage" : {
"stage" : "FETCH",
"filter" : {
"no" : { "$eq" : "YT3224052005840" }
},
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"docsExamined" : 1,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"keyPattern" : { "no" : "hashed" },
"indexName" : "no_hashed",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : { "no" : [ "[-857763331883698335, -857763331883698335]" ] },
"keysExamined" : 1,
"seeks" : 1,
"dupsTested" : 0,
"dupsDropped" : 0
}
}
}
}
}
]
}
},
"serverInfo" : { "host" : "srvdb303.yto.cloud", "port" : 21051, "version" : "4.4.8", "gitVersion" : "83b8bb8b6b325d8d8d3dfd2ad9f744bdad7d6ca0" },
"ok" : 1,
"operationTime" : Timestamp(1633958637, 1646),
"$clusterTime" : {
"clusterTime" : Timestamp(1633958637, 1649),
"signature" : { "hash" : BinData(0,"yhztP+fehpQiXUYJNGyv/zitOrc="), "keyId" : NumberLong("6958985885933109266") }
}
}
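
Note that the 2 ms above is the executionTimeMillis reported by a one-off explain("executionStats"), which does not necessarily reproduce the latency the application sees through a busy mongos. A minimal sketch for timing the real round trip from the shell (the mongos port 21051 is taken from the serverInfo above, and no authentication is assumed):

mongo --quiet --port 21051 wzjMonitor --eval '
  // time a real find() through the mongos; compare with durationMillis in the mongos slow log
  var t0 = new Date();
  db.expWzjMonitor.find({ no: "YT3224052005840" },
                        { no: 1, del: 1, curT: 1, oType: 1, matOrg: 1, _id: 0 }).toArray();
  print("round trip through mongos (ms): " + (new Date() - t0));
'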



 Comments   
Comment by jing xu [ 14/May/22 ]

Hi Edwin,
I removed the serviceExecutor setting, but it is still slow: 1484 ms on the mongos versus 1 ms on the shard.
more mongos.log |grep 1234567
{"t":

{"$date":"2022-05-14T13:24:22.449+08:00"}

,"s":"I", "c":"COMMAND", "id":51803,
"ctx":"conn587389","msg":"Slow query","attr":{"type":"command","ns":"xiaoxu.orderMail",
"command":{"find":"OrderMail","filter":{"$and":[

{"mailNo":"1234567"}

,{"createTime":{"$gte":

{"$date":"2022-04-14T05:24:20.968Z"}

}},
{"createTime":{"$lte":

{"$date":"2022-05-15T05:24:20.968Z"}

}}]},"sort":

{"createTime":-1}

,
"nShards":1,"cursorExhausted":true,"numYields":0,
"nreturned":0,"reslen":234,"protocol":"op_msg","durationMillis":1484}}

more shard1.log
{"t":

{"$date":"2022-05-14T13:24:22.449+08:00"}

,"s":"I", "c":"COMMAND", "id":51803, "ctx":"conn410512","msg":"Slow query",
"attr":{"type":"command","ns":"xiaoxu.OrderMail","command":{"find":"OrderMail","filter":{"$and":[

{"mailNo":"1234567"}

,
{"createTime":{"$gte":

{"$date":"2022-04-14T05:24:20.968Z"}

}},{"createTime":{"$lte":

{"$date":"2022-05-15T05:24:20.968Z"}

}}]},
"sort":

{"createTime":-1}

,
"storage":{},"protocol":"op_msg","durationMillis":1}}

Comment by Edwin Zhou [ 28/Jan/22 ]

Hi 601290552@qq.com,

We haven’t heard back from you for some time, so I’m going to close this ticket. If this is still an issue for you, please provide additional information and we will reopen the ticket.

Best,
Edwin

Comment by Edwin Zhou [ 07/Jan/22 ]

Hi 601290552@qq.com,

We still need additional information to diagnose the problem. If this is still an issue for you, would you please let us know if performance has improved after disabling the serviceExecutor setting?

Best,
Edwin

Comment by Edwin Zhou [ 22/Dec/21 ]

Hi 601290552@qq.com,

I tremendously appreciate your patience and I apologize for the wait as I investigate this issue.

I believe that the reason for the increased latency on the mongos is that serviceExecutor is set to adaptive.

At point C (2021-11-05T09:35:00.584Z) in the attached screenshot, the service executor starts to kick in, and two seconds later, at point D, the slow query log lines start to appear.

I recommend removing the serviceExecutor setting from all of your config files. We're interested in hearing whether performance improves after this setting is disabled.
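
For reference, one way to check whether the option is still in effect on a running mongos is getCmdLineOpts; a minimal sketch, assuming the mongos from the explain output above (port 21051, no auth):

# the parsed options should stop showing net.serviceExecutor once the setting is
# removed from the config file and the process is restarted
mongo --quiet --port 21051 --eval 'printjson(db.adminCommand({ getCmdLineOpts: 1 }).parsed)'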

Best,
Edwin

Comment by jing xu [ 18/Dec/21 ]

Hi Edwin,
Any update on this?

Comment by jing xu [ 20/Nov/21 ]

Hi Edwin,
I have uploaded six files via the support uploader link. Please help me analyze them, thanks.
The upload command was:
curl -X POST https://upload.box.com/api/2.0/files/content \
-H 'Authorization: Bearer 1!nT3yRosOCSe_UNZDQ9hFZRhc0UvI9l8KqQDkMYlYiWHqwvpJgQipZBt4M7AuHqELswFtEb9Cfl_-7knVItKkRwGq4bHz6dxG9moKgA3IdvvmSXYMJJJrZNdV4kKI3p3s4QfJouTRK4KuHPp0AL6LsKjW6Fe6oNA6psmqV-sB7LLgkaGfnJTzZ4gOuvMT0BmNDfCOsZ0jbcGW88_-Lafk_-3VjldfhVbxEMVyevJMwKpLigjH5Vu9957CLP55KUTeXbxzMcy-mtAZPoOfNYSBXzYnvH9U6H9oks3qZZkjtD2P4XliFom43tL5j4yGV5uUa-cNqSgrCmWCIn1DPh0rQ2-w60ke6yTmZhfa8lMofzKKtQxCg7DbqDkGJ3qwFMiTmb62VGsLu6iK3RQwXsoo70AgM8o.' \
-H 'Content-Type: multipart/form-data' \
-F attributes='{"name": "<filename>", "parent": {"id": "148229314081"}}' \
-F file=@<filename> > /dev/null

for example:

[root@~]# curl -X POST https://upload.box.com/api/2.0/files/content \
> -H 'Authorization: Bearer 1!nT3yRosOCSe_UNZDQ9hFZRhc0UvI9l8KqQDkMYlYiWHqwvpJgQipZBt4M7AuHqELswFtEb9Cfl_-7knVItKkRwGq4bHz6dxG9moKgA3IdvvmSXYMJJJrZNdV4kKI3p3s4QfJouTRK4KuHPp0AL6LsKjW6Fe6oNA6psmqV-sB7LLgkaGfnJTzZ4gOuvMT0BmNDfCOsZ0jbcGW88_-Lafk_-3VjldfhVbxEMVyevJMwKpLigjH5Vu9957CLP55KUTeXbxzMcy-mtAZPoOfNYSBXzYnvH9U6H9oks3qZZkjtD2P4XliFom43tL5j4yGV5uUa-cNqSgrCmWCIn1DPh0rQ2-w60ke6yTmZhfa8lMofzKKtQxCg7DbqDkGJ3qwFMiTmb62VGsLu6iK3RQwXsoo70AgM8o.' \
> -H 'Content-Type: multipart/form-data' \
> -F attributes='{"name": "mongod21001.log.2021-11-05T16-00-01.tar", "parent": {"id": "148229314081"}}' \
> -F file=@mongod21001.log.2021-11-05T16-00-01.tar > /dev/null
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 25.3M 100 1190 100 25.3M 41 908k 0:00:29 0:00:28 0:00:01 3672k
[root@ ~]# curl -X POST https://upload.box.com/api/2.0/files/content \
> -H 'Authorization: Bearer 1!nT3yRosOCSe_UNZDQ9hFZRhc0UvI9l8KqQDkMYlYiWHqwvpJgQipZBt4M7AuHqELswFtEb9Cfl_-7knVItKkRwGq4bHz6dxG9moKgA3IdvvmSXYMJJJrZNdV4kKI3p3s4QfJouTRK4KuHPp0AL6LsKjW6Fe6oNA6psmqV-sB7LLgkaGfnJTzZ4gOuvMT0BmNDfCOsZ0jbcGW88_-Lafk_-3VjldfhVbxEMVyevJMwKpLigjH5Vu9957CLP55KUTeXbxzMcy-mtAZPoOfNYSBXzYnvH9U6H9oks3qZZkjtD2P4XliFom43tL5j4yGV5uUa-cNqSgrCmWCIn1DPh0rQ2-w60ke6yTmZhfa8lMofzKKtQxCg7DbqDkGJ3qwFMiTmb62VGsLu6iK3RQwXsoo70AgM8o.' \
> -H 'Content-Type: multipart/form-data' \
> -F attributes='{"name": "mongos.log.2021-11-05T16-00-01.bad.tar", "parent": {"id": "148229314081"}}' \
> -F file=@mongos.log.2021-11-05T16-00-01.bad.tar > /dev/null
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 274M 100 1190 100 274M 2 686k 0:09:55 0:06:49 0:03:06 3648k

-rw-r--r--. 1 root root 211510092 11月 6 12:42 config.diagnostic.data.tar
-rw-r--r--. 1 root root 2809498 11月 6 12:42 config.log.2021-11-05T16-00-01.tar
-rw-r--r--. 1 root root 210495777 11月 6 12:42 mongod21001.diagnostic.data.tar
-rw-r--r--. 1 root root 217081963 11月 6 12:42 mongos.diagnostic.data.tar
-rw-r--r--. 1 root root 287995774 11月 6 12:42 mongos.log.2021-11-05T16-00-01.bad.tar
-rw-r--r--. 1 root root 26528629 11月 6 12:42 mongod21001.log.2021-11-05T16-00-01.tar

Comment by Edwin Zhou [ 19/Nov/21 ]

Hi 601290552@qq.com,

I deeply apologize for the delay in my response. Unfortunately, I wasn't able to find any diagnostic data in the upload portal and it appears that the link has now expired. Here is another support uploader link.

Best,
Edwin

Comment by jing xu [ 12/Nov/21 ]

Hi Edwin,
Have you had a chance to look at it?

Comment by jing xu [ 06/Nov/21 ]

The read/write latencyStats for collection expInfos show average latencies under 1 ms on every shard.
mongos> db.expInfos.latencyStats({}).pretty();
{
"ns" : "expMonitordb.expInfos",
"shard" : "shard2",
"localTime" : ISODate("2021-11-06T05:02:20.602Z"),
"latencyStats" : {
"reads" : { "latency" : NumberLong("5859860908720"), "ops" : NumberLong("37615589290") },
"writes" : { "latency" : NumberLong("10417613930579"), "ops" : NumberLong("46642821404") },
"commands" : { "latency" : NumberLong("5699662536"), "ops" : NumberLong(167013) },
"transactions" : { "latency" : NumberLong(0), "ops" : NumberLong(0) }
}
}
{
"ns" : "expMonitordb.expInfos",
"shard" : "shard1",
"localTime" : ISODate("2021-11-06T05:02:20.603Z"),
"latencyStats" : {
"reads" : { "latency" : NumberLong("1567670429936"), "ops" : NumberLong("9148125487") },
"writes" : { "latency" : NumberLong("2486299823686"), "ops" : NumberLong("10949404338") },
"commands" : { "latency" : NumberLong(3454887), "ops" : NumberLong(136413) },
"transactions" : { "latency" : NumberLong(0), "ops" : NumberLong(0) }
}
}
{
"ns" : "expMonitordb.expInfos",
"shard" : "shard3",
"localTime" : ISODate("2021-11-06T05:02:20.602Z"),
"latencyStats" : {
"reads" : { "latency" : NumberLong("2870996851142"), "ops" : NumberLong("21898375246") },
"writes" : { "latency" : NumberLong("5975989252073"), "ops" : NumberLong("26883638500") },
"commands" : { "latency" : NumberLong(3471878), "ops" : NumberLong(161625) },
"transactions" : { "latency" : NumberLong(0), "ops" : NumberLong(0) }
}
}
{
"ns" : "expMonitordb.expInfos",
"shard" : "shard4",
"localTime" : ISODate("2021-11-06T05:02:20.603Z"),
"latencyStats" : {
"reads" : { "latency" : NumberLong("5088637166162"), "ops" : NumberLong("37545121408") },
"writes" : { "latency" : NumberLong("10874201314068"), "ops" : NumberLong("46555568313") },
"commands" : { "latency" : NumberLong("2524085012"), "ops" : NumberLong(166565) },
"transactions" : { "latency" : NumberLong(0), "ops" : NumberLong(0) }
}
}
{
"ns" : "expMonitordb.expInfos",
"shard" : "shard5",
"localTime" : ISODate("2021-11-06T05:02:20.602Z"),
"latencyStats" : {
"reads" : { "latency" : NumberLong("5212380349074"), "ops" : NumberLong("35428495213") },
"writes" : { "latency" : NumberLong("10305021739249"), "ops" : NumberLong("43890507895") },
"commands" : { "latency" : NumberLong("2370567887"), "ops" : NumberLong(165245) },
"transactions" : { "latency" : NumberLong(0), "ops" : NumberLong(0) }
}
}
{
"ns" : "expMonitordb.expInfos",
"shard" : "shard6",
"localTime" : ISODate("2021-11-06T05:02:20.603Z"),
"latencyStats" : {
"reads" : { "latency" : NumberLong("6184783099730"), "ops" : NumberLong("36255590047") },
"writes" : { "latency" : NumberLong("11301554888483"), "ops" : NumberLong("44929440018") },
"commands" : { "latency" : NumberLong("6023699670"), "ops" : NumberLong(165685) },
"transactions" : { "latency" : NumberLong(0), "ops" : NumberLong(0) }
}
}
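
Since latency is a cumulative total in microseconds and ops is the cumulative operation count, the per-operation average is latency / ops. A small sketch that computes the averages per shard with the $collStats stage that latencyStats() wraps (the mongos port and the lack of auth are assumptions):

mongo --quiet --port 21051 expMonitordb --eval '
  printjson(db.expInfos.aggregate([
    { $collStats: { latencyStats: { histograms: false } } },
    // latency is cumulative microseconds, so latency / ops is the average per op
    { $project: {
        shard: 1,
        avgReadUs:  { $divide: ["$latencyStats.reads.latency",  "$latencyStats.reads.ops"] },
        avgWriteUs: { $divide: ["$latencyStats.writes.latency", "$latencyStats.writes.ops"] }
    } }
  ]).toArray())
'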

Comment by jing xu [ 06/Nov/21 ]

Hi Edwin,
I have uploaded the files.
The following files were uploaded:
-rw-r--r--. 1 root root 211510092 11月 6 12:42 config.diagnostic.data.tar
-rw-r--r--. 1 root root 2809498 11月 6 12:42 config.log.2021-11-05T16-00-01.tar
-rw-r--r--. 1 root root 217081963 11月 6 12:42 mongos.diagnostic.data.tar
-rw-r--r--. 1 root root 287995774 11月 6 12:42 mongos.log.2021-11-05T16-00-01.bad.tar
-rw-r--r--. 1 root root 26528629 11月 6 12:42 mongod21001.log.2021-11-05T16-00-01.tar
-rw-r--r--. 1 root root 210495777 11月 6 12:42 mongod21001.diagnostic.data.tar

For example, take the collection expInfos.

From 2021-11-05 00:00:00 to 2021-11-06 00:00:00:
Slow-log entries (more than 100 ms) for expInfos in one mongos log:
more mongos.log.2021-11-05T16-00-01 |grep expInfos |wc -l
1620425
Slow-log entries (more than 100 ms) for expInfos in one shard log:
[mongo@srvdb1 log]$ more shard1.log.2021-11-05T16-00-01 |grep expInfos |wc -l
21052

There are six shards, and each shard's slow-log count is about the same, so the one-day total across all shards is roughly 140,000 entries over 100 ms. But a single mongos logged 1,620,425 entries over 100 ms.

So the bottleneck is on the mongos.
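
(For a like-for-like count: the grep above matches every line that mentions expInfos; a tighter sketch that counts only "Slow query" entries over 100 ms, assuming jq is available and the 4.4 JSON log format shown earlier, would be:)

grep '"msg":"Slow query"' mongos.log.2021-11-05T16-00-01 \
  | jq -c 'select(.attr.ns == "expMonitordb.expInfos" and .attr.durationMillis > 100)' | wc -l
grep '"msg":"Slow query"' shard1.log.2021-11-05T16-00-01 \
  | jq -c 'select(.attr.ns == "expMonitordb.expInfos" and .attr.durationMillis > 100)' | wc -l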

Comment by jing xu [ 05/Nov/21 ]

Hi Edwin,
I forgot to upload it. I will upload it later.

Comment by Edwin Zhou [ 04/Nov/21 ]

Hi 601290552@qq.com,

We still need additional information to diagnose the problem. If this is still an issue for you, would you please upload mongod.log files from the requested nodes covering the incident and the $dbpath/diagnostic.data?

Best,
Edwin

Comment by Edwin Zhou [ 18/Oct/21 ]

Hi 601290552@qq.com,

To investigate this as a possible bug, we'd like information from the following nodes in the cluster:

  • The mongos where you run the command
  • The config server primary
  • The primary shard for the database.

For each of these nodes, please archive (tar or zip) the mongod.log files covering the incident and the $dbpath/diagnostic.data directory (the contents are described here)
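
For example, a minimal sketch of the archiving step for one of the mongod nodes (the log and dbPath locations below are placeholders; substitute the actual paths from your configuration):

# placeholders: adjust to the node's real log path and dbPath
LOGDIR=/var/log/mongodb
DBPATH=/data/mongod21001

# archive the logs covering the incident and the FTDC diagnostic.data directory
tar -czvf mongod21001-logs.tar.gz "$LOGDIR"/mongod*.log*
tar -czvf mongod21001-diagnostic.data.tar.gz "$DBPATH"/diagnostic.data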

I've created a secure upload portal for you. Files uploaded to this portal are hosted on Box and are visible only to MongoDB employees.

Best,
Edwin

Comment by jing xu [ 11/Oct/21 ]

i upgrade from 4.4.4 to 4.4.8 cluster.
cluster has three mongos,three data nodes and one config using replicate with three nodes.
i recently i find many slow logs on mongos,op_msg more than 1000ms。but data node there is no any more than 100ms.

i check io,mem,cpu, there is no stress on it. i guess exists problem on mongos.
but i can't find it.

mongos slow log:
{"t":

{"$date":"2021-10-11T21:15:25.154+08:00"}

,"s":"I", "c":"COMMAND", "id":51803, "ctx":"conn3042892","msg":"Slow query","attr":{"type":"command","ns":"wzjMonitor.expWzjMonitor","appName":"wzj_op","command":{"find":"expWzjMonitor","filter":

{"no":"103224052005840"}

,"projection":

{"no":1,"del":1,"curT":1,"oType":1,"matOrg":1,"_id":0}

,"$db":"wzjMonitor","$clusterTime":{"clusterTime":{"$timestamp":{"t":1633958124,"i":152}},"signature":{"hash":{"$binary":{"base64":"XbmmyAeBvyjB0IIpxpqKars52yg=","subType":"0"}},"keyId":6958985885933109266}},"lsid":{"id":

{"$uuid":"5b593b65-b82d-40e8-8c94-682d9e4abc3d"}

}},"nShards":1,"cursorExhausted":true,"numYields":0,"nreturned":1,"reslen":323,"protocol":"op_msg","durationMillis":1080}}

but i execute it just 2ms.
mongos> db.expWzjMonitor.find(

{"no":"103224052005840"}

).projection(

{"no":1,"del":1,"curT":1,"oType":1,"matOrg":1,"_id":0}

).explain("executionStats")
{
"queryPlanner" : {
"mongosPlannerVersion" : 1,
"winningPlan" : {
"stage" : "SINGLE_SHARD",
"shards" : [
{
"shardName" : "shard2",
"connectionString" : "shard2/srvdb303.yto.cloud:21002,srvdb305.yto.cloud:21002,srvdb307.yto.cloud:21002",
"serverInfo" :

{ "host" : "srvdb305.yto.cloud", "port" : 21002, "version" : "4.4.8", "gitVersion" : "83b8bb8b6b325d8d8d3dfd2ad9f744bdad7d6ca0" }

,
"plannerVersion" : 1,
"namespace" : "wzjMonitor.expWzjMonitor",
"indexFilterSet" : false,
"parsedQuery" : {
"no" :

{ "$eq" : "YT3224052005840" }

},
"winningPlan" : {
"stage" : "PROJECTION_SIMPLE",
"transformBy" :

{ "no" : 1, "del" : 1, "curT" : 1, "oType" : 1, "matOrg" : 1, "_id" : 0 }

,
"inputStage" : {
"stage" : "SHARDING_FILTER",
"inputStage" : {
"stage" : "FETCH",
"filter" : {
"no" :

{ "$eq" : "YT3224052005840" }

},
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" :

{ "no" : "hashed" }

,
"indexName" : "no_hashed",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" :

{ "no" : [ "[-857763331883698335, -857763331883698335]" ] }

}
}
}
},
"rejectedPlans" : [ ]
}
]
}
},
"executionStats" : {
"nReturned" : 1,
"executionTimeMillis" : 2,
"totalKeysExamined" : 1,
"totalDocsExamined" : 1,
"executionStages" : {
"stage" : "SINGLE_SHARD",
"nReturned" : 1,
"executionTimeMillis" : 2,
"totalKeysExamined" : 1,
"totalDocsExamined" : 1,
"totalChildMillis" : NumberLong(0),
"shards" : [
{
"shardName" : "shard2",
"executionSuccess" : true,
"nReturned" : 1,
"executionTimeMillis" : 0,
"totalKeysExamined" : 1,
"totalDocsExamined" : 1,
"executionStages" : {
"stage" : "PROJECTION_SIMPLE",
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"transformBy" :

{ "no" : 1, "del" : 1, "curT" : 1, "oType" : 1, "matOrg" : 1, "_id" : 0 }

,
"inputStage" : {
"stage" : "SHARDING_FILTER",
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"chunkSkips" : 0,
"inputStage" : {
"stage" : "FETCH",
"filter" : {
"no" :

{ "$eq" : "YT3224052005840" }

},
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"docsExamined" : 1,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"keyPattern" :

{ "no" : "hashed" }

,
"indexName" : "no_hashed",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" :

{ "no" : [ "[-857763331883698335, -857763331883698335]" ] }

,
"keysExamined" : 1,
"seeks" : 1,
"dupsTested" : 0,
"dupsDropped" : 0
}
}
}
}
}
]
}
},
"serverInfo" :

{ "host" : "srvdb303.yto.cloud", "port" : 21051, "version" : "4.4.8", "gitVersion" : "83b8bb8b6b325d8d8d3dfd2ad9f744bdad7d6ca0" }

,
"ok" : 1,
"operationTime" : Timestamp(1633958637, 1646),
"$clusterTime" : {
"clusterTime" : Timestamp(1633958637, 1649),
"signature" :

{ "hash" : BinData(0,"yhztP+fehpQiXUYJNGyv/zitOrc="), "keyId" : NumberLong("6958985885933109266") }

}
}
