2019-09-26T15:54:18.155+0200 I COMMAND [conn410479] command buzzguru_master.video command: find { find: "video", filter: { videoId: "XrJFIL_f5Rs" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057883), clusterTime: Timestamp(1569506057, 10338) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("be64a2c6-82d4-4fb4-8510-4e97fdc92836"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10338), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:57172", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:536 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 199291 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 271ms 2019-09-26T15:54:18.155+0200 I COMMAND [conn410431] command buzzguru_master.video command: find { find: "video", filter: { videoId: "Q6f08_O4JXE" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057791), clusterTime: Timestamp(1569506057, 10282) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("b691939f-fe2a-4918-a219-988c5300499d"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10283), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:57160", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:556 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, 
acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 290215 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 363ms 2019-09-26T15:54:18.154+0200 I COMMAND [conn410449] command buzzguru_master.video command: find { find: "video", filter: { videoId: "GjJOxbCdAA4" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057596), clusterTime: Timestamp(1569506057, 10196) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("56b42414-e869-4023-b318-1045e5737953"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10196), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:57026", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:556 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 486265 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 558ms 2019-09-26T15:54:18.154+0200 I COMMAND [conn410457] command buzzguru_master.video command: find { find: "video", filter: { videoId: "g4DbDyyteM0" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057764), clusterTime: Timestamp(1569506057, 10273) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("5758844b-867a-4878-b9ba-dfbc84a2deff"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10273), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:56958", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:556 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, 
Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 317516 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 389ms 2019-09-26T15:54:18.155+0200 I COMMAND [conn410435] command buzzguru_master.video command: find { find: "video", filter: { videoId: "7VyHP3hYl60" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057698), clusterTime: Timestamp(1569506057, 10259) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("7684e9fd-1e1c-4db5-8393-bc86972ebcfe"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10259), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:57022", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:556 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 383747 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 456ms 2019-09-26T15:54:18.155+0200 I COMMAND [conn410388] command buzzguru_master.video command: find { find: "video", filter: { videoId: "ZX4fUv4-wFQ" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057779), clusterTime: Timestamp(1569506057, 10278) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("6ffb6f58-e77a-40a1-8c92-71a9174a5900"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10278), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:56956", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:556 locks:{ 
ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 302569 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 375ms 2019-09-26T15:54:18.154+0200 I COMMAND [conn410490] command buzzguru_master.video command: find { find: "video", filter: { videoId: "DepkTEcH-ow" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057886), clusterTime: Timestamp(1569506057, 10341) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("41cc596e-c57a-4ec5-9679-cb83e5b79295"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10341), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:56968", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:536 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 194255 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 266ms 2019-09-26T15:54:18.154+0200 I COMMAND [conn410478] command buzzguru_master.video command: find { find: "video", filter: { videoId: "yEpddMxNSVA" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057875), clusterTime: Timestamp(1569506057, 10334) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("ca34d240-3c04-4d2f-8ffc-96e7a2560462"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10335), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:57032", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 
errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:536 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 206570 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 279ms 2019-09-26T15:54:18.155+0200 I COMMAND [conn410483] command buzzguru_master.video command: find { find: "video", filter: { videoId: "b35Res08iI4" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057880), clusterTime: Timestamp(1569506057, 10337) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("db879418-323e-482c-a5ba-7ef8bccefe85"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10337), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:57016", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:536 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 205579 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 274ms 2019-09-26T15:54:18.155+0200 I COMMAND [conn410403] command buzzguru_master.video command: find { find: "video", filter: { videoId: "sWbz9dLcZTs" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057742), clusterTime: Timestamp(1569506057, 10269) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("aad3a72e-227f-4f80-b151-4b896aab33a5"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10269), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:56970", version: "4.2.0" } }, 
$configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:556 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 340057 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 413ms 2019-09-26T15:54:18.155+0200 I COMMAND [conn410485] command buzzguru_master.video command: find { find: "video", filter: { videoId: "j6nd1ly8KRs" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057881), clusterTime: Timestamp(1569506057, 10338) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("d15547d7-0411-4178-ad0a-0ce43ce2ae47"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10338), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: "amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:57042", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:536 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 204331 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 273ms 2019-09-26T15:54:18.155+0200 I COMMAND [conn410476] command buzzguru_master.video command: find { find: "video", filter: { videoId: "Gz3T0YbO6dw" }, limit: 1, runtimeConstants: { localNow: new Date(1569506057862), clusterTime: Timestamp(1569506057, 10319) }, shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], lsid: { id: UUID("337dd82c-d490-45f7-9a0d-73269c48d59f"), uid: BinData(0, E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855) }, $clusterTime: { clusterTime: Timestamp(1569506057, 10320), signature: { hash: BinData(0, 37F216C20481B671BDCB062D7EA37A47A24CE457), keyId: 6733616200154087441 } }, $audit: { $impersonatedUsers: [ { user: "buzzguru_master", db: "buzzguru_master" } ], $impersonatedRoles: [ { role: "read", db: "buzzguru_master" }, { role: "dbOwner", db: "buzzguru_master" }, { role: "enableSharding", db: "buzzguru_master" }, { role: "readWrite", db: "buzzguru_master" }, { role: "userAdmin", db: "buzzguru_master" }, { role: "dbAdmin", db: "buzzguru_master" } ] }, $client: { driver: { name: "mongo-go-driver", version: "v1.1.1+prerelease" }, os: { type: "linux", architecture: 
"amd64" }, platform: "go1.13", mongos: { host: "mongo4:27000", client: "172.18.0.7:57176", version: "4.2.0" } }, $configServerState: { opTime: { ts: Timestamp(1569506057, 10129), t: 1 } }, $db: "buzzguru_master" } numYields:0 ok:0 errMsg:"migration commit in progress for buzzguru_master.video" errName:StaleConfig errCode:13388 reslen:536 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 2 } }, Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 223327 } }, Mutex: { acquireCount: { r: 3 } } } protocol:op_msg 292ms 2019-09-26T15:54:18.161+0200 I SHARDING [conn380117] Deletion of buzzguru_master.video range [{ _id: ObjectId('5d02904069bd5bea0a2c3b4f') }, { _id: ObjectId('5d02947869bd5bea0a5b93e3') }) will be scheduled after all possibly dependent queries finish 2019-09-26T15:54:18.161+0200 I SHARDING [conn380117] Leaving cleanup of buzzguru_master.video range [{ _id: ObjectId('5d02904069bd5bea0a2c3b4f') }, { _id: ObjectId('5d02947869bd5bea0a5b93e3') }) to complete in background 2019-09-26T15:54:18.161+0200 I SHARDING [conn380117] about to log metadata event into changelog: { _id: "m4.buzz.guru:27018-2019-09-26T15:54:18.161+0200-5d8cc30a11441b5e57daf87e", server: "m4.buzz.guru:27018", shard: "rs1", clientAddr: "116.202.13.30:14248", time: new Date(1569506058161), what: "moveChunk.from", ns: "buzzguru_master.video", details: { min: { _id: ObjectId('5d02904069bd5bea0a2c3b4f') }, max: { _id: ObjectId('5d02947869bd5bea0a5b93e3') }, step 1 of 6: 0, step 2 of 6: 6, step 3 of 6: 1636, step 4 of 6: 3821, step 5 of 6: 2854, step 6 of 6: 579, to: "rs0", from: "rs1", note: "success" } } 2019-09-26T15:54:18.168+0200 I COMMAND [conn380117] command admin.$cmd command: moveChunk { moveChunk: "buzzguru_master.video", shardVersion: [ Timestamp(2296, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], epoch: ObjectId('5d8b790a355ec980dea6c7dc'), configdb: "config/arbiter1.buzz.guru:27019,arbiter2.buzz.guru:27019,arbiter3.buzz.guru:27019", fromShard: "rs1", toShard: "rs0", min: { _id: ObjectId('5d02904069bd5bea0a2c3b4f') }, max: { _id: ObjectId('5d02947869bd5bea0a5b93e3') }, maxChunkSizeBytes: 67108864, waitForDelete: false, takeDistLock: false, $clusterTime: { clusterTime: Timestamp(1569506049, 282), signature: { hash: BinData(0, 9EA44DF6A91D63CD758F8B52067E005A926CD823), keyId: 6733616200154087441 } }, $configServerState: { opTime: { ts: Timestamp(1569506049, 282), t: 1 } }, $db: "admin" } numYields:282 reslen:333 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 197 } }, ReplicationStateTransition: { acquireCount: { w: 14105 } }, Global: { acquireCount: { r: 14095, w: 10 } }, Database: { acquireCount: { r: 295, w: 8, W: 2 } }, Collection: { acquireCount: { r: 293, w: 4, W: 3 }, acquireWaitCount: { W: 3 }, timeAcquiringMicros: { W: 616033 } }, Mutex: { acquireCount: { r: 34, W: 5 } }, oplog: { acquireCount: { r: 2, w: 1 } } } flowControl:{ acquireCount: 8 } storage:{ data: { bytesRead: 213792, bytesWritten: 79543, timeReadingMicros: 516, timeWritingMicros: 128 }, timeWaitingMicros: { cache: 26073 } } protocol:op_msg 8906ms 2019-09-26T15:54:19.325+0200 I SHARDING [conn380117] Starting chunk migration ns: buzzguru_master.video, [{ _id: ObjectId('5d02947869bd5bea0a5b93e3') }, { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }), fromShard: rs1, toShard: rs2 with expected collection version epoch 5d8b790a355ec980dea6c7dc 2019-09-26T15:54:19.331+0200 I 
SHARDING [conn380117] about to log metadata event into changelog: { _id: "m4.buzz.guru:27018-2019-09-26T15:54:19.331+0200-5d8cc30b11441b5e57db03fe", server: "m4.buzz.guru:27018", shard: "rs1", clientAddr: "116.202.13.30:14248", time: new Date(1569506059331), what: "moveChunk.start", ns: "buzzguru_master.video", details: { min: { _id: ObjectId('5d02947869bd5bea0a5b93e3') }, max: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, from: "rs1", to: "rs2" } } 2019-09-26T15:54:20.579+0200 I SHARDING [Collection-Range-Deleter] No documents remain to delete in buzzguru_master.video range [{ _id: ObjectId('5caa4973f2e034eb827c444a') }, { _id: ObjectId('5cab85eee165ebba0d8b541e') }) 2019-09-26T15:54:20.579+0200 I SHARDING [Collection-Range-Deleter] Waiting for majority replication of local deletions in buzzguru_master.video range [{ _id: ObjectId('5caa4973f2e034eb827c444a') }, { _id: ObjectId('5cab85eee165ebba0d8b541e') }) 2019-09-26T15:54:23.869+0200 I SHARDING [Collection-Range-Deleter] Finished deleting documents in buzzguru_master.video range [{ _id: ObjectId('5caa4973f2e034eb827c444a') }, { _id: ObjectId('5cab85eee165ebba0d8b541e') }) 2019-09-26T15:54:25.208+0200 I SHARDING [conn380117] moveChunk data transfer progress: { waited: true, active: true, sessionId: "rs1_rs2_5d8cc30b11441b5e57db041b", ns: "buzzguru_master.video", from: "rs1/m3.buzz.guru:27018,m4.buzz.guru:27018", fromShardId: "rs1", min: { _id: ObjectId('5d02947869bd5bea0a5b93e3') }, max: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, shardKeyPattern: { _id: 1.0 }, state: "clone", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0, $gleStats: { lastOpTime: { ts: Timestamp(1569506064, 574), t: 1 }, electionId: ObjectId('7fffffff0000000000000001') }, lastCommittedOpTime: Timestamp(1569506065, 1667), $configServerState: { opTime: { ts: Timestamp(1569506063, 1), t: 1 } }, $clusterTime: { clusterTime: Timestamp(1569506065, 1801), signature: { hash: BinData(0, 0000000000000000000000000000000000000000), keyId: 0 } }, operationTime: Timestamp(1569506065, 1737) } mem used: 0 documents remaining to clone: 0 2019-09-26T15:54:26.209+0200 I SHARDING [conn380117] moveChunk data transfer progress: { waited: true, active: true, sessionId: "rs1_rs2_5d8cc30b11441b5e57db041b", ns: "buzzguru_master.video", from: "rs1/m3.buzz.guru:27018,m4.buzz.guru:27018", fromShardId: "rs1", min: { _id: ObjectId('5d02947869bd5bea0a5b93e3') }, max: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, shardKeyPattern: { _id: 1.0 }, state: "clone", counts: { cloned: 10285, clonedBytes: 16713415, catchup: 0, steady: 0 }, ok: 1.0, $gleStats: { lastOpTime: { ts: Timestamp(1569506064, 574), t: 1 }, electionId: ObjectId('7fffffff0000000000000001') }, lastCommittedOpTime: Timestamp(1569506066, 1854), $configServerState: { opTime: { ts: Timestamp(1569506063, 1), t: 1 } }, $clusterTime: { clusterTime: Timestamp(1569506066, 1987), signature: { hash: BinData(0, 0000000000000000000000000000000000000000), keyId: 0 } }, operationTime: Timestamp(1569506066, 1922) } mem used: 0 documents remaining to clone: 0 2019-09-26T15:54:27.210+0200 I SHARDING [conn380117] moveChunk data transfer progress: { waited: true, active: true, sessionId: "rs1_rs2_5d8cc30b11441b5e57db041b", ns: "buzzguru_master.video", from: "rs1/m3.buzz.guru:27018,m4.buzz.guru:27018", fromShardId: "rs1", min: { _id: ObjectId('5d02947869bd5bea0a5b93e3') }, max: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, shardKeyPattern: { _id: 1.0 }, state: "clone", counts: { cloned: 10285, clonedBytes: 16713415, catchup: 0, 
steady: 0 }, ok: 1.0, $gleStats: { lastOpTime: { ts: Timestamp(1569506064, 574), t: 1 }, electionId: ObjectId('7fffffff0000000000000001') }, lastCommittedOpTime: Timestamp(1569506067, 1193), $configServerState: { opTime: { ts: Timestamp(1569506066, 8404), t: 1 } }, $clusterTime: { clusterTime: Timestamp(1569506067, 1325), signature: { hash: BinData(0, 0000000000000000000000000000000000000000), keyId: 0 } }, operationTime: Timestamp(1569506067, 1325) } mem used: 0 documents remaining to clone: 0 2019-09-26T15:54:27.551+0200 I SHARDING [conn380117] moveChunk data transfer progress: { waited: true, active: true, sessionId: "rs1_rs2_5d8cc30b11441b5e57db041b", ns: "buzzguru_master.video", from: "rs1/m3.buzz.guru:27018,m4.buzz.guru:27018", fromShardId: "rs1", min: { _id: ObjectId('5d02947869bd5bea0a5b93e3') }, max: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, shardKeyPattern: { _id: 1.0 }, state: "steady", counts: { cloned: 22505, clonedBytes: 35904603, catchup: 0, steady: 0 }, ok: 1.0, $gleStats: { lastOpTime: { ts: Timestamp(1569506064, 574), t: 1 }, electionId: ObjectId('7fffffff0000000000000001') }, lastCommittedOpTime: Timestamp(1569506067, 4029), $configServerState: { opTime: { ts: Timestamp(1569506066, 8404), t: 1 } }, $clusterTime: { clusterTime: Timestamp(1569506067, 4114), signature: { hash: BinData(0, 0000000000000000000000000000000000000000), keyId: 0 } }, operationTime: Timestamp(1569506067, 4114) } mem used: 0 documents remaining to clone: 0 2019-09-26T15:54:30.708+0200 I SHARDING [conn380117] Migration successfully entered critical section 2019-09-26T15:54:30.709+0200 I COMMAND [conn410378] command admin.$cmd command: _getNextSessionMods { _getNextSessionMods: 1, sessionId: "rs1_rs2_5d8cc30b11441b5e57db041b", $clusterTime: { clusterTime: Timestamp(1569506064, 631), signature: { hash: BinData(0, 0FE544CAD57A46493012D2AD69DA842631BDB58B), keyId: 6733616200154087441 } }, $configServerState: { opTime: { ts: Timestamp(1569506063, 1), t: 1 } }, $db: "admin" } numYields:0 reslen:344 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 1 } }, Global: { acquireCount: { r: 1 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } }, Mutex: { acquireCount: { r: 2 } } } protocol:op_msg 6500ms 2019-09-26T15:54:30.744+0200 I SH_REFR [ConfigServerCatalogCacheLoader-257] Refresh for collection buzzguru_master.video from version 2297|1||5d8b790a355ec980dea6c7dc to version 2298|1||5d8b790a355ec980dea6c7dc took 7 ms 2019-09-26T15:54:30.799+0200 I SHARDING [conn380117] Updating metadata for collection buzzguru_master.video from collection version: 2297|1||5d8b790a355ec980dea6c7dc, shard version: 2297|1||5d8b790a355ec980dea6c7dc to collection version: 2298|1||5d8b790a355ec980dea6c7dc, shard version: 2298|1||5d8b790a355ec980dea6c7dc due to version change 2019-09-26T15:54:30.802+0200 I SHARDING [conn380117] Migration succeeded and updated collection version to 2298|1||5d8b790a355ec980dea6c7dc 2019-09-26T15:54:30.802+0200 I SHARDING [conn380117] about to log metadata event into changelog: { _id: "m4.buzz.guru:27018-2019-09-26T15:54:30.802+0200-5d8cc31611441b5e57db7587", server: "m4.buzz.guru:27018", shard: "rs1", clientAddr: "116.202.13.30:14248", time: new Date(1569506070802), what: "moveChunk.commit", ns: "buzzguru_master.video", details: { min: { _id: ObjectId('5d02947869bd5bea0a5b93e3') }, max: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, from: "rs1", to: "rs2", counts: { cloned: 22505, clonedBytes: 35904603, 
catchup: 0, steady: 0 } } } 2019-09-26T15:54:30.807+0200 I SHARDING [conn380117] Deletion of buzzguru_master.video range [{ _id: ObjectId('5d02947869bd5bea0a5b93e3') }, { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }) will be scheduled after all possibly dependent queries finish 2019-09-26T15:54:30.808+0200 I SHARDING [conn380117] Leaving cleanup of buzzguru_master.video range [{ _id: ObjectId('5d02947869bd5bea0a5b93e3') }, { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }) to complete in background 2019-09-26T15:54:30.808+0200 I SHARDING [conn380117] about to log metadata event into changelog: { _id: "m4.buzz.guru:27018-2019-09-26T15:54:30.808+0200-5d8cc31611441b5e57db759f", server: "m4.buzz.guru:27018", shard: "rs1", clientAddr: "116.202.13.30:14248", time: new Date(1569506070808), what: "moveChunk.from", ns: "buzzguru_master.video", details: { min: { _id: ObjectId('5d02947869bd5bea0a5b93e3') }, max: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, step 1 of 6: 0, step 2 of 6: 5, step 3 of 6: 4875, step 4 of 6: 3344, step 5 of 6: 3175, step 6 of 6: 80, to: "rs2", from: "rs1", note: "success" } } 2019-09-26T15:54:30.813+0200 I COMMAND [conn380117] command admin.$cmd command: moveChunk { moveChunk: "buzzguru_master.video", shardVersion: [ Timestamp(2297, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], epoch: ObjectId('5d8b790a355ec980dea6c7dc'), configdb: "config/arbiter1.buzz.guru:27019,arbiter2.buzz.guru:27019,arbiter3.buzz.guru:27019", fromShard: "rs1", toShard: "rs2", min: { _id: ObjectId('5d02947869bd5bea0a5b93e3') }, max: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, maxChunkSizeBytes: 67108864, waitForDelete: false, takeDistLock: false, $clusterTime: { clusterTime: Timestamp(1569506059, 351), signature: { hash: BinData(0, 98C8AFAF24B678CA967A5027E34ACF067CDC2B3C), keyId: 6733616200154087441 } }, $configServerState: { opTime: { ts: Timestamp(1569506059, 351), t: 1 } }, $db: "admin" } numYields:282 reslen:333 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 197 } }, ReplicationStateTransition: { acquireCount: { w: 14179 } }, Global: { acquireCount: { r: 14169, w: 10 } }, Database: { acquireCount: { r: 295, w: 8, W: 2 } }, Collection: { acquireCount: { r: 293, w: 4, W: 3 }, acquireWaitCount: { W: 3 }, timeAcquiringMicros: { W: 113185 } }, Mutex: { acquireCount: { r: 34, W: 5 } }, oplog: { acquireCount: { r: 2, w: 1 } } } flowControl:{ acquireCount: 8 } storage:{ data: { bytesRead: 221115, bytesWritten: 22978, timeReadingMicros: 437, timeWritingMicros: 42 }, timeWaitingMicros: { cache: 3616 } } protocol:op_msg 11489ms 2019-09-26T15:54:31.953+0200 I SHARDING [conn380117] Starting chunk migration ns: buzzguru_master.video, [{ _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }), fromShard: rs1, toShard: rs0 with expected collection version epoch 5d8b790a355ec980dea6c7dc 2019-09-26T15:54:31.959+0200 I SHARDING [conn380117] about to log metadata event into changelog: { _id: "m4.buzz.guru:27018-2019-09-26T15:54:31.959+0200-5d8cc31711441b5e57db814f", server: "m4.buzz.guru:27018", shard: "rs1", clientAddr: "116.202.13.30:14248", time: new Date(1569506071959), what: "moveChunk.start", ns: "buzzguru_master.video", details: { min: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, max: { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }, from: "rs1", to: "rs0" } } 2019-09-26T15:54:35.741+0200 I NETWORK [listener] connection accepted from 116.202.13.30:40476 #410518 (148 connections now open) 2019-09-26T15:54:35.741+0200 I NETWORK [conn410518] received client metadata from 
116.202.13.30:40476 conn410518: { driver: { name: "MongoDB Internal Client", version: "4.2.0" }, os: { type: "Linux", name: "CentOS Linux release 7.6.1810 (Core) ", architecture: "x86_64", version: "Kernel 4.19.43-300.el7.x86_64" } } 2019-09-26T15:54:35.742+0200 I NETWORK [conn410518] end connection 116.202.13.30:40476 (147 connections now open) 2019-09-26T15:54:36.646+0200 I SHARDING [Collection-Range-Deleter] No documents remain to delete in buzzguru_master.video range [{ _id: ObjectId('5cab85eee165ebba0d8b541e') }, { _id: ObjectId('5cb4ab1a8cd87f60268bcfc9') }) 2019-09-26T15:54:36.646+0200 I SHARDING [Collection-Range-Deleter] Waiting for majority replication of local deletions in buzzguru_master.video range [{ _id: ObjectId('5cab85eee165ebba0d8b541e') }, { _id: ObjectId('5cb4ab1a8cd87f60268bcfc9') }) 2019-09-26T15:54:39.392+0200 I COMMAND [conn407240] command admin.$cmd command: _migrateClone { _migrateClone: "buzzguru_master.video", sessionId: "rs1_rs0_5d8cc31711441b5e57db8160", $clusterTime: { clusterTime: Timestamp(1569506079, 184), signature: { hash: BinData(0, 73C09D7FDB53FC7BF2B3347A7A0714769BB5E4B9), keyId: 6733616200154087441 } }, $configServerState: { opTime: { ts: Timestamp(1569506078, 616), t: 1 } }, $db: "admin" } numYields:0 reslen:16775272 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 86 } }, ReplicationStateTransition: { acquireCount: { w: 86 } }, Global: { acquireCount: { r: 86 } }, Database: { acquireCount: { r: 86 } }, Collection: { acquireCount: { r: 86 } }, Mutex: { acquireCount: { r: 172 } } } storage:{ data: { bytesRead: 14076399, timeReadingMicros: 102556 } } protocol:op_msg 136ms 2019-09-26T15:54:40.035+0200 I SHARDING [conn380117] moveChunk data transfer progress: { waited: true, active: true, sessionId: "rs1_rs0_5d8cc31711441b5e57db8160", ns: "buzzguru_master.video", from: "rs1/m3.buzz.guru:27018,m4.buzz.guru:27018", fromShardId: "rs1", min: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, max: { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }, shardKeyPattern: { _id: 1.0 }, state: "clone", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0, $gleStats: { lastOpTime: { ts: Timestamp(1569506079, 31), t: 1 }, electionId: ObjectId('7fffffff0000000000000001') }, lastCommittedOpTime: Timestamp(1569506080, 215), $configServerState: { opTime: { ts: Timestamp(1569506078, 616), t: 1 } }, $clusterTime: { clusterTime: Timestamp(1569506080, 285), signature: { hash: BinData(0, 0000000000000000000000000000000000000000), keyId: 0 } }, operationTime: Timestamp(1569506080, 285) } mem used: 0 documents remaining to clone: 0 2019-09-26T15:54:41.036+0200 I SHARDING [conn380117] moveChunk data transfer progress: { waited: true, active: true, sessionId: "rs1_rs0_5d8cc31711441b5e57db8160", ns: "buzzguru_master.video", from: "rs1/m3.buzz.guru:27018,m4.buzz.guru:27018", fromShardId: "rs1", min: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, max: { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }, shardKeyPattern: { _id: 1.0 }, state: "clone", counts: { cloned: 10957, clonedBytes: 16709631, catchup: 0, steady: 0 }, ok: 1.0, $gleStats: { lastOpTime: { ts: Timestamp(1569506079, 31), t: 1 }, electionId: ObjectId('7fffffff0000000000000001') }, lastCommittedOpTime: Timestamp(1569506081, 144), $configServerState: { opTime: { ts: Timestamp(1569506078, 616), t: 1 } }, $clusterTime: { clusterTime: Timestamp(1569506081, 210), signature: { hash: BinData(0, 0000000000000000000000000000000000000000), keyId: 0 } }, operationTime: Timestamp(1569506081, 210) } mem used: 0 documents 
remaining to clone: 0 2019-09-26T15:54:42.037+0200 I SHARDING [conn380117] moveChunk data transfer progress: { waited: true, active: true, sessionId: "rs1_rs0_5d8cc31711441b5e57db8160", ns: "buzzguru_master.video", from: "rs1/m3.buzz.guru:27018,m4.buzz.guru:27018", fromShardId: "rs1", min: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, max: { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }, shardKeyPattern: { _id: 1.0 }, state: "clone", counts: { cloned: 10957, clonedBytes: 16709631, catchup: 0, steady: 0 }, ok: 1.0, $gleStats: { lastOpTime: { ts: Timestamp(1569506079, 31), t: 1 }, electionId: ObjectId('7fffffff0000000000000001') }, lastCommittedOpTime: Timestamp(1569506082, 207), $configServerState: { opTime: { ts: Timestamp(1569506078, 616), t: 1 } }, $clusterTime: { clusterTime: Timestamp(1569506082, 341), signature: { hash: BinData(0, 0000000000000000000000000000000000000000), keyId: 0 } }, operationTime: Timestamp(1569506082, 341) } mem used: 0 documents remaining to clone: 0 2019-09-26T15:54:42.345+0200 I SHARDING [Collection-Range-Deleter] Finished deleting documents in buzzguru_master.video range [{ _id: ObjectId('5cab85eee165ebba0d8b541e') }, { _id: ObjectId('5cb4ab1a8cd87f60268bcfc9') }) 2019-09-26T15:54:42.602+0200 I SHARDING [conn380117] moveChunk data transfer progress: { waited: true, active: true, sessionId: "rs1_rs0_5d8cc31711441b5e57db8160", ns: "buzzguru_master.video", from: "rs1/m3.buzz.guru:27018,m4.buzz.guru:27018", fromShardId: "rs1", min: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, max: { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }, shardKeyPattern: { _id: 1.0 }, state: "steady", counts: { cloned: 22505, clonedBytes: 34588400, catchup: 0, steady: 0 }, ok: 1.0, $gleStats: { lastOpTime: { ts: Timestamp(1569506079, 31), t: 1 }, electionId: ObjectId('7fffffff0000000000000001') }, lastCommittedOpTime: Timestamp(1569506082, 5630), $configServerState: { opTime: { ts: Timestamp(1569506078, 616), t: 1 } }, $clusterTime: { clusterTime: Timestamp(1569506082, 5710), signature: { hash: BinData(0, 0000000000000000000000000000000000000000), keyId: 0 } }, operationTime: Timestamp(1569506082, 5710) } mem used: 0 documents remaining to clone: 0 2019-09-26T15:54:43.095+0200 I SHARDING [conn380117] Migration successfully entered critical section 2019-09-26T15:54:43.096+0200 I COMMAND [conn406962] command admin.$cmd command: _getNextSessionMods { _getNextSessionMods: 1, sessionId: "rs1_rs0_5d8cc31711441b5e57db8160", $clusterTime: { clusterTime: Timestamp(1569506079, 43), signature: { hash: BinData(0, 73C09D7FDB53FC7BF2B3347A7A0714769BB5E4B9), keyId: 6733616200154087441 } }, $configServerState: { opTime: { ts: Timestamp(1569506078, 616), t: 1 } }, $db: "admin" } numYields:0 reslen:344 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 1 } }, ReplicationStateTransition: { acquireCount: { w: 1 } }, Global: { acquireCount: { r: 1 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } }, Mutex: { acquireCount: { r: 2 } } } protocol:op_msg 4050ms 2019-09-26T15:54:43.127+0200 I SH_REFR [ConfigServerCatalogCacheLoader-257] Refresh for collection buzzguru_master.video from version 2298|1||5d8b790a355ec980dea6c7dc to version 2299|1||5d8b790a355ec980dea6c7dc took 6 ms 2019-09-26T15:54:43.144+0200 I SHARDING [conn380117] Updating metadata for collection buzzguru_master.video from collection version: 2298|1||5d8b790a355ec980dea6c7dc, shard version: 2298|1||5d8b790a355ec980dea6c7dc to collection version: 2299|1||5d8b790a355ec980dea6c7dc, shard version: 
2299|1||5d8b790a355ec980dea6c7dc due to version change 2019-09-26T15:54:43.146+0200 I SHARDING [conn380117] Migration succeeded and updated collection version to 2299|1||5d8b790a355ec980dea6c7dc 2019-09-26T15:54:43.147+0200 I SHARDING [conn380117] about to log metadata event into changelog: { _id: "m4.buzz.guru:27018-2019-09-26T15:54:43.146+0200-5d8cc32311441b5e57dc09e4", server: "m4.buzz.guru:27018", shard: "rs1", clientAddr: "116.202.13.30:14248", time: new Date(1569506083146), what: "moveChunk.commit", ns: "buzzguru_master.video", details: { min: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, max: { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }, from: "rs1", to: "rs0", counts: { cloned: 22505, clonedBytes: 34588400, catchup: 0, steady: 0 } } } 2019-09-26T15:54:43.155+0200 I SHARDING [conn380117] Deletion of buzzguru_master.video range [{ _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }) will be scheduled after all possibly dependent queries finish 2019-09-26T15:54:43.155+0200 I SHARDING [conn380117] Leaving cleanup of buzzguru_master.video range [{ _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }) to complete in background 2019-09-26T15:54:43.155+0200 I SHARDING [conn380117] about to log metadata event into changelog: { _id: "m4.buzz.guru:27018-2019-09-26T15:54:43.155+0200-5d8cc32311441b5e57dc0a05", server: "m4.buzz.guru:27018", shard: "rs1", clientAddr: "116.202.13.30:14248", time: new Date(1569506083155), what: "moveChunk.from", ns: "buzzguru_master.video", details: { min: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, max: { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }, step 1 of 6: 0, step 2 of 6: 6, step 3 of 6: 7076, step 4 of 6: 3566, step 5 of 6: 506, step 6 of 6: 46, to: "rs0", from: "rs1", note: "success" } } 2019-09-26T15:54:43.164+0200 I COMMAND [conn380117] command admin.$cmd command: moveChunk { moveChunk: "buzzguru_master.video", shardVersion: [ Timestamp(2298, 1), ObjectId('5d8b790a355ec980dea6c7dc') ], epoch: ObjectId('5d8b790a355ec980dea6c7dc'), configdb: "config/arbiter1.buzz.guru:27019,arbiter2.buzz.guru:27019,arbiter3.buzz.guru:27019", fromShard: "rs1", toShard: "rs0", min: { _id: ObjectId('5d029cbb69bd5bea0adc23d1') }, max: { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }, maxChunkSizeBytes: 67108864, waitForDelete: false, takeDistLock: false, $clusterTime: { clusterTime: Timestamp(1569506071, 1960), signature: { hash: BinData(0, 095418897661E3B444605931DC7CB6E2B45E3DDE), keyId: 6733616200154087441 } }, $configServerState: { opTime: { ts: Timestamp(1569506071, 1960), t: 1 } }, $db: "admin" } numYields:283 reslen:333 locks:{ ParallelBatchWriterMode: { acquireCount: { r: 197 } }, ReplicationStateTransition: { acquireCount: { w: 14275 } }, Global: { acquireCount: { r: 14265, w: 10 } }, Database: { acquireCount: { r: 296, w: 8, W: 2 } }, Collection: { acquireCount: { r: 294, w: 4, W: 3 }, acquireWaitCount: { W: 3 }, timeAcquiringMicros: { W: 17463 } }, Mutex: { acquireCount: { r: 34, W: 5 } }, oplog: { acquireCount: { r: 2, w: 1 } } } flowControl:{ acquireCount: 8 } storage:{ data: { bytesRead: 232271, bytesWritten: 31171, timeReadingMicros: 469, timeWritingMicros: 61 }, timeWaitingMicros: { cache: 2080 } } protocol:op_msg 11211ms 2019-09-26T15:54:44.303+0200 I SHARDING [conn380117] Starting chunk migration ns: buzzguru_master.video, [{ _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }, { _id: ObjectId('5d02c03769bd5bea0ac0b484') }), fromShard: rs1, toShard: rs0 with expected collection version 
epoch 5d8b790a355ec980dea6c7dc 2019-09-26T15:54:44.309+0200 I SHARDING [conn380117] about to log metadata event into changelog: { _id: "m4.buzz.guru:27018-2019-09-26T15:54:44.309+0200-5d8cc32411441b5e57dc19f7", server: "m4.buzz.guru:27018", shard: "rs1", clientAddr: "116.202.13.30:14248", time: new Date(1569506084309), what: "moveChunk.start", ns: "buzzguru_master.video", details: { min: { _id: ObjectId('5d02a85169bd5bea0a7c9d4c') }, max: { _id: ObjectId('5d02c03769bd5bea0ac0b484') }, from: "rs1", to: "rs0" } } 2019-09-26T15:54:45.758+0200 I STORAGE [WTCheckpointThread] WiredTiger message [1569506085:758275][2985:0x7fc0b4ffe700], file:index-967-1648121650949916385.wt, WT_SESSION.checkpoint: Checkpoint has been running for 438 seconds and wrote: 245000 pages (7198 MB) 2019-09-26T15:54:46.660+0200 I - [conn410395] operation was interrupted because a client disconnected 2019-09-26T15:54:46.660+0200 I SHARDING [conn410395] Queries possibly dependent on buzzguru_master.video range(s) finished; scheduling ranges for deletion 2019-09-26T15:54:46.661+0200 F - [conn410395] Invariant failure opCtx->lockState()->isDbLockedForMode(db, MODE_IS) || (db.compare("local") == 0 && opCtx->lockState()->isLocked()) src/mongo/db/catalog/database_holder_impl.cpp 74 2019-09-26T15:54:46.661+0200 F - [conn410395] ***aborting after invariant() failure 2019-09-26T15:54:46.690+0200 F - [conn410395] Got signal: 6 (Aborted). 0x55c968eccc81 0x55c968ecc47e 0x55c968ecc516 0x7fc0bef3d5d0 0x7fc0beb972c7 0x7fc0beb989b8 0x55c967406916 0x55c967c51534 0x55c967dce018 0x55c967e13f48 0x55c967e15199 0x55c967e15366 0x55c967e1543f 0x55c967e015d0 0x55c967e02096 0x55c967e03158 0x55c967e0540a 0x55c967b28799 0x55c967b1cd45 0x55c967847899 0x55c967848f53 0x55c967849e0e 0x55c96784a6a0 0x55c9678386dc 0x55c9678442cc 0x55c96783fc1f 0x55c967842e9c 0x55c968624742 0x55c96783d63d 0x55c9678408d3 0x55c96783ed07 0x55c96783fb7b 0x55c967842e9c 0x55c968624bab 0x55c968c5ec94 0x7fc0bef35dd5 0x7fc0bec5f02d ----- BEGIN BACKTRACE ----- 
{"backtrace":[{"b":"55C96674D000","o":"277FC81","s":"_ZN5mongo15printStackTraceERSo"},{"b":"55C96674D000","o":"277F47E"},{"b":"55C96674D000","o":"277F516"},{"b":"7FC0BEF2E000","o":"F5D0"},{"b":"7FC0BEB61000","o":"362C7","s":"gsignal"},{"b":"7FC0BEB61000","o":"379B8","s":"abort"},{"b":"55C96674D000","o":"CB9916","s":"_ZN5mongo22invariantFailedWithMsgEPKcRKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES1_j"},{"b":"55C96674D000","o":"1504534","s":"_ZNK5mongo18DatabaseHolderImpl5getDbEPNS_16OperationContextENS_10StringDataE"},{"b":"55C96674D000","o":"1681018","s":"_ZN5mongo14MultiPlanStageC1EPNS_16OperationContextEPKNS_10CollectionEPNS_14CanonicalQueryENS0_11CachingModeE"},{"b":"55C96674D000","o":"16C6F48"},{"b":"55C96674D000","o":"16C8199","s":"_ZN5mongo11getExecutorEPNS_16OperationContextEPNS_10CollectionESt10unique_ptrINS_14CanonicalQueryESt14default_deleteIS5_EENS_12PlanExecutor11YieldPolicyEm"},{"b":"55C96674D000","o":"16C8366"},{"b":"55C96674D000","o":"16C843F","s":"_ZN5mongo15getExecutorFindEPNS_16OperationContextEPNS_10CollectionESt10unique_ptrINS_14CanonicalQueryESt14default_deleteIS5_EEbm"},{"b":"55C96674D000","o":"16B45D0"},{"b":"55C96674D000","o":"16B5096","s":"_ZN5mongo9PipelineD15prepareExecutorEPNS_16OperationContextEPNS_10CollectionERKNS_15NamespaceStringEPNS_8PipelineERKN5boost13intrusive_ptrINS_17ExpressionContextEEEbRKNSB_INS_18DocumentSourceSortEEESt10unique_ptrINS_36GroupFromFirstDocumentTransformationESt14default_deleteISL_EERKNS_11DepsTrackerERKNS_7BSONObjEPKNS_18AggregationRequestERKyPSS_S10_"},{"b":"55C96674D000","o":"16B6158","s":"_ZN5mongo9PipelineD30buildInnerQueryExecutorGenericEPNS_10CollectionERKNS_15NamespaceStringEPKNS_18AggregationRequestEPNS_8PipelineE"},{"b":"55C96674D000","o":"16B840A","s":"_ZN5mongo9PipelineD23buildInnerQueryExecutorEPNS_10CollectionERKNS_15NamespaceStringEPKNS_18AggregationRequestEPNS_8PipelineE"},{"b":"55C96674D000","o":"13DB799","s":"_ZN5mongo12runAggregateEPNS_16OperationContextERKNS_15NamespaceStringERKNS_18AggregationRequestERKNS_7BSONObjERKSt6vectorINS_9PrivilegeESaISC_EEPNS_3rpc21ReplyBuilderInterfaceE"},{"b":"55C96674D000","o":"13CFD45"},{"b":"55C96674D000","o":"10FA899"},{"b":"55C96674D000","o":"10FBF53"},{"b":"55C96674D000","o":"10FCE0E"},{"b":"55C96674D000","o":"10FD6A0","s":"_ZN5mongo23ServiceEntryPointCommon13handleRequestEPNS_16OperationContextERKNS_7MessageERKNS0_5HooksE"},{"b":"55C96674D000","o":"10EB6DC","s":"_ZN5mongo23ServiceEntryPointMongod13handleRequestEPNS_16OperationContextERKNS_7MessageE"},{"b":"55C96674D000","o":"10F72CC","s":"_ZN5mongo19ServiceStateMachine15_processMessageENS0_11ThreadGuardE"},{"b":"55C96674D000","o":"10F2C1F","s":"_ZN5mongo19ServiceStateMachine15_runNextInGuardENS0_11ThreadGuardE"},{"b":"55C96674D000","o":"10F5E9C"},{"b":"55C96674D000","o":"1ED7742","s":"_ZN5mongo9transport26ServiceExecutorSynchronous8scheduleESt8functionIFvvEENS0_15ServiceExecutor13ScheduleFlagsENS0_23ServiceExecutorTaskNameE"},{"b":"55C96674D000","o":"10F063D","s":"_ZN5mongo19ServiceStateMachine22_scheduleNextWithGuardENS0_11ThreadGuardENS_9transport15ServiceExecutor13ScheduleFlagsENS2_23ServiceExecutorTaskNameENS0_9OwnershipE"},{"b":"55C96674D000","o":"10F38D3","s":"_ZN5mongo19ServiceStateMachine15_sourceCallbackENS_6StatusE"},{"b":"55C96674D000","o":"10F1D07","s":"_ZN5mongo19ServiceStateMachine14_sourceMessageENS0_11ThreadGuardE"},{"b":"55C96674D000","o":"10F2B7B","s":"_ZN5mongo19ServiceStateMachine15_runNextInGuardENS0_11ThreadGuardE"},{"b":"55C96674D000","o":"10F5E9C"},{"b":"55C96674D000","o":"1ED7BAB"},{"b":"55C9667
4D000","o":"2511C94"},{"b":"7FC0BEF2E000","o":"7DD5"},{"b":"7FC0BEB61000","o":"FE02D","s":"clone"}],"processInfo":{ "mongodbVersion" : "4.2.0", "gitVersion" : "a4b751dcf51dd249c5865812b390cfd1c0129c30", "compiledModules" : [], "uname" : { "sysname" : "Linux", "release" : "4.19.43-300.el7.x86_64", "version" : "#1 SMP Fri May 17 00:06:07 UTC 2019", "machine" : "x86_64" }, "somap" : [ { "b" : "55C96674D000", "elfType" : 3, "buildId" : "E8D75D13E92279CB6AF8104353A95729FD262FAB" }, { "b" : "7FFF9F16D000", "elfType" : 3, "buildId" : "83D4E2FD2DC72D673299472CF30C61012C60FDE4" }, { "b" : "7FC0C035B000", "path" : "/lib64/libcurl.so.4", "elfType" : 3, "buildId" : "9570D81C6E0E7EE6E021640223115E827B7BCBF2" }, { "b" : "7FC0C0142000", "path" : "/lib64/libresolv.so.2", "elfType" : 3, "buildId" : "C444AE61E7CBB716FD9C18A0B46A7FE8F4FCF3E5" }, { "b" : "7FC0BFCE0000", "path" : "/lib64/libcrypto.so.10", "elfType" : 3, "buildId" : "3593FA778645A59EA272DBBB59D318C60940E792" }, { "b" : "7FC0BFA6E000", "path" : "/lib64/libssl.so.10", "elfType" : 3, "buildId" : "AEF5E6F2240B55F90E9DF76CFBB8B9D9F5286583" }, { "b" : "7FC0BF86A000", "path" : "/lib64/libdl.so.2", "elfType" : 3, "buildId" : "357693C8F1F49D93010C4E31529C07CDD2BD3D08" }, { "b" : "7FC0BF662000", "path" : "/lib64/librt.so.1", "elfType" : 3, "buildId" : "EFDE2029C9A4A20BE5B8D8AE7E6551FF9B5755D2" }, { "b" : "7FC0BF360000", "path" : "/lib64/libm.so.6", "elfType" : 3, "buildId" : "5B14BE4D749631673523A61074C10959D50F5455" }, { "b" : "7FC0BF14A000", "path" : "/lib64/libgcc_s.so.1", "elfType" : 3, "buildId" : "179F202998E429AA1215907F6D4C5C1BB9C90136" }, { "b" : "7FC0BEF2E000", "path" : "/lib64/libpthread.so.0", "elfType" : 3, "buildId" : "96900CB0FF25B26F2BBDF247DE1408242E4773D8" }, { "b" : "7FC0BEB61000", "path" : "/lib64/libc.so.6", "elfType" : 3, "buildId" : "426A04647352308628F2091A30D347EDEEDED787" }, { "b" : "7FC0C05C4000", "path" : "/lib64/ld-linux-x86-64.so.2", "elfType" : 3, "buildId" : "A527FE72908703C5972AE384E78D1850D1881EE7" }, { "b" : "7FC0BE92E000", "path" : "/lib64/libidn.so.11", "elfType" : 3, "buildId" : "2B77BBEFFF65E94F3E0B71A4E89BEB68C4B476C5" }, { "b" : "7FC0BE704000", "path" : "/lib64/libssh2.so.1", "elfType" : 3, "buildId" : "4F4D120B3A652DC2651DA82CB6DC6F05516F7571" }, { "b" : "7FC0BE4B2000", "path" : "/lib64/libssl3.so", "elfType" : 3, "buildId" : "2E28F6A705F2ECEA8460D4716D5D1C24B5DDA5E4" }, { "b" : "7FC0BE28B000", "path" : "/lib64/libsmime3.so", "elfType" : 3, "buildId" : "8D0B4010959C321022DF9CE239277A9D7B34A76A" }, { "b" : "7FC0BDF5E000", "path" : "/lib64/libnss3.so", "elfType" : 3, "buildId" : "F5A64BB37FA3972E545EF459A51310F0AB56FA56" }, { "b" : "7FC0BDD2E000", "path" : "/lib64/libnssutil3.so", "elfType" : 3, "buildId" : "E0705772325A52C3372FFFB8BDE5F786E2E200D6" }, { "b" : "7FC0BDB2A000", "path" : "/lib64/libplds4.so", "elfType" : 3, "buildId" : "084D2194302908913F68B9DCD27DE46FA5B50522" }, { "b" : "7FC0BD925000", "path" : "/lib64/libplc4.so", "elfType" : 3, "buildId" : "799B28AD9A5460D78376E2C11260F2E858B95DE3" }, { "b" : "7FC0BD6E7000", "path" : "/lib64/libnspr4.so", "elfType" : 3, "buildId" : "DE762A28174110911B273E175D54F222B313CFE0" }, { "b" : "7FC0BD49A000", "path" : "/lib64/libgssapi_krb5.so.2", "elfType" : 3, "buildId" : "BCC30853830CD911E58700591830DF51ABCBD7BA" }, { "b" : "7FC0BD1B1000", "path" : "/lib64/libkrb5.so.3", "elfType" : 3, "buildId" : "45BAB0BB455BDFA960FDA22E4124CF17B67CC930" }, { "b" : "7FC0BCF7E000", "path" : "/lib64/libk5crypto.so.3", "elfType" : 3, "buildId" : "A9B3906192687CC45D483AE3C58C8AF745A6726A" 
}, { "b" : "7FC0BCD7A000", "path" : "/lib64/libcom_err.so.2", "elfType" : 3, "buildId" : "B4BE1023D9606A88169DF411BF94AF417D7BA1A0" }, { "b" : "7FC0BCB6B000", "path" : "/lib64/liblber-2.4.so.2", "elfType" : 3, "buildId" : "3192C56CD451E18EB9F29CB045432BA9C738DD29" }, { "b" : "7FC0BC916000", "path" : "/lib64/libldap-2.4.so.2", "elfType" : 3, "buildId" : "F1FADDDE0D21D5F4E2DCADEDD3B85B6E7AAC9883" }, { "b" : "7FC0BC700000", "path" : "/lib64/libz.so.1", "elfType" : 3, "buildId" : "B9D5F73428BD6AD68C96986B57BEA3B7CEDB9745" }, { "b" : "7FC0BC4F0000", "path" : "/lib64/libkrb5support.so.0", "elfType" : 3, "buildId" : "94B3BCB669126166B77CDCE6092679A6AA2004C8" }, { "b" : "7FC0BC2EC000", "path" : "/lib64/libkeyutils.so.1", "elfType" : 3, "buildId" : "2E01D5AC08C1280D013AAB96B292AC58BC30A263" }, { "b" : "7FC0BC0CF000", "path" : "/lib64/libsasl2.so.3", "elfType" : 3, "buildId" : "E2F2017F821DD1B9D307DA1A9B8014F2941AEB7B" }, { "b" : "7FC0BBEA8000", "path" : "/lib64/libselinux.so.1", "elfType" : 3, "buildId" : "D2DD4DA3FDE1477D25BFFF80F3A25FDB541A8179" }, { "b" : "7FC0BBC71000", "path" : "/lib64/libcrypt.so.1", "elfType" : 3, "buildId" : "740CAD898E29E1F3B73A323CCEC4A7C88911647F" }, { "b" : "7FC0BBA0F000", "path" : "/lib64/libpcre.so.1", "elfType" : 3, "buildId" : "9CA3D11F018BEEB719CDB34BE800BF1641350D0A" }, { "b" : "7FC0BB80C000", "path" : "/lib64/libfreebl3.so", "elfType" : 3, "buildId" : "B758881F4B6AF6C28C07A1A57713CBD2144628D4" } ] }} mongod(_ZN5mongo15printStackTraceERSo+0x41) [0x55c968eccc81] mongod(+0x277F47E) [0x55c968ecc47e] mongod(+0x277F516) [0x55c968ecc516] libpthread.so.0(+0xF5D0) [0x7fc0bef3d5d0] libc.so.6(gsignal+0x37) [0x7fc0beb972c7] libc.so.6(abort+0x148) [0x7fc0beb989b8] mongod(_ZN5mongo22invariantFailedWithMsgEPKcRKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEES1_j+0x0) [0x55c967406916] mongod(_ZNK5mongo18DatabaseHolderImpl5getDbEPNS_16OperationContextENS_10StringDataE+0x84) [0x55c967c51534] mongod(_ZN5mongo14MultiPlanStageC1EPNS_16OperationContextEPKNS_10CollectionEPNS_14CanonicalQueryENS0_11CachingModeE+0x108) [0x55c967dce018] mongod(+0x16C6F48) [0x55c967e13f48] mongod(_ZN5mongo11getExecutorEPNS_16OperationContextEPNS_10CollectionESt10unique_ptrINS_14CanonicalQueryESt14default_deleteIS5_EENS_12PlanExecutor11YieldPolicyEm+0x89) [0x55c967e15199] mongod(+0x16C8366) [0x55c967e15366] mongod(_ZN5mongo15getExecutorFindEPNS_16OperationContextEPNS_10CollectionESt10unique_ptrINS_14CanonicalQueryESt14default_deleteIS5_EEbm+0x6F) [0x55c967e1543f] mongod(+0x16B45D0) [0x55c967e015d0] mongod(_ZN5mongo9PipelineD15prepareExecutorEPNS_16OperationContextEPNS_10CollectionERKNS_15NamespaceStringEPNS_8PipelineERKN5boost13intrusive_ptrINS_17ExpressionContextEEEbRKNSB_INS_18DocumentSourceSortEEESt10unique_ptrINS_36GroupFromFirstDocumentTransformationESt14default_deleteISL_EERKNS_11DepsTrackerERKNS_7BSONObjEPKNS_18AggregationRequestERKyPSS_S10_+0x946) [0x55c967e02096] mongod(_ZN5mongo9PipelineD30buildInnerQueryExecutorGenericEPNS_10CollectionERKNS_15NamespaceStringEPKNS_18AggregationRequestEPNS_8PipelineE+0x348) [0x55c967e03158] mongod(_ZN5mongo9PipelineD23buildInnerQueryExecutorEPNS_10CollectionERKNS_15NamespaceStringEPKNS_18AggregationRequestEPNS_8PipelineE+0x56A) [0x55c967e0540a] mongod(_ZN5mongo12runAggregateEPNS_16OperationContextERKNS_15NamespaceStringERKNS_18AggregationRequestERKNS_7BSONObjERKSt6vectorINS_9PrivilegeESaISC_EEPNS_3rpc21ReplyBuilderInterfaceE+0x18E9) [0x55c967b28799] mongod(+0x13CFD45) [0x55c967b1cd45] mongod(+0x10FA899) [0x55c967847899] mongod(+0x10FBF53) [0x55c967848f53] 
mongod(+0x10FCE0E) [0x55c967849e0e] mongod(_ZN5mongo23ServiceEntryPointCommon13handleRequestEPNS_16OperationContextERKNS_7MessageERKNS0_5HooksE+0x540) [0x55c96784a6a0] mongod(_ZN5mongo23ServiceEntryPointMongod13handleRequestEPNS_16OperationContextERKNS_7MessageE+0x3C) [0x55c9678386dc] mongod(_ZN5mongo19ServiceStateMachine15_processMessageENS0_11ThreadGuardE+0xEC) [0x55c9678442cc] mongod(_ZN5mongo19ServiceStateMachine15_runNextInGuardENS0_11ThreadGuardE+0x17F) [0x55c96783fc1f] mongod(+0x10F5E9C) [0x55c967842e9c] mongod(_ZN5mongo9transport26ServiceExecutorSynchronous8scheduleESt8functionIFvvEENS0_15ServiceExecutor13ScheduleFlagsENS0_23ServiceExecutorTaskNameE+0x182) [0x55c968624742] mongod(_ZN5mongo19ServiceStateMachine22_scheduleNextWithGuardENS0_11ThreadGuardENS_9transport15ServiceExecutor13ScheduleFlagsENS2_23ServiceExecutorTaskNameENS0_9OwnershipE+0x10D) [0x55c96783d63d] mongod(_ZN5mongo19ServiceStateMachine15_sourceCallbackENS_6StatusE+0x843) [0x55c9678408d3] mongod(_ZN5mongo19ServiceStateMachine14_sourceMessageENS0_11ThreadGuardE+0x2E7) [0x55c96783ed07] mongod(_ZN5mongo19ServiceStateMachine15_runNextInGuardENS0_11ThreadGuardE+0xDB) [0x55c96783fb7b] mongod(+0x10F5E9C) [0x55c967842e9c] mongod(+0x1ED7BAB) [0x55c968624bab] mongod(+0x2511C94) [0x55c968c5ec94] libpthread.so.0(+0x7DD5) [0x7fc0bef35dd5] libc.so.6(clone+0x6D) [0x7fc0bec5f02d] ----- END BACKTRACE ----- 2019-09-26T15:57:16.007+0200 I CONTROL [main] ***** SERVER RESTARTED ***** 2019-09-26T15:57:16.014+0200 I CONTROL [main] Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none' 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] MongoDB starting : pid=10895 port=27018 dbpath=/var/lib/mongodb 64-bit host=m4.buzz.guru 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] db version v4.2.0 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] git version: a4b751dcf51dd249c5865812b390cfd1c0129c30 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] OpenSSL version: OpenSSL 1.0.1e-fips 11 Feb 2013 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] allocator: tcmalloc 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] modules: none 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] build environment: 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] distmod: rhel70 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] distarch: x86_64 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] target_arch: x86_64 2019-09-26T15:57:16.052+0200 I CONTROL [initandlisten] options: { config: "/etc/mongod.conf", net: { bindIp: "0.0.0.0", port: 27018 }, processManagement: { fork: true, pidFilePath: "/var/run/mongodb/mongod.pid", timeZoneInfo: "/usr/share/zoneinfo" }, replication: { replSetName: "rs1" }, sharding: { clusterRole: "shardsvr" }, storage: { dbPath: "/var/lib/mongodb", journal: { enabled: true } }, systemLog: { destination: "file", logAppend: true, path: "/var/log/mongodb/mongod.log" } } 2019-09-26T15:57:16.053+0200 W STORAGE [initandlisten] Detected unclean shutdown - /var/lib/mongodb/mongod.lock is not empty. 2019-09-26T15:57:16.053+0200 I STORAGE [initandlisten] Detected data files in /var/lib/mongodb created by the 'wiredTiger' storage engine, so setting the active storage engine to 'wiredTiger'. 2019-09-26T15:57:16.053+0200 W STORAGE [initandlisten] Recovering data from the last clean checkpoint. 
2019-09-26T15:57:16.053+0200 I STORAGE [initandlisten]
2019-09-26T15:57:16.053+0200 I STORAGE [initandlisten] ** WARNING: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine
2019-09-26T15:57:16.053+0200 I STORAGE [initandlisten] **          See http://dochub.mongodb.org/core/prodnotes-filesystem
2019-09-26T15:57:16.053+0200 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=128294M,cache_overflow=(file_max=0M),session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress],
2019-09-26T15:57:16.424+0200 I STORAGE [initandlisten] WiredTiger message [1569506236:424846][10895:0x7f3c08bdfc00], txn-recover: Recovering log 4310 through 4314
2019-09-26T15:57:16.835+0200 I STORAGE [initandlisten] WiredTiger message [1569506236:835626][10895:0x7f3c08bdfc00], txn-recover: Recovering log 4311 through 4314
2019-09-26T15:57:17.283+0200 I STORAGE [initandlisten] WiredTiger message [1569506237:283510][10895:0x7f3c08bdfc00], txn-recover: Recovering log 4312 through 4314
2019-09-26T15:57:17.729+0200 I STORAGE [initandlisten] WiredTiger message [1569506237:729044][10895:0x7f3c08bdfc00], txn-recover: Recovering log 4313 through 4314
2019-09-26T15:57:17.890+0200 I STORAGE [initandlisten] WiredTiger message [1569506237:890816][10895:0x7f3c08bdfc00], txn-recover: Recovering log 4314 through 4314
2019-09-26T15:57:17.952+0200 I STORAGE [initandlisten] WiredTiger message [1569506237:952514][10895:0x7f3c08bdfc00], txn-recover: Main recovery loop: starting at 4310/9794944 to 4314/256
2019-09-26T15:57:17.953+0200 I STORAGE [initandlisten] WiredTiger message [1569506237:953175][10895:0x7f3c08bdfc00], txn-recover: Recovering log 4310 through 4314
2019-09-26T15:57:18.606+0200 I STORAGE [initandlisten] WiredTiger message [1569506238:606480][10895:0x7f3c08bdfc00], file:collection-16-1648121650949916385.wt, txn-recover: Recovering log 4311 through 4314
2019-09-26T15:57:19.340+0200 I STORAGE [initandlisten] WiredTiger message [1569506239:340435][10895:0x7f3c08bdfc00], file:collection-16-1648121650949916385.wt, txn-recover: Recovering log 4312 through 4314
2019-09-26T15:57:20.079+0200 I STORAGE [initandlisten] WiredTiger message [1569506240:79442][10895:0x7f3c08bdfc00], file:collection-16-1648121650949916385.wt, txn-recover: Recovering log 4313 through 4314
2019-09-26T15:57:20.332+0200 I STORAGE [initandlisten] WiredTiger message [1569506240:332945][10895:0x7f3c08bdfc00], file:collection-16-1648121650949916385.wt, txn-recover: Recovering log 4314 through 4314
2019-09-26T15:57:20.378+0200 I STORAGE [initandlisten] WiredTiger message [1569506240:378693][10895:0x7f3c08bdfc00], file:collection-16-1648121650949916385.wt, txn-recover: Set global recovery timestamp: (1569505039,1131)
2019-09-26T15:57:20.719+0200 I RECOVERY [initandlisten] WiredTiger recoveryTimestamp. Ts: Timestamp(1569505039, 1131)
2019-09-26T15:57:21.392+0200 I STORAGE [initandlisten] Starting OplogTruncaterThread local.oplog.rs
2019-09-26T15:57:21.392+0200 I STORAGE [initandlisten] The size storer reports that the oplog contains 93457620 records totaling to 44197861225 bytes
2019-09-26T15:57:21.392+0200 I STORAGE [initandlisten] Sampling from the oplog between Sep 25 06:36:35:3375 and Sep 26 15:54:46:1980 to determine where to place markers for truncation
2019-09-26T15:57:21.392+0200 I STORAGE [initandlisten] Taking 994 samples and assuming that each section of oplog contains approximately 940141 records totaling to 444610310 bytes
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 06:39:14:558
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 06:40:44:1556
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 06:44:09:655
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 06:46:22:374
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 06:49:11:435
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 06:51:19:852
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 06:54:09:3629
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 06:56:17:94
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 06:59:33:3399
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:02:36:1066
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:05:19:3193
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:07:47:2130
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:10:06:1345
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:13:13:2653
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:14:14:2134
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:17:05:1357
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:19:35:159
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:22:19:251
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:24:43:524
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:27:30:14
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:29:17:626
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:31:24:654
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:33:36:327
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:35:09:1663
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:36:56:87
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:39:46:393
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:41:35:572
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:43:30:4262
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:46:15:972
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:48:51:2589
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:51:35:4910
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:53:25:1148
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:55:47:325
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 07:57:20:2195
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:01:13:3554
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:03:39:1656
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:06:20:2335
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:08:11:711
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:10:33:563
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:12:44:953
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:15:50:4085
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:17:51:1583
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:21:32:1882
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:24:20:333
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:27:09:2092
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:29:39:2482
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:32:27:368
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:34:41:2621
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:37:05:1484
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:39:52:2533
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:42:23:1736
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:44:18:3989
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:46:37:1055
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:49:09:929
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:52:00:145
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:54:18:2914
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:56:43:1070
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 08:59:09:1909
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 09:02:36:651
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 09:05:04:12796
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 09:07:57:656
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 09:11:42:2125
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 09:15:09:3327
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 09:18:13:1137
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 09:21:18:1767
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 09:25:08:3122
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 10:27:31:306
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 10:41:01:2085
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 10:54:48:449
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 11:27:53:2010
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 11:36:03:609
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 14:54:30:706
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 15:31:44:2863
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 15:45:48:8281
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 15:51:58:3497
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 15:57:57:3089
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 16:04:06:1657
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 16:10:43:4412
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 16:17:12:1018
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 16:23:46:2997
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 16:30:23:3591
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 16:37:17:4426
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 17:23:44:111
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 25 20:32:04:236
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 10:14:38:478
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 11:46:11:689
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 12:46:39:16071
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 12:50:07:9503
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 13:03:31:4
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 13:26:30:2797
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 13:43:44:2306
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 13:59:56:2549
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 14:03:51:1499
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 14:21:36:770
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 14:36:51:7680
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 14:52:56:359
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 15:09:26:4957
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 15:19:04:451
2019-09-26T15:57:22.133+0200 I STORAGE [initandlisten] Placing a marker at optime Sep 26 15:31:22:4629
2019-09-26T15:57:22.138+0200 I STORAGE [initandlisten] Timestamp monitor starting
2019-09-26T15:57:22.145+0200 I CONTROL [initandlisten]
2019-09-26T15:57:22.145+0200 I CONTROL [initandlisten] ** WARNING: Access control is not enabled for the database.
2019-09-26T15:57:22.145+0200 I CONTROL [initandlisten] **          Read and write access to data and configuration is unrestricted.
2019-09-26T15:57:22.145+0200 I CONTROL [initandlisten]
2019-09-26T15:57:22.508+0200 I SHARDING [initandlisten] Marking collection local.system.replset as collection version:
2019-09-26T15:57:22.509+0200 I STORAGE [initandlisten] Flow Control is enabled on this deployment.
2019-09-26T15:57:22.509+0200 I SHARDING [initandlisten] Marking collection admin.system.roles as collection version:
2019-09-26T15:57:22.509+0200 I SHARDING [initandlisten] Marking collection admin.system.version as collection version:
2019-09-26T15:57:22.509+0200 I SHARDING [initandlisten] initializing sharding state with: { shardName: "rs1", clusterId: ObjectId('5d729c424d8cf6b0cdd8c5eb'), configsvrConnectionString: "config/arbiter1.buzz.guru:27019,arbiter2.buzz.guru:27019,arbiter3.buzz.guru:27019" }
2019-09-26T15:57:22.509+0200 I NETWORK [initandlisten] Starting new replica set monitor for config/arbiter1.buzz.guru:27019,arbiter2.buzz.guru:27019,arbiter3.buzz.guru:27019
2019-09-26T15:57:22.509+0200 I SHARDING [thread1] creating distributed lock ping thread for process m4.buzz.guru:27018:1569506242:2310514791710709010 (sleeping for 30000ms)
2019-09-26T15:57:22.509+0200 I SHARDING [initandlisten] Finished initializing sharding components for secondary node.
2019-09-26T15:57:22.509+0200 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to arbiter2.buzz.guru:27019
2019-09-26T15:57:22.510+0200 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to arbiter1.buzz.guru:27019
2019-09-26T15:57:22.510+0200 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to arbiter3.buzz.guru:27019
2019-09-26T15:57:22.521+0200 I NETWORK [ReplicaSetMonitor-TaskExecutor] Confirmed replica set for config is config/arbiter1.buzz.guru:27019,arbiter2.buzz.guru:27019,arbiter3.buzz.guru:27019
2019-09-26T15:57:22.521+0200 I SHARDING [Sharding-Fixed-0] Updating config server with confirmed set config/arbiter1.buzz.guru:27019,arbiter2.buzz.guru:27019,arbiter3.buzz.guru:27019
2019-09-26T15:57:22.524+0200 I SHARDING [ShardRegistry] Received reply from config server node (unknown) indicating config server optime term has increased, previous optime { ts: Timestamp(0, 0), t: -1 }, now { ts: Timestamp(1569506240, 102), t: 1 }
2019-09-26T15:57:22.524+0200 I NETWORK [shard-registry-reload] Starting new replica set monitor for rs0/m1.buzz.guru:27018,m2.buzz.guru:27018
2019-09-26T15:57:22.524+0200 I NETWORK [shard-registry-reload] Starting new replica set monitor for rs1/m3.buzz.guru:27018,m4.buzz.guru:27018
2019-09-26T15:57:22.524+0200 I NETWORK [shard-registry-reload] Starting new replica set monitor for rs2/m5.buzz.guru:27018,m6.buzz.guru:27018
2019-09-26T15:57:22.524+0200 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to m1.buzz.guru:27018
2019-09-26T15:57:22.525+0200 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to m2.buzz.guru:27018
2019-09-26T15:57:22.525+0200 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to m3.buzz.guru:27018
2019-09-26T15:57:22.525+0200 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to m4.buzz.guru:27018
2019-09-26T15:57:22.525+0200 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to m6.buzz.guru:27018
2019-09-26T15:57:22.525+0200 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to m5.buzz.guru:27018
2019-09-26T15:57:22.526+0200 I NETWORK [ReplicaSetMonitor-TaskExecutor] Confirmed replica set for rs1 is rs1/m3.buzz.guru:27018,m4.buzz.guru:27018
2019-09-26T15:57:22.526+0200 I SHARDING [updateShardIdentityConfigString] Updating config server with confirmed set rs1/m3.buzz.guru:27018,m4.buzz.guru:27018
2019-09-26T15:57:22.526+0200 I NETWORK [ReplicaSetMonitor-TaskExecutor] Confirmed replica set for rs0 is rs0/m1.buzz.guru:27018,m2.buzz.guru:27018
2019-09-26T15:57:22.526+0200 I SHARDING [updateShardIdentityConfigString] Updating config server with confirmed set rs0/m1.buzz.guru:27018,m2.buzz.guru:27018
2019-09-26T15:57:22.526+0200 I NETWORK [ReplicaSetMonitor-TaskExecutor] Confirmed replica set for rs2 is rs2/m5.buzz.guru:27018,m6.buzz.guru:27018
2019-09-26T15:57:22.526+0200 I SHARDING [updateShardIdentityConfigString] Updating config server with confirmed set rs2/m5.buzz.guru:27018,m6.buzz.guru:27018
2019-09-26T15:57:22.533+0200 W SHARDING [replSetDistLockPinger] pinging failed for distributed lock pinger :: caused by :: LockStateChangeFailed: findAndModify query predicate didn't match any lock document
2019-09-26T15:57:24.524+0200 I CONNPOOL [ReplicaSetMonitor-TaskExecutor] Connecting to m4.buzz.guru:27018
2019-09-26T15:57:24.526+0200 I SHARDING [initandlisten] Marking collection local.startup_log as collection version:
2019-09-26T15:57:24.526+0200 I FTDC [initandlisten] Initializing full-time diagnostic data capture with directory '/var/lib/mongodb/diagnostic.data'
2019-09-26T15:57:24.527+0200 I SHARDING [initandlisten] Marking collection local.replset.minvalid as collection version:
2019-09-26T15:57:24.527+0200 I SHARDING [initandlisten] Marking collection local.replset.election as collection version:
2019-09-26T15:57:24.527+0200 I REPL [initandlisten] Rollback ID is 1
2019-09-26T15:57:24.528+0200 I REPL [initandlisten] Recovering from stable timestamp: Timestamp(1569505039, 1131) (top of oplog: { ts: Timestamp(1569506086, 1980), t: 2 }, appliedThrough: { ts: Timestamp(0, 0), t: -1 }, TruncateAfter: Timestamp(0, 0))
2019-09-26T15:57:24.528+0200 I REPL [initandlisten] Starting recovery oplog application at the stable timestamp: Timestamp(1569505039, 1131)
2019-09-26T15:57:24.528+0200 I REPL [initandlisten] Replaying stored operations from { : Timestamp(1569505039, 1131) } (exclusive) to { : Timestamp(1569506086, 1980) } (inclusive).
2019-09-26T15:57:24.528+0200 I SHARDING [initandlisten] Marking collection local.oplog.rs as collection version:
2019-09-26T15:57:25.024+0200 I FTDC [ftdc] Unclean full-time diagnostic data capture shutdown detected, found interim file, some metrics may have been lost. OK
2019-09-26T15:57:25.280+0200 I REPL [repl-writer-worker-2] applied op: CRUD { ts: Timestamp(1569505039, 1137), t: 2, h: 0, v: 2, op: "d", ns: "buzzguru_master.video", ui: UUID("f80fd760-d9a4-4bd8-871c-0f604c5dcf6b"), fromMigrate: true, wall: new Date(1569505039154), o: { _id: ObjectId('5ca6d5c6f2e034eb82a34b46') } }, took 696ms
2019-09-26T15:57:25.280+0200 I REPL [repl-writer-worker-15] applied op: CRUD { ts: Timestamp(1569505039, 1257), t: 2, h: 0, v: 2, op: "d", ns: "buzzguru_master.video", ui: UUID("f80fd760-d9a4-4bd8-871c-0f604c5dcf6b"), fromMigrate: true, wall: new Date(1569505039176), o: { _id: ObjectId('5ca6d5c6f2e034eb82a34b6a') } }, took 696ms
2019-09-26T15:57:25.280+0200 I REPL [repl-writer-worker-5] applied op: CRUD { ts: Timestamp(1569505039, 1258), t: 2, h: 0, v: 2, op: "d", ns: "buzzguru_master.video", ui: UUID("f80fd760-d9a4-4bd8-871c-0f604c5dcf6b"), fromMigrate: true, wall: new Date(1569505039176), o: { _id: ObjectId('5ca6d5c6f2e034eb82a34b6e') } }, took 696ms
2019-09-26T15:57:25.280+0200 I REPL [repl-writer-worker-4] applied op: CRUD { ts: Timestamp(1569505039, 1284), t: 2, h: 0, v: 2, op: "d", ns: "buzzguru_master.video", ui: UUID("f80fd760-d9a4-4bd8-871c-0f604c5dcf6b"), fromMigrate: true, wall: new Date(1569505039182), o: { _id: ObjectId('5ca6d5c7f2e034eb82a353d9') } }, took 696ms
2019-09-26T15:57:25.280+0200 I REPL [repl-writer-worker-0] applied op: CRUD { ts: Timestamp(1569505039, 1274), t: 2, h: 0, v: 2, op: "d", ns: "buzzguru_master.video", ui: UUID("f80fd760-d9a4-4bd8-871c-0f604c5dcf6b"), fromMigrate: true, wall: new Date(1569505039180), o: { _id: ObjectId('5ca6d5c7f2e034eb82a353bb') } }, took 696ms
2019-09-26T15:57:25.280+0200 I REPL [repl-writer-worker-13] applied op: CRUD { ts: Timestamp(1569505039, 1262), t: 2, h: 0, v: 2, op: "d", ns: "buzzguru_master.video", ui: UUID("f80fd760-d9a4-4bd8-871c-0f604c5dcf6b"), fromMigrate: true, wall: new Date(1569505039178), o: { _id: ObjectId('5ca6d5c6f2e034eb82a34b90') } }, took 696ms
2019-09-26T15:57:25.280+0200 I REPL [repl-writer-worker-3] applied op: CRUD { ts: Timestamp(1569505039, 1259), t: 2, h: 0, v: 2, op: "d", ns: "buzzguru_master.video", ui: UUID("f80fd760-d9a4-4bd8-871c-0f604c5dcf6b"), fromMigrate: true, wall: new Date(1569505039177), o: { _id: ObjectId('5ca6d5c6f2e034eb82a34b88') } }, took 696ms