bin/./mongos(_ZN5mongo17checkShardVersionEPNS_12DBClientBaseERKSsN5boost10shared_ptrIKNS_12ChunkManagerEEEbi+0xfb5) [0x76a7c5]
bin/./mongos(_ZN5mongo14VersionManager19checkShardVersionCBEPNS_15ShardConnectionEbi+0x6a) [0x76c31a]
bin/./mongos(_ZN5mongo15ShardConnection11_finishInitEv+0xfc) [0x7705ec]
bin/./mongos(_ZN5mongo13ShardStrategy7_insertERKSsRSt6vectorINS_7BSONObjESaIS4_EERSt3mapIN5boost10shared_ptrIKNS_5ChunkEEES6_St4lessISD_ESaISt4pairIKSD_S6_EEEiRNS_7RequestERNS_9DbMessageEi+0x1c0) [0x7883f0]
bin/./mongos(_ZN5mongo13ShardStrategy7writeOpEiRNS_7RequestE+0x555) [0x78ce05]
bin/./mongos(_ZN5mongo7Request7processEi+0xe8) [0x75a888]
bin/./mongos(_ZN5mongo21ShardedMessageHandler7processERNS_7MessageEPNS_21AbstractMessagingPortEPNS_9LastErrorE+0x71) [0x500751]
bin/./mongos(_ZN5mongo3pms9threadRunEPNS_13MessagingPortE+0x411) [0x7fcb51]
/lib64/libpthread.so.0 [0x3f824064a7]
/lib64/libc.so.6(clone+0x6d) [0x3f81cd3c2d]
Fri Mar 7 10:27:10 [conn26758] sharded connection to shard004/mongo0.mcloud.139.com:20004,mongo4.mcloud.139.com:20004 not being returned to the pool
Fri Mar 7 10:27:10 [conn26758] AssertionException while processing op type : 2002 to : mcloud.m_iosyncdetaillog :: caused by :: 10429 setShardVersion failed host: mongo0.mcloud.139.com:20004 { oldVersion: Timestamp 0|0, oldVersionEpoch: ObjectId('000000000000000000000000'), ns: "mcloud.m_iosyncdetaillog", version: Timestamp 560000|503, versionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), globalVersion: Timestamp 561000|0, globalVersionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), reloadConfig: true, errmsg: "shard global version for collection is higher than trying to set to 'mcloud.m_iosyncdetaillog'", ok: 0.0 }
Fri Mar 7 10:27:17 [conn26724] going to retry checkShardVersion host: mongo0.mcloud.139.com:20004 { oldVersion: Timestamp 0|0, oldVersionEpoch: ObjectId('000000000000000000000000'), ns: "mcloud.m_iosyncdetaillog", version: Timestamp 560000|503, versionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), globalVersion: Timestamp 561000|0, globalVersionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), reloadConfig: true, errmsg: "shard global version for collection is higher than trying to set to 'mcloud.m_iosyncdetaillog'", ok: 0.0 }
Fri Mar 7 10:27:17 [conn26724] going to retry checkShardVersion host: mongo0.mcloud.139.com:20004 { oldVersion: Timestamp 0|0, oldVersionEpoch: ObjectId('000000000000000000000000'), ns: "mcloud.m_iosyncdetaillog", version: Timestamp 560000|503, versionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), globalVersion: Timestamp 561000|0, globalVersionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), reloadConfig: true, errmsg: "shard global version for collection is higher than trying to set to 'mcloud.m_iosyncdetaillog'", ok: 0.0 }
Fri Mar 7 10:27:17 [conn26724] going to retry checkShardVersion host: mongo0.mcloud.139.com:20004 { oldVersion: Timestamp 0|0, oldVersionEpoch: ObjectId('000000000000000000000000'), ns: "mcloud.m_iosyncdetaillog", version: Timestamp 560000|503, versionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), globalVersion: Timestamp 561000|0, globalVersionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), reloadConfig: true, errmsg: "shard global version for collection is higher than trying to set to 'mcloud.m_iosyncdetaillog'", ok: 0.0 }
Fri Mar 7 10:27:17 [conn26724] going to retry checkShardVersion host: mongo0.mcloud.139.com:20004 { oldVersion: Timestamp 0|0, oldVersionEpoch: ObjectId('000000000000000000000000'), ns: "mcloud.m_iosyncdetaillog", version: Timestamp 560000|503, versionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), globalVersion: Timestamp 561000|0, globalVersionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), reloadConfig: true, errmsg: "shard global version for collection is higher than trying to set to 'mcloud.m_iosyncdetaillog'", ok: 0.0 }
Fri Mar 7 10:27:17 [conn26724] setShardVersion failed host: mongo0.mcloud.139.com:20004 { oldVersion: Timestamp 0|0, oldVersionEpoch: ObjectId('000000000000000000000000'), ns: "mcloud.m_iosyncdetaillog", version: Timestamp 560000|503, versionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), globalVersion: Timestamp 561000|0, globalVersionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), reloadConfig: true, errmsg: "shard global version for collection is higher than trying to set to 'mcloud.m_iosyncdetaillog'", ok: 0.0 }
Fri Mar 7 10:27:17 [conn26724] Assertion: 10429:setShardVersion failed host: mongo0.mcloud.139.com:20004 { oldVersion: Timestamp 0|0, oldVersionEpoch: ObjectId('000000000000000000000000'), ns: "mcloud.m_iosyncdetaillog", version: Timestamp 560000|503, versionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), globalVersion: Timestamp 561000|0, globalVersionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), reloadConfig: true, errmsg: "shard global version for collection is higher than trying to set to 'mcloud.m_iosyncdetaillog'", ok: 0.0 }
0x80e931 0x7d79f9 0x7d7b7c 0x76b796 0x76ad10 0x76ad10 0x76ad10 0x76ad10 0x76ad10 0x76a7c5 0x76c31a 0x7705ec 0x7883f0 0x78ce05 0x75a888 0x500751 0x7fcb51 0x3f824064a7 0x3f81cd3c2d
bin/./mongos(_ZN5mongo15printStackTraceERSo+0x21) [0x80e931]
bin/./mongos(_ZN5mongo11msgassertedEiPKc+0x99) [0x7d79f9]
bin/./mongos [0x7d7b7c]
bin/./mongos(_ZN5mongo17checkShardVersionEPNS_12DBClientBaseERKSsN5boost10shared_ptrIKNS_12ChunkManagerEEEbi+0x1f86) [0x76b796]
bin/./mongos(_ZN5mongo17checkShardVersionEPNS_12DBClientBaseERKSsN5boost10shared_ptrIKNS_12ChunkManagerEEEbi+0x1500) [0x76ad10]
bin/./mongos(_ZN5mongo17checkShardVersionEPNS_12DBClientBaseERKSsN5boost10shared_ptrIKNS_12ChunkManagerEEEbi+0x1500) [0x76ad10]
bin/./mongos(_ZN5mongo17checkShardVersionEPNS_12DBClientBaseERKSsN5boost10shared_ptrIKNS_12ChunkManagerEEEbi+0x1500) [0x76ad10]
bin/./mongos(_ZN5mongo17checkShardVersionEPNS_12DBClientBaseERKSsN5boost10shared_ptrIKNS_12ChunkManagerEEEbi+0x1500) [0x76ad10]
bin/./mongos(_ZN5mongo17checkShardVersionEPNS_12DBClientBaseERKSsN5boost10shared_ptrIKNS_12ChunkManagerEEEbi+0x1500) [0x76ad10]
bin/./mongos(_ZN5mongo17checkShardVersionEPNS_12DBClientBaseERKSsN5boost10shared_ptrIKNS_12ChunkManagerEEEbi+0xfb5) [0x76a7c5]
bin/./mongos(_ZN5mongo14VersionManager19checkShardVersionCBEPNS_15ShardConnectionEbi+0x6a) [0x76c31a]
bin/./mongos(_ZN5mongo15ShardConnection11_finishInitEv+0xfc) [0x7705ec]
bin/./mongos(_ZN5mongo13ShardStrategy7_insertERKSsRSt6vectorINS_7BSONObjESaIS4_EERSt3mapIN5boost10shared_ptrIKNS_5ChunkEEES6_St4lessISD_ESaISt4pairIKSD_S6_EEEiRNS_7RequestERNS_9DbMessageEi+0x1c0) [0x7883f0]
bin/./mongos(_ZN5mongo13ShardStrategy7writeOpEiRNS_7RequestE+0x555) [0x78ce05]
bin/./mongos(_ZN5mongo7Request7processEi+0xe8) [0x75a888]
bin/./mongos(_ZN5mongo21ShardedMessageHandler7processERNS_7MessageEPNS_21AbstractMessagingPortEPNS_9LastErrorE+0x71) [0x500751]
bin/./mongos(_ZN5mongo3pms9threadRunEPNS_13MessagingPortE+0x411) [0x7fcb51]
/lib64/libpthread.so.0 [0x3f824064a7]
/lib64/libc.so.6(clone+0x6d) [0x3f81cd3c2d]
Fri Mar 7 10:27:17 [conn26724] sharded connection to shard004/mongo0.mcloud.139.com:20004,mongo4.mcloud.139.com:20004 not being returned to the pool
Fri Mar 7 10:27:17 [conn26724] AssertionException while processing op type : 2002 to : mcloud.m_iosyncdetaillog :: caused by :: 10429 setShardVersion failed host: mongo0.mcloud.139.com:20004 { oldVersion: Timestamp 0|0, oldVersionEpoch: ObjectId('000000000000000000000000'), ns: "mcloud.m_iosyncdetaillog", version: Timestamp 560000|503, versionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), globalVersion: Timestamp 561000|0, globalVersionEpoch: ObjectId('51d37afa0b8c2dd3569f5ed6'), reloadConfig: true, errmsg: "shard global version for collection is higher than trying to set to 'mcloud.m_iosyncdetaillog'", ok: 0.0 }
Fri Mar 7 10:27:17 [Balancer] DBClientCursor::init call() failed
Fri Mar 7 10:27:17 [WriteBackListener-mongo0.mcloud.139.com:20004] DBClientCursor::init call() failed
Fri Mar 7 10:27:17 [WriteBackListener-mongo0.mcloud.139.com:20004] WriteBackListener exception : DBClientBase::findN: transport error: mongo0.mcloud.139.com:20004 ns: admin.$cmd query: { writebacklisten: ObjectId('50f7f457c6e78b5fbceb0af3') }
Fri Mar 7 10:27:17 [WriteBackListener-mongo0.mcloud.139.com:20004] SyncClusterConnection connecting to [mongo0.mcloud.139.com:30000]
Fri Mar 7 10:27:17 [WriteBackListener-mongo0.mcloud.139.com:20004] SyncClusterConnection connecting to [mongo1.mcloud.139.com:30000]
Fri Mar 7 10:27:17 [WriteBackListener-mongo0.mcloud.139.com:20004] SyncClusterConnection connecting to [mongo2.mcloud.139.com:30000]
Fri Mar 7 10:27:17 [Balancer] distributed lock 'balancer/GD-QHD-CNG152TFL3-201.14:10000:1358427224:1804289383' unlocked.
Fri Mar 7 10:27:17 [Balancer] scoped connection to mongo0.mcloud.139.com:30000,mongo1.mcloud.139.com:30000,mongo2.mcloud.139.com:30000 not being returned to the pool
Fri Mar 7 10:27:17 [Balancer] caught exception while doing balance: DBClientBase::findN: transport error: mongo0.mcloud.139.com:20004 ns: admin.$cmd query: { moveChunk: "mcloud.contact_item_bak", from: "shard004/mongo0.mcloud.139.com:20004,mongo4.mcloud.139.com:20004", to: "shard003/mongo3.mcloud.139.com:20003,mongo4.mcloud.139.com:20003", fromShard: "shard004", toShard: "shard003", min: { u: 1013568223 }, max: { u: 1014393111 }, maxChunkSizeBytes: 67108864, shardId: "mcloud.contact_item_bak-u_1013568223", configdb: "mongo0.mcloud.139.com:30000,mongo1.mcloud.139.com:30000,mongo2.mcloud.139.com:30000", secondaryThrottle: true }
Fri Mar 7 10:27:18 [WriteBackListener-mongo0.mcloud.139.com:20004] Socket recv() errno:104 Connection reset by peer 192.168.201.4:20004
Fri Mar 7 10:27:18 [WriteBackListener-mongo0.mcloud.139.com:20004] SocketException: remote: 192.168.201.4:20004 error: 9001 socket exception [1] server [192.168.201.4:20004]
Fri Mar 7 10:27:18 [WriteBackListener-mongo0.mcloud.139.com:20004] DBClientCursor::init call() failed
Fri Mar 7 10:27:18 [WriteBackListener-mongo0.mcloud.139.com:20004] WriteBackListener exception : DBClientBase::findN: transport error: mongo0.mcloud.139.com:20004 ns: admin.$cmd query: { writebacklisten: ObjectId('50f7f457c6e78b5fbceb0af3') }
Fri Mar 7 10:27:18 [conn25540] Socket recv() errno:104 Connection reset by peer 192.168.201.4:20004
Fri Mar 7 10:27:18 [conn25540] SocketException: remote: 192.168.201.4:20004 error: 9001 socket exception [1] server [192.168.201.4:20004]
Fri Mar 7 10:27:18 [conn25540] DBClientCursor::init call() failed
Fri Mar 7 10:27:18 [conn26724] trying reconnect to mongo0.mcloud.139.com:20004
Fri Mar 7 10:27:18 [conn26724] reconnect mongo0.mcloud.139.com:20004 failed couldn't connect to server mongo0.mcloud.139.com:20004
Fri Mar 7 10:27:19 [conn26726] Primary for replica set shard004 changed to mongo4.mcloud.139.com:20004
Fri Mar 7 10:27:20 [WriteBackListener-mongo0.mcloud.139.com:20004] DBClientCursor::init call() failed
Fri Mar 7 10:27:20 [WriteBackListener-mongo0.mcloud.139.com:20004] WriteBackListener exception : DBClientBase::findN: transport error: mongo0.mcloud.139.com:20004 ns: admin.$cmd query: { writebacklisten: ObjectId('50f7f457c6e78b5fbceb0af3') }
Fri Mar 7 10:27:23 [WriteBackListener-mongo0.mcloud.139.com:20004] WriteBackListener exception : socket exception [CONNECT_ERROR] for mongo0.mcloud.139.com:20004
Fri Mar 7 10:27:24 [Balancer] distributed lock 'balancer/GD-QHD-CNG152TFL3-201.14:10000:1358427224:1804289383' acquired, ts : 53192e8bc6e78b5fbc3af6fe
Fri Mar 7 10:27:24 [Balancer] ns: logsystem.logs going to move { _id: "logsystem.logs-app_"mcloud"uid_"36291030"_id_ObjectId('52b2b45d498e86a7c10c17e0')", lastmod: Timestamp 4854000|1, lastmodEpoch: ObjectId('000000000000000000000000'), ns: "logsystem.logs", min: { app: "mcloud", uid: "36291030", _id: ObjectId('52b2b45d498e86a7c10c17e0') }, max: { app: "mcloud", uid: "36331062", _id: ObjectId('51164804498ed3bc05749522') }, shard: "shard001" } from: shard001 to: shard000 tag []
Fri Mar 7 10:27:24 [Balancer] ns: mcloud.contact_item_bak going to move { _id: "mcloud.contact_item_bak-u_1013568223", lastmod: Timestamp 20000|2, lastmodEpoch: ObjectId('51d37a5b0b8c2dd3569f5ed3'), ns: "mcloud.contact_item_bak", min: { u: 1013568223 }, max: { u: 1014393111 }, shard: "shard004" } from: shard004 to: shard003 tag []
Fri Mar 7 10:27:24 [Balancer] ns: mcloud.m_iosyncdetaillog going to move { _id: "mcloud.m_iosyncdetaillog-uid_"520395051"", lastmod: Timestamp 560000