[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:25.917+0000 Starting JSTest jstests/concurrency/fsm_all_sharded_replication.js... ./mongo --eval MongoRunner.dataDir = "/data/db/job0/mongorunner"; TestData = new Object(); TestData.wiredTigerEngineConfigString = ""; TestData.wiredTigerCollectionConfigString = ""; TestData.storageEngine = "wiredTiger"; TestData.wiredTigerIndexConfigString = ""; TestData.noJournal = false; TestData.testName = "fsm_all_sharded_replication"; TestData.noJournalPrealloc = false; MongoRunner.dataPath = "/data/db/job0/mongorunner/" --nodb jstests/concurrency/fsm_all_sharded_replication.js
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:25.920+0000 JSTest jstests/concurrency/fsm_all_sharded_replication.js started with pid 2944.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:25.929+0000 MongoDB shell version: 3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:25.958+0000 /data/db/job0/mongorunner/
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.053+0000 Replica set test!
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.053+0000 ReplSetTest Starting Set
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.053+0000 ReplSetTest n is : 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.054+0000 ReplSetTest n: 0 ports: [ 31100, 31101, 31102 ] 31100 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.055+0000 {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.055+0000     "useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.055+0000     "oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.055+0000     "keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.055+0000     "port" : 31100,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.056+0000     "noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.056+0000     "smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.056+0000     "rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.056+0000     "replSet" : "test-rs0",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.056+0000     "dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.056+0000     "useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.056+0000     "noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.057+0000     "pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.057+0000         "testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.057+0000         "shard" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.057+0000         "node" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.057+0000         "set" : "test-rs0"
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.057+0000     },
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.057+0000     "verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.057+0000     "restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.058+0000 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.058+0000 ReplSetTest Starting....
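The options block above is what ReplSetTest prints for each node it is about to launch. A minimal shell-side sketch of the calls that drive this sequence (ReplSetTest, startSet, and initiate are the mongo shell test helpers whose output appears in this log; option values are taken from the printed dump, and the test's exact code may differ):

    // Sketch: roughly how the harness brings up the 3-node set logged above.
    var replTest = new ReplSetTest({name: "test-rs0", nodes: 3, oplogSize: 1024});
    replTest.startSet();   // launches one mongod per node ("ReplSetTest Starting....")
    replTest.initiate();   // sends replSetInitiate (see the config document later in the log)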
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.058+0000 Resetting db path '/data/db/job0/mongorunner/test-rs0-0'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.059+0000 2015-07-19T23:38:26.059+0000 I - [main] shell: started program (sh2949): /data/mci/src/mongod --oplogSize 1024 --port 31100 --noprealloc --smallfiles --rest --replSet test-rs0 --dbpath /data/db/job0/mongorunner/test-rs0-0 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.060+0000 2015-07-19T23:38:26.060+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31100, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.069+0000 m31100| 2015-07-19T23:38:26.069+0000 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.070+0000 m31100| 2015-07-19T23:38:26.069+0000 I CONTROL [main] ** enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.070+0000 m31100| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.096+0000 m31100| 2015-07-19T23:38:26.096+0000 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=3G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.261+0000 2015-07-19T23:38:26.261+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31100, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.461+0000 2015-07-19T23:38:26.461+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31100, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.662+0000 2015-07-19T23:38:26.662+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31100, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:26.863+0000 2015-07-19T23:38:26.862+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31100, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.063+0000 2015-07-19T23:38:27.063+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31100, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.118+0000 m31100| 2015-07-19T23:38:27.118+0000 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.118+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten] MongoDB starting : pid=2949 port=31100 dbpath=/data/db/job0/mongorunner/test-rs0-0 64-bit host=ip-10-139-123-131
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.119+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.119+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.119+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten] ** Not recommended for production.
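Each "shell: started program" line above is the harness launching one mongod. A hedged sketch of an equivalent direct call (MongoRunner.runMongod is the shell helper; the option keys below are mapped from the logged command-line flags and the ReplSetTest options dump, and the exact spelling accepted by this shell version is an assumption):

    // Sketch only: an approximation of the launch behind "started program (sh2949)".
    // enableTestCommands is normally added by the harness itself.
    var conn = MongoRunner.runMongod({
        replSet: "test-rs0",
        oplogSize: 1024,
        noprealloc: "",
        smallfiles: "",
        rest: "",
        storageEngine: "wiredTiger",
        dbpath: "/data/db/job0/mongorunner/test-rs0-0"
    });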
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.119+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.119+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.119+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.120+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.120+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.120+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.120+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.120+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.120+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten] ** WARNING: soft rlimits too low. rlimits set to 1024 processes, 64000 files. Number of processes should be at least 32000 : 0.5 times number of files.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.120+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.120+0000 m31100| 2015-07-19T23:38:27.118+0000 I CONTROL [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.121+0000 m31100| 2015-07-19T23:38:27.119+0000 I CONTROL [initandlisten] git version: 92c6ee5cedf09928720b79592981dc1c6aeb0482
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.121+0000 m31100| 2015-07-19T23:38:27.119+0000 I CONTROL [initandlisten] OpenSSL version: OpenSSL 1.0.1e-fips 11 Feb 2013
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.121+0000 m31100| 2015-07-19T23:38:27.119+0000 I CONTROL [initandlisten] allocator: tcmalloc
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.121+0000 m31100| 2015-07-19T23:38:27.119+0000 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31100 }, replication: { oplogSizeMB: 1024, replSet: "test-rs0" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs0-0", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.121+0000 m31100| 2015-07-19T23:38:27.120+0000 I NETWORK [websvr] admin web console waiting for connections on port 32100
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.260+0000 m31100| 2015-07-19T23:38:27.260+0000 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.260+0000 m31100| 2015-07-19T23:38:27.260+0000 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.264+0000 2015-07-19T23:38:27.264+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31100, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.408+0000 m31100| 2015-07-19T23:38:27.408+0000 I NETWORK [initandlisten] waiting for connections on port 31100
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.464+0000 m31100| 2015-07-19T23:38:27.464+0000 I NETWORK [initandlisten] connection accepted from 127.0.0.1:46330 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.465+0000 [ connection to ip-10-139-123-131:31100 ]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.465+0000 ReplSetTest n is : 1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.466+0000 ReplSetTest n: 1 ports: [ 31100, 31101, 31102 ] 31101 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.466+0000 {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.466+0000     "useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.466+0000     "oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.466+0000     "keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.466+0000     "port" : 31101,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.467+0000     "noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.467+0000     "smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.467+0000     "rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.467+0000     "replSet" : "test-rs0",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.467+0000     "dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.467+0000     "useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.467+0000     "noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.467+0000     "pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.468+0000         "testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.468+0000         "shard" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.468+0000         "node" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.468+0000         "set" : "test-rs0"
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.468+0000     },
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.468+0000     "verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.468+0000     "restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.468+0000 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.469+0000 ReplSetTest Starting....
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.469+0000 Resetting db path '/data/db/job0/mongorunner/test-rs0-1'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.469+0000 2015-07-19T23:38:27.469+0000 I - [main] shell: started program (sh2980): /data/mci/src/mongod --oplogSize 1024 --port 31101 --noprealloc --smallfiles --rest --replSet test-rs0 --dbpath /data/db/job0/mongorunner/test-rs0-1 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.469+0000 2015-07-19T23:38:27.469+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31101, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.480+0000 m31101| 2015-07-19T23:38:27.479+0000 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.480+0000 m31101| 2015-07-19T23:38:27.479+0000 I CONTROL [main] ** enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.480+0000 m31101| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.506+0000 m31101| 2015-07-19T23:38:27.506+0000 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=3G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.670+0000 2015-07-19T23:38:27.670+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31101, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:27.870+0000 2015-07-19T23:38:27.870+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31101, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.071+0000 2015-07-19T23:38:28.071+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31101, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.272+0000 2015-07-19T23:38:28.272+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31101, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.342+0000 m31101| 2015-07-19T23:38:28.342+0000 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.342+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] MongoDB starting : pid=2980 port=31101 dbpath=/data/db/job0/mongorunner/test-rs0-1 64-bit host=ip-10-139-123-131
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.342+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.342+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.343+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] ** Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.343+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.343+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.343+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.343+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.343+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.344+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.344+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.344+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.344+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] ** WARNING: soft rlimits too low. rlimits set to 1024 processes, 64000 files. Number of processes should be at least 32000 : 0.5 times number of files.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.344+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.344+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.344+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] git version: 92c6ee5cedf09928720b79592981dc1c6aeb0482
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.344+0000 m31101| 2015-07-19T23:38:28.342+0000 I CONTROL [initandlisten] OpenSSL version: OpenSSL 1.0.1e-fips 11 Feb 2013
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.344+0000 m31101| 2015-07-19T23:38:28.343+0000 I CONTROL [initandlisten] allocator: tcmalloc
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.345+0000 m31101| 2015-07-19T23:38:28.343+0000 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31101 }, replication: { oplogSizeMB: 1024, replSet: "test-rs0" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs0-1", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.345+0000 m31101| 2015-07-19T23:38:28.343+0000 I NETWORK [websvr] admin web console waiting for connections on port 32101
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.472+0000 2015-07-19T23:38:28.472+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31101, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.474+0000 m31101| 2015-07-19T23:38:28.473+0000 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.474+0000 m31101| 2015-07-19T23:38:28.473+0000 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.477+0000 m31101| 2015-07-19T23:38:28.477+0000 I NETWORK [initandlisten] waiting for connections on port 31101
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.673+0000 m31101| 2015-07-19T23:38:28.673+0000 I NETWORK [initandlisten] connection accepted from 127.0.0.1:35764 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.673+0000 [
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.674+0000     connection to ip-10-139-123-131:31100,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.674+0000     connection to ip-10-139-123-131:31101
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.674+0000 ]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.674+0000 ReplSetTest n is : 2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.674+0000 ReplSetTest n: 2 ports: [ 31100, 31101, 31102 ] 31102 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.674+0000 {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.674+0000     "useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.675+0000     "oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.675+0000     "keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.675+0000     "port" : 31102,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.675+0000     "noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.675+0000     "smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.675+0000     "rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.675+0000     "replSet" : "test-rs0",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.675+0000     "dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.675+0000     "useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000     "noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000     "pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000         "testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000         "shard" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000         "node" : 2,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000         "set" : "test-rs0"
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000     },
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000     "verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000     "restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000 ReplSetTest Starting....
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.676+0000 Resetting db path '/data/db/job0/mongorunner/test-rs0-2'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.677+0000 2015-07-19T23:38:28.675+0000 I - [main] shell: started program (sh3010): /data/mci/src/mongod --oplogSize 1024 --port 31102 --noprealloc --smallfiles --rest --replSet test-rs0 --dbpath /data/db/job0/mongorunner/test-rs0-2 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.677+0000 2015-07-19T23:38:28.676+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31102, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.686+0000 m31102| 2015-07-19T23:38:28.686+0000 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.686+0000 m31102| 2015-07-19T23:38:28.686+0000 I CONTROL [main] ** enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.687+0000 m31102| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.712+0000 m31102| 2015-07-19T23:38:28.712+0000 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=3G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.747+0000 m31102| 2015-07-19T23:38:28.746+0000 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.747+0000 m31102| 2015-07-19T23:38:28.746+0000 I CONTROL [initandlisten] MongoDB starting : pid=3010 port=31102 dbpath=/data/db/job0/mongorunner/test-rs0-2 64-bit host=ip-10-139-123-131
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.747+0000 m31102| 2015-07-19T23:38:28.746+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.747+0000 m31102| 2015-07-19T23:38:28.746+0000 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.747+0000 m31102| 2015-07-19T23:38:28.746+0000 I CONTROL [initandlisten] ** Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.748+0000 m31102| 2015-07-19T23:38:28.746+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.748+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.748+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.748+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.748+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.748+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.748+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.749+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.749+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten] ** WARNING: soft rlimits too low. rlimits set to 1024 processes, 64000 files. Number of processes should be at least 32000 : 0.5 times number of files.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.749+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.749+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.749+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten] git version: 92c6ee5cedf09928720b79592981dc1c6aeb0482
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.749+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten] OpenSSL version: OpenSSL 1.0.1e-fips 11 Feb 2013
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.749+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten] allocator: tcmalloc
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.749+0000 m31102| 2015-07-19T23:38:28.747+0000 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31102 }, replication: { oplogSizeMB: 1024, replSet: "test-rs0" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs0-2", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.749+0000 m31102| 2015-07-19T23:38:28.748+0000 I NETWORK [websvr] admin web console waiting for connections on port 32102
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.752+0000 m31102| 2015-07-19T23:38:28.752+0000 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.752+0000 m31102| 2015-07-19T23:38:28.752+0000 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.755+0000 m31102| 2015-07-19T23:38:28.755+0000 I NETWORK [initandlisten] waiting for connections on port 31102
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.877+0000 m31102| 2015-07-19T23:38:28.876+0000 I NETWORK [initandlisten] connection accepted from 127.0.0.1:53864 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.877+0000 [
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.877+0000     connection to ip-10-139-123-131:31100,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.877+0000     connection to ip-10-139-123-131:31101,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.878+0000     connection to ip-10-139-123-131:31102
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.878+0000 ]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.878+0000 {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.878+0000     "replSetInitiate" : {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.878+0000 "_id" : "test-rs0", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.878+0000 "members" : [ [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.879+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.879+0000 "_id" : 0, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.879+0000 "host" : "ip-10-139-123-131:31100" [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.879+0000 }, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.879+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.879+0000 "_id" : 1, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.879+0000 "host" : "ip-10-139-123-131:31101" [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.879+0000 }, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.879+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.880+0000 "_id" : 2, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.880+0000 "host" : "ip-10-139-123-131:31102" [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.880+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.880+0000 ] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.880+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.880+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.880+0000 m31100| 2015-07-19T23:38:28.879+0000 I REPL [conn1] replSetInitiate admin command received from client [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.880+0000 m31101| 2015-07-19T23:38:28.880+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46378 #2 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.881+0000 m31101| 2015-07-19T23:38:28.881+0000 I NETWORK [conn2] end connection 10.139.123.131:46378 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.881+0000 m31102| 2015-07-19T23:38:28.881+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37164 #2 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.881+0000 m31100| 2015-07-19T23:38:28.881+0000 I REPL [conn1] replSetInitiate config object with 3 members parses ok [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.881+0000 m31102| 2015-07-19T23:38:28.881+0000 I NETWORK [conn2] end connection 10.139.123.131:37164 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.882+0000 m31102| 2015-07-19T23:38:28.882+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37165 #3 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.882+0000 m31101| 2015-07-19T23:38:28.882+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46381 #3 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.883+0000 m31100| 2015-07-19T23:38:28.883+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47450 #2 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.883+0000 m31100| 2015-07-19T23:38:28.883+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47451 #3 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.887+0000 m31100| 2015-07-19T23:38:28.886+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: "test-rs0", version: 1, members: [ { _id: 0, host: "ip-10-139-123-131:31100", arbiterOnly: false, buildIndexes: true, 
hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "ip-10-139-123-131:31101", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "ip-10-139-123-131:31102", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.887+0000 m31100| 2015-07-19T23:38:28.886+0000 I REPL [ReplicationExecutor] This node is ip-10-139-123-131:31100 in the config [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.887+0000 m31100| 2015-07-19T23:38:28.886+0000 I REPL [ReplicationExecutor] transition to STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.887+0000 m31100| 2015-07-19T23:38:28.886+0000 I REPL [conn1] ****** [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.887+0000 m31100| 2015-07-19T23:38:28.886+0000 I REPL [conn1] creating replication oplog of size: 1024MB... [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.887+0000 m31100| 2015-07-19T23:38:28.886+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31102 is now in state STARTUP [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.887+0000 m31100| 2015-07-19T23:38:28.887+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31101 is now in state STARTUP [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.888+0000 m31100| 2015-07-19T23:38:28.888+0000 I STORAGE [conn1] Starting WiredTigerRecordStoreThread local.oplog.rs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.912+0000 m31100| 2015-07-19T23:38:28.912+0000 I REPL [conn1] ****** [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.913+0000 m31100| 2015-07-19T23:38:28.913+0000 I REPL [conn1] Starting replication applier threads [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.913+0000 m31100| 2015-07-19T23:38:28.913+0000 I REPL [ReplicationExecutor] transition to RECOVERING [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:28.915+0000 m31100| 2015-07-19T23:38:28.915+0000 I REPL [ReplicationExecutor] transition to SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.885+0000 m31100| 2015-07-19T23:38:30.885+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47453 #4 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.886+0000 m31100| 2015-07-19T23:38:30.885+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47452 #5 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.886+0000 m31100| 2015-07-19T23:38:30.886+0000 I NETWORK [conn4] end connection 10.139.123.131:47453 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.886+0000 m31100| 2015-07-19T23:38:30.886+0000 I NETWORK [conn5] end connection 10.139.123.131:47452 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.886+0000 m31101| 2015-07-19T23:38:30.886+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46386 #4 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.887+0000 m31102| 2015-07-19T23:38:30.886+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37172 #4 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.887+0000 
m31101| 2015-07-19T23:38:30.886+0000 I NETWORK [conn4] end connection 10.139.123.131:46386 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.887+0000 m31102| 2015-07-19T23:38:30.887+0000 I NETWORK [conn4] end connection 10.139.123.131:37172 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.887+0000 m31100| 2015-07-19T23:38:30.887+0000 I REPL [ReplicationExecutor] Standing for election [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.888+0000 m31100| 2015-07-19T23:38:30.888+0000 I REPL [ReplicationExecutor] not electing self, we could not contact enough voting members [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.890+0000 m31102| 2015-07-19T23:38:30.889+0000 I REPL [replExecDBWorker-0] Starting replication applier threads [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.890+0000 m31102| 2015-07-19T23:38:30.890+0000 W REPL [rsSync] did not receive a valid config yet, sleeping 5 seconds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.890+0000 m31101| 2015-07-19T23:38:30.890+0000 I REPL [replExecDBWorker-0] Starting replication applier threads [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.890+0000 m31102| 2015-07-19T23:38:30.890+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: "test-rs0", version: 1, members: [ { _id: 0, host: "ip-10-139-123-131:31100", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "ip-10-139-123-131:31101", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "ip-10-139-123-131:31102", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.890+0000 m31102| 2015-07-19T23:38:30.890+0000 I REPL [ReplicationExecutor] This node is ip-10-139-123-131:31102 in the config [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.891+0000 m31102| 2015-07-19T23:38:30.890+0000 I REPL [ReplicationExecutor] transition to STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.891+0000 m31102| 2015-07-19T23:38:30.890+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31100 is now in state SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.891+0000 m31101| 2015-07-19T23:38:30.890+0000 W REPL [rsSync] did not receive a valid config yet, sleeping 5 seconds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.891+0000 m31101| 2015-07-19T23:38:30.891+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46388 #5 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.892+0000 m31101| 2015-07-19T23:38:30.891+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: "test-rs0", version: 1, members: [ { _id: 0, host: "ip-10-139-123-131:31100", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "ip-10-139-123-131:31101", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "ip-10-139-123-131:31102", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { 
chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.892+0000 m31101| 2015-07-19T23:38:30.891+0000 I REPL [ReplicationExecutor] This node is ip-10-139-123-131:31101 in the config [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.892+0000 m31101| 2015-07-19T23:38:30.891+0000 I REPL [ReplicationExecutor] transition to STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.892+0000 m31101| 2015-07-19T23:38:30.891+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31100 is now in state SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.892+0000 m31102| 2015-07-19T23:38:30.892+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31101 is now in state STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.892+0000 m31102| 2015-07-19T23:38:30.892+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37174 #5 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:30.893+0000 m31101| 2015-07-19T23:38:30.893+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31102 is now in state STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.887+0000 m31100| 2015-07-19T23:38:32.887+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31101 is now in state STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.888+0000 m31100| 2015-07-19T23:38:32.887+0000 I REPL [ReplicationExecutor] Standing for election [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.888+0000 m31100| 2015-07-19T23:38:32.887+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31102 is now in state STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.888+0000 m31100| 2015-07-19T23:38:32.888+0000 I REPL [ReplicationExecutor] running for election [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.888+0000 m31101| 2015-07-19T23:38:32.888+0000 I REPL [ReplicationExecutor] replSetElect voting yea for ip-10-139-123-131:31100 (0) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.888+0000 m31100| 2015-07-19T23:38:32.888+0000 I REPL [ReplicationExecutor] received vote: 1 votes from ip-10-139-123-131:31101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.888+0000 m31100| 2015-07-19T23:38:32.888+0000 I REPL [ReplicationExecutor] election succeeded, assuming primary role [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.889+0000 m31100| 2015-07-19T23:38:32.888+0000 I REPL [ReplicationExecutor] transition to PRIMARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.889+0000 m31102| 2015-07-19T23:38:32.888+0000 I REPL [ReplicationExecutor] replSetElect voting yea for ip-10-139-123-131:31100 (0) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.891+0000 m31102| 2015-07-19T23:38:32.891+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31100 is now in state PRIMARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.891+0000 m31101| 2015-07-19T23:38:32.891+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31100 is now in state PRIMARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:32.916+0000 m31100| 2015-07-19T23:38:32.915+0000 I REPL [rsSync] transition to primary complete; database writes are now permitted [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:35.891+0000 m31102| 2015-07-19T23:38:35.890+0000 I REPL [rsSync] ****** 
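The initiate-and-elect sequence above is driven by a single admin command; a sketch of issuing it by hand from a shell connected to the node on port 31100 (the member list mirrors the replSetInitiate document logged earlier; db.adminCommand is the standard shell entry point):

    // Sketch: the replSetInitiate command whose receipt and parsing the log
    // records on m31100, followed by the election that makes it PRIMARY.
    var cfg = {
        _id: "test-rs0",
        members: [
            {_id: 0, host: "ip-10-139-123-131:31100"},
            {_id: 1, host: "ip-10-139-123-131:31101"},
            {_id: 2, host: "ip-10-139-123-131:31102"}
        ]
    };
    db.adminCommand({replSetInitiate: cfg});  // run while connected to :31100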
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:35.891+0000 m31102| 2015-07-19T23:38:35.890+0000 I REPL [rsSync] creating replication oplog of size: 1024MB...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:35.891+0000 m31101| 2015-07-19T23:38:35.891+0000 I REPL [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:35.891+0000 m31101| 2015-07-19T23:38:35.891+0000 I REPL [rsSync] creating replication oplog of size: 1024MB...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:35.984+0000 m31102| 2015-07-19T23:38:35.983+0000 I STORAGE [rsSync] Starting WiredTigerRecordStoreThread local.oplog.rs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:35.984+0000 m31101| 2015-07-19T23:38:35.983+0000 I STORAGE [rsSync] Starting WiredTigerRecordStoreThread local.oplog.rs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.173+0000 m31102| 2015-07-19T23:38:37.173+0000 I REPL [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.173+0000 m31102| 2015-07-19T23:38:37.173+0000 I REPL [rsSync] initial sync pending
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.214+0000 m31101| 2015-07-19T23:38:37.214+0000 I REPL [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.215+0000 m31101| 2015-07-19T23:38:37.214+0000 I REPL [rsSync] initial sync pending
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.340+0000 m31102| 2015-07-19T23:38:37.340+0000 I REPL [ReplicationExecutor] syncing from: ip-10-139-123-131:31100
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.340+0000 m31100| 2015-07-19T23:38:37.340+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47459 #6 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.343+0000 m31102| 2015-07-19T23:38:37.343+0000 I REPL [rsSync] initial sync drop all databases
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.343+0000 m31102| 2015-07-19T23:38:37.343+0000 I STORAGE [rsSync] dropAllDatabasesExceptLocal 1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.343+0000 m31102| 2015-07-19T23:38:37.343+0000 I REPL [rsSync] initial sync clone all databases
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.343+0000 m31102| 2015-07-19T23:38:37.343+0000 I REPL [rsSync] initial sync data copy, starting syncup
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.344+0000 m31102| 2015-07-19T23:38:37.343+0000 I REPL [rsSync] oplog sync 1 of 3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.344+0000 m31102| 2015-07-19T23:38:37.343+0000 I REPL [rsSync] oplog sync 2 of 3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.344+0000 m31102| 2015-07-19T23:38:37.344+0000 I REPL [rsSync] initial sync building indexes
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.344+0000 m31102| 2015-07-19T23:38:37.344+0000 I REPL [rsSync] oplog sync 3 of 3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.346+0000 m31102| 2015-07-19T23:38:37.346+0000 I REPL [rsSync] initial sync finishing up
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.346+0000 m31102| 2015-07-19T23:38:37.346+0000 I REPL [rsSync] set minValid=(term: -1, timestamp: Jul 19 23:38:28:1)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.346+0000 m31102| 2015-07-19T23:38:37.346+0000 I REPL [rsSync] initial sync done
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.348+0000 m31100| 2015-07-19T23:38:37.348+0000 I NETWORK [conn6] end connection 10.139.123.131:47459 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.348+0000 m31102| 2015-07-19T23:38:37.348+0000 I REPL [ReplicationExecutor] transition to RECOVERING
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.350+0000 m31102| 2015-07-19T23:38:37.350+0000 I REPL [ReplicationExecutor] transition to SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.363+0000 m31101| 2015-07-19T23:38:37.363+0000 I REPL [ReplicationExecutor] syncing from: ip-10-139-123-131:31100
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.364+0000 m31100| 2015-07-19T23:38:37.363+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47460 #7 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.366+0000 m31101| 2015-07-19T23:38:37.366+0000 I REPL [rsSync] initial sync drop all databases
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.366+0000 m31101| 2015-07-19T23:38:37.366+0000 I STORAGE [rsSync] dropAllDatabasesExceptLocal 1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.366+0000 m31101| 2015-07-19T23:38:37.366+0000 I REPL [rsSync] initial sync clone all databases
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.366+0000 m31101| 2015-07-19T23:38:37.366+0000 I REPL [rsSync] initial sync data copy, starting syncup
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.366+0000 m31101| 2015-07-19T23:38:37.366+0000 I REPL [rsSync] oplog sync 1 of 3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.367+0000 m31101| 2015-07-19T23:38:37.366+0000 I REPL [rsSync] oplog sync 2 of 3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.367+0000 m31101| 2015-07-19T23:38:37.367+0000 I REPL [rsSync] initial sync building indexes
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.367+0000 m31101| 2015-07-19T23:38:37.367+0000 I REPL [rsSync] oplog sync 3 of 3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.369+0000 m31101| 2015-07-19T23:38:37.369+0000 I REPL [rsSync] initial sync finishing up
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.369+0000 m31101| 2015-07-19T23:38:37.369+0000 I REPL [rsSync] set minValid=(term: -1, timestamp: Jul 19 23:38:28:1)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.369+0000 m31101| 2015-07-19T23:38:37.369+0000 I REPL [rsSync] initial sync done
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.371+0000 m31100| 2015-07-19T23:38:37.371+0000 I NETWORK [conn7] end connection 10.139.123.131:47460 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.371+0000 m31101| 2015-07-19T23:38:37.371+0000 I REPL [ReplicationExecutor] transition to RECOVERING
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.372+0000 m31101| 2015-07-19T23:38:37.372+0000 I REPL [ReplicationExecutor] transition to SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.379+0000 Replica set test!
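With both secondaries reporting "initial sync done" and transitioning to SECONDARY, the first shard's replica set is usable, and the harness moves on to test-rs1. A sketch of how a test typically blocks on these state transitions (getPrimary, awaitSecondaryNodes, and awaitReplication are ReplSetTest helpers; replTest is the handle from the earlier sketch, and the exact helper names in this shell version are an assumption):

    // Sketch: wait for the state transitions just logged before using the set.
    var primary = replTest.getPrimary();  // blocks until a node reports PRIMARY
    replTest.awaitSecondaryNodes();       // blocks until the rest report SECONDARY
    replTest.awaitReplication();          // optionally wait for writes to replicate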
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.379+0000 ReplSetTest Starting Set
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.379+0000 ReplSetTest n is : 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.380+0000 ReplSetTest n: 0 ports: [ 31200, 31201, 31202 ] 31200 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.380+0000 {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.380+0000     "useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.380+0000     "oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.380+0000     "keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.380+0000     "port" : 31200,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.380+0000     "noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.381+0000     "smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.381+0000     "rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.381+0000     "replSet" : "test-rs1",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.381+0000     "dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.381+0000     "useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.381+0000     "noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.381+0000     "pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.381+0000         "testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.382+0000         "shard" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.382+0000         "node" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.382+0000         "set" : "test-rs1"
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.382+0000     },
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.382+0000     "verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.382+0000     "restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.382+0000 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.382+0000 ReplSetTest Starting....
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.382+0000 Resetting db path '/data/db/job0/mongorunner/test-rs1-0'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.383+0000 2015-07-19T23:38:37.382+0000 I - [main] shell: started program (sh3304): /data/mci/src/mongod --oplogSize 1024 --port 31200 --noprealloc --smallfiles --rest --replSet test-rs1 --dbpath /data/db/job0/mongorunner/test-rs1-0 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.383+0000 2015-07-19T23:38:37.382+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.393+0000 m31200| 2015-07-19T23:38:37.392+0000 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.393+0000 m31200| 2015-07-19T23:38:37.392+0000 I CONTROL [main] ** enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.393+0000 m31200| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.419+0000 m31200| 2015-07-19T23:38:37.418+0000 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=3G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.583+0000 2015-07-19T23:38:37.583+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.783+0000 2015-07-19T23:38:37.783+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.891+0000 m31102| 2015-07-19T23:38:37.891+0000 I REPL [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.892+0000 m31101| 2015-07-19T23:38:37.892+0000 I REPL [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:37.984+0000 2015-07-19T23:38:37.984+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.185+0000 2015-07-19T23:38:38.184+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.385+0000 2015-07-19T23:38:38.385+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.440+0000 m31200| 2015-07-19T23:38:38.440+0000 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.441+0000 m31200| 2015-07-19T23:38:38.440+0000 I CONTROL [initandlisten] MongoDB starting : pid=3304 port=31200 dbpath=/data/db/job0/mongorunner/test-rs1-0 64-bit host=ip-10-139-123-131
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.441+0000 m31200| 2015-07-19T23:38:38.440+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.441+0000 m31200| 2015-07-19T23:38:38.440+0000 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.441+0000 m31200| 2015-07-19T23:38:38.440+0000 I CONTROL [initandlisten] ** Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.441+0000 m31200| 2015-07-19T23:38:38.440+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.441+0000 m31200| 2015-07-19T23:38:38.440+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.442+0000 m31200| 2015-07-19T23:38:38.440+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.442+0000 m31200| 2015-07-19T23:38:38.440+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.442+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.442+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.442+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.442+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.442+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten] ** WARNING: soft rlimits too low. rlimits set to 1024 processes, 64000 files. Number of processes should be at least 32000 : 0.5 times number of files.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.443+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.443+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.443+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten] git version: 92c6ee5cedf09928720b79592981dc1c6aeb0482
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.443+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten] OpenSSL version: OpenSSL 1.0.1e-fips 11 Feb 2013
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.443+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten] allocator: tcmalloc
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.443+0000 m31200| 2015-07-19T23:38:38.441+0000 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31200 }, replication: { oplogSizeMB: 1024, replSet: "test-rs1" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs1-0", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.443+0000 m31200| 2015-07-19T23:38:38.442+0000 I NETWORK [websvr] admin web console waiting for connections on port 32200
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.586+0000 2015-07-19T23:38:38.586+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.685+0000 m31200| 2015-07-19T23:38:38.685+0000 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.685+0000 m31200| 2015-07-19T23:38:38.685+0000 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.786+0000 2015-07-19T23:38:38.786+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.871+0000 m31200| 2015-07-19T23:38:38.870+0000 I NETWORK [initandlisten] waiting for connections on port 31200
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.888+0000 m31100| 2015-07-19T23:38:38.888+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31101 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.889+0000 m31100| 2015-07-19T23:38:38.888+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31102 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.893+0000 m31102| 2015-07-19T23:38:38.893+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31101 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.895+0000 m31101| 2015-07-19T23:38:38.895+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31102 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.987+0000 m31200| 2015-07-19T23:38:38.987+0000 I NETWORK [initandlisten] connection accepted from 127.0.0.1:40634 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.987+0000 [ connection to ip-10-139-123-131:31200 ]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.988+0000 ReplSetTest n is : 1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.988+0000 ReplSetTest n: 1 ports: [ 31200, 31201, 31202 ] 31201 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.988+0000 {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.988+0000     "useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.988+0000     "oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.988+0000     "keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.988+0000     "port" : 31201,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.989+0000     "noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.989+0000     "smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.989+0000     "rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.989+0000     "replSet" : "test-rs1",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.989+0000     "dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.989+0000     "useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.989+0000     "noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.989+0000     "pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000         "testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000         "shard" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000         "node" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000         "set" : "test-rs1"
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000     },
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000     "verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000     "restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000 ReplSetTest Starting....
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000 Resetting db path '/data/db/job0/mongorunner/test-rs1-1'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.990+0000 2015-07-19T23:38:38.990+0000 I - [main] shell: started program (sh3336): /data/mci/src/mongod --oplogSize 1024 --port 31201 --noprealloc --smallfiles --rest --replSet test-rs1 --dbpath /data/db/job0/mongorunner/test-rs1-1 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:38.991+0000 2015-07-19T23:38:38.990+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31201, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.000+0000 m31201| 2015-07-19T23:38:39.000+0000 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.001+0000 m31201| 2015-07-19T23:38:39.000+0000 I CONTROL [main] ** enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.001+0000 m31201| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.026+0000 m31201| 2015-07-19T23:38:39.026+0000 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=3G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.191+0000 2015-07-19T23:38:39.190+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31201, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.391+0000 2015-07-19T23:38:39.391+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31201, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.592+0000 2015-07-19T23:38:39.592+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31201, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.792+0000 2015-07-19T23:38:39.792+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31201, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.883+0000 m31201| 2015-07-19T23:38:39.883+0000 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.884+0000 m31201| 2015-07-19T23:38:39.883+0000 I CONTROL [initandlisten] MongoDB starting : pid=3336 port=31201 dbpath=/data/db/job0/mongorunner/test-rs1-1 64-bit host=ip-10-139-123-131
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.884+0000 m31201| 2015-07-19T23:38:39.883+0000 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.884+0000 m31201| 2015-07-19T23:38:39.883+0000 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.884+0000 m31201| 2015-07-19T23:38:39.883+0000 I CONTROL [initandlisten] ** Not recommended for production. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.884+0000 m31201| 2015-07-19T23:38:39.883+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.884+0000 m31201| 2015-07-19T23:38:39.883+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.885+0000 m31201| 2015-07-19T23:38:39.883+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.885+0000 m31201| 2015-07-19T23:38:39.883+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never' [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.885+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.885+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.885+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never' [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.885+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.886+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] ** WARNING: soft rlimits too low. rlimits set to 1024 processes, 64000 files. Number of processes should be at least 32000 : 0.5 times number of files. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.886+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.886+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] db version v3.1.6-pre- [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.886+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] git version: 92c6ee5cedf09928720b79592981dc1c6aeb0482 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.886+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] OpenSSL version: OpenSSL 1.0.1e-fips 11 Feb 2013 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.886+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] allocator: tcmalloc [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.886+0000 m31201| 2015-07-19T23:38:39.884+0000 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31201 }, replication: { oplogSizeMB: 1024, replSet: "test-rs1" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs1-1", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.886+0000 m31201| 2015-07-19T23:38:39.885+0000 I NETWORK [websvr] admin web console waiting for connections on port 32201 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.895+0000 m31201| 2015-07-19T23:38:39.894+0000 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.895+0000 m31201| 2015-07-19T23:38:39.894+0000 I REPL [initandlisten] Did not find local replica set 
configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.898+0000 m31201| 2015-07-19T23:38:39.898+0000 I NETWORK [initandlisten] waiting for connections on port 31201 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.993+0000 m31201| 2015-07-19T23:38:39.993+0000 I NETWORK [initandlisten] connection accepted from 127.0.0.1:33124 #1 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.994+0000 [ [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.994+0000 connection to ip-10-139-123-131:31200, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.994+0000 connection to ip-10-139-123-131:31201 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.994+0000 ] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.994+0000 ReplSetTest n is : 2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.994+0000 ReplSetTest n: 2 ports: [ 31200, 31201, 31202 ] 31202 number [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.994+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.995+0000 "useHostName" : true, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.995+0000 "oplogSize" : 1024, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.995+0000 "keyFile" : undefined, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.995+0000 "port" : 31202, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.995+0000 "noprealloc" : "", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.995+0000 "smallfiles" : "", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.995+0000 "rest" : "", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.995+0000 "replSet" : "test-rs1", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.995+0000 "dbpath" : "$set-$node", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.996+0000 "useHostname" : true, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.996+0000 "noJournalPrealloc" : undefined, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.996+0000 "pathOpts" : { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.996+0000 "testName" : "test", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.996+0000 "shard" : 1, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.996+0000 "node" : 2, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.996+0000 "set" : "test-rs1" [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.996+0000 }, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.997+0000 "verbose" : 0, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.997+0000 "restart" : undefined [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.997+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.997+0000 ReplSetTest Starting.... 
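The "dbpath" : "$set-$node" entry in the option documents above is a template: the shell harness substitutes each $key with the matching value from "pathOpts", which is how this node lands at /data/db/job0/mongorunner/test-rs1-2 in the startup line that follows. A minimal sketch of that substitution in the test's own JavaScript (illustrative only, not the harness's actual implementation; the function name is made up):

    // Expand a "$set-$node"-style template from a pathOpts document.
    // expandDbPath("$set-$node", {set: "test-rs1", node: 2}) -> "test-rs1-2",
    // which MongoRunner then roots under MongoRunner.dataPath.
    function expandDbPath(template, pathOpts) {
        var out = template;
        for (var key in pathOpts) {
            out = out.replace('$' + key, pathOpts[key].toString());
        }
        return out;
    }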
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.997+0000 Resetting db path '/data/db/job0/mongorunner/test-rs1-2' [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.997+0000 2015-07-19T23:38:39.996+0000 I - [main] shell: started program (sh3365): /data/mci/src/mongod --oplogSize 1024 --port 31202 --noprealloc --smallfiles --rest --replSet test-rs1 --dbpath /data/db/job0/mongorunner/test-rs1-2 --setParameter enableTestCommands=1 --storageEngine wiredTiger [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:39.997+0000 2015-07-19T23:38:39.996+0000 W NETWORK [main] Failed to connect to 127.0.0.1:31202, reason: errno:111 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.007+0000 m31202| 2015-07-19T23:38:40.006+0000 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.007+0000 m31202| 2015-07-19T23:38:40.006+0000 I CONTROL [main] ** enabling http interface [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.007+0000 m31202| note: noprealloc may hurt performance in many applications [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.033+0000 m31202| 2015-07-19T23:38:40.032+0000 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=3G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0), [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.065+0000 m31202| 2015-07-19T23:38:40.064+0000 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.065+0000 m31202| 2015-07-19T23:38:40.064+0000 I CONTROL [initandlisten] MongoDB starting : pid=3365 port=31202 dbpath=/data/db/job0/mongorunner/test-rs1-2 64-bit host=ip-10-139-123-131 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.065+0000 m31202| 2015-07-19T23:38:40.064+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.065+0000 m31202| 2015-07-19T23:38:40.064+0000 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.066+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] ** Not recommended for production. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.066+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.066+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.066+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.066+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never' [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.066+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.067+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.067+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never' [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.067+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.067+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] ** WARNING: soft rlimits too low. rlimits set to 1024 processes, 64000 files. Number of processes should be at least 32000 : 0.5 times number of files. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.067+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.067+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] db version v3.1.6-pre- [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.067+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] git version: 92c6ee5cedf09928720b79592981dc1c6aeb0482 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.067+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] OpenSSL version: OpenSSL 1.0.1e-fips 11 Feb 2013 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.067+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] allocator: tcmalloc [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.068+0000 m31202| 2015-07-19T23:38:40.065+0000 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31202 }, replication: { oplogSizeMB: 1024, replSet: "test-rs1" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs1-2", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.068+0000 m31202| 2015-07-19T23:38:40.066+0000 I NETWORK [websvr] admin web console waiting for connections on port 32202 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.069+0000 m31202| 2015-07-19T23:38:40.069+0000 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.070+0000 m31202| 2015-07-19T23:38:40.069+0000 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.072+0000 m31202| 2015-07-19T23:38:40.072+0000 I NETWORK [initandlisten] waiting for connections on port 31202 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.197+0000 m31202| 2015-07-19T23:38:40.197+0000 I NETWORK [initandlisten] connection accepted from 127.0.0.1:38330 #1 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.198+0000 [ [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.198+0000 connection to ip-10-139-123-131:31200, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.198+0000 connection to ip-10-139-123-131:31201, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.198+0000 connection to ip-10-139-123-131:31202 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.198+0000 ] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.198+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.198+0000 "replSetInitiate" : { 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.198+0000 "_id" : "test-rs1", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.198+0000 "members" : [ [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 "_id" : 0, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 "host" : "ip-10-139-123-131:31200" [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 }, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 "_id" : 1, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 "host" : "ip-10-139-123-131:31201" [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 }, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 "_id" : 2, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 "host" : "ip-10-139-123-131:31202" [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.199+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.200+0000 ] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.200+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.200+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.200+0000 m31200| 2015-07-19T23:38:40.198+0000 I REPL [conn1] replSetInitiate admin command received from client [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.200+0000 m31201| 2015-07-19T23:38:40.200+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:35680 #2 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.200+0000 m31202| 2015-07-19T23:38:40.200+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:41715 #2 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.201+0000 m31201| 2015-07-19T23:38:40.200+0000 I NETWORK [conn2] end connection 10.139.123.131:35680 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.201+0000 m31200| 2015-07-19T23:38:40.200+0000 I REPL [conn1] replSetInitiate config object with 3 members parses ok [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.201+0000 m31202| 2015-07-19T23:38:40.201+0000 I NETWORK [conn2] end connection 10.139.123.131:41715 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.201+0000 m31201| 2015-07-19T23:38:40.201+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:35682 #3 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.201+0000 m31202| 2015-07-19T23:38:40.201+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:41717 #3 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.202+0000 m31200| 2015-07-19T23:38:40.202+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39319 #2 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.202+0000 m31200| 2015-07-19T23:38:40.202+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39320 #3 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.205+0000 m31200| 2015-07-19T23:38:40.205+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: "test-rs1", version: 1, members: [ { _id: 0, host: "ip-10-139-123-131:31200", arbiterOnly: false, buildIndexes: true, 
hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "ip-10-139-123-131:31201", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "ip-10-139-123-131:31202", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.206+0000 m31200| 2015-07-19T23:38:40.205+0000 I REPL [ReplicationExecutor] This node is ip-10-139-123-131:31200 in the config [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.206+0000 m31200| 2015-07-19T23:38:40.205+0000 I REPL [ReplicationExecutor] transition to STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.206+0000 m31200| 2015-07-19T23:38:40.205+0000 I REPL [conn1] ****** [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.206+0000 m31200| 2015-07-19T23:38:40.205+0000 I REPL [conn1] creating replication oplog of size: 1024MB... [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.206+0000 m31200| 2015-07-19T23:38:40.205+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31201 is now in state STARTUP [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.206+0000 m31200| 2015-07-19T23:38:40.205+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31202 is now in state STARTUP [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.207+0000 m31200| 2015-07-19T23:38:40.207+0000 I STORAGE [conn1] Starting WiredTigerRecordStoreThread local.oplog.rs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.224+0000 m31200| 2015-07-19T23:38:40.224+0000 I REPL [conn1] ****** [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.225+0000 m31200| 2015-07-19T23:38:40.225+0000 I REPL [conn1] Starting replication applier threads [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.225+0000 m31200| 2015-07-19T23:38:40.225+0000 I REPL [ReplicationExecutor] transition to RECOVERING [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:40.227+0000 m31200| 2015-07-19T23:38:40.227+0000 I REPL [ReplicationExecutor] transition to SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.205+0000 m31200| 2015-07-19T23:38:42.205+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39321 #4 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.205+0000 m31200| 2015-07-19T23:38:42.205+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39322 #5 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.206+0000 m31200| 2015-07-19T23:38:42.205+0000 I NETWORK [conn5] end connection 10.139.123.131:39322 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.206+0000 m31201| 2015-07-19T23:38:42.205+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:35688 #4 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.206+0000 m31201| 2015-07-19T23:38:42.206+0000 I NETWORK [conn4] end connection 10.139.123.131:35688 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.206+0000 m31202| 2015-07-19T23:38:42.206+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:41723 #4 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.206+0000 
m31200| 2015-07-19T23:38:42.205+0000 I NETWORK [conn4] end connection 10.139.123.131:39321 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.207+0000 m31200| 2015-07-19T23:38:42.206+0000 I REPL [ReplicationExecutor] Standing for election [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.207+0000 m31200| 2015-07-19T23:38:42.206+0000 I REPL [ReplicationExecutor] not electing self, we could not contact enough voting members [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.207+0000 m31202| 2015-07-19T23:38:42.207+0000 I NETWORK [conn4] end connection 10.139.123.131:41723 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.210+0000 m31202| 2015-07-19T23:38:42.210+0000 I REPL [replExecDBWorker-2] Starting replication applier threads [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.211+0000 m31202| 2015-07-19T23:38:42.210+0000 W REPL [rsSync] did not receive a valid config yet, sleeping 5 seconds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.211+0000 m31201| 2015-07-19T23:38:42.211+0000 I REPL [replExecDBWorker-2] Starting replication applier threads [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.211+0000 m31202| 2015-07-19T23:38:42.211+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: "test-rs1", version: 1, members: [ { _id: 0, host: "ip-10-139-123-131:31200", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "ip-10-139-123-131:31201", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "ip-10-139-123-131:31202", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.211+0000 m31202| 2015-07-19T23:38:42.211+0000 I REPL [ReplicationExecutor] This node is ip-10-139-123-131:31202 in the config [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.212+0000 m31202| 2015-07-19T23:38:42.211+0000 I REPL [ReplicationExecutor] transition to STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.212+0000 m31201| 2015-07-19T23:38:42.211+0000 W REPL [rsSync] did not receive a valid config yet, sleeping 5 seconds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.212+0000 m31202| 2015-07-19T23:38:42.211+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31200 is now in state SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.212+0000 m31201| 2015-07-19T23:38:42.211+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: "test-rs1", version: 1, members: [ { _id: 0, host: "ip-10-139-123-131:31200", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "ip-10-139-123-131:31201", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "ip-10-139-123-131:31202", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.212+0000 
m31201| 2015-07-19T23:38:42.211+0000 I REPL [ReplicationExecutor] This node is ip-10-139-123-131:31201 in the config [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.212+0000 m31201| 2015-07-19T23:38:42.211+0000 I REPL [ReplicationExecutor] transition to STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.212+0000 m31201| 2015-07-19T23:38:42.211+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31200 is now in state SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.213+0000 m31201| 2015-07-19T23:38:42.211+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:35690 #5 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.213+0000 m31202| 2015-07-19T23:38:42.212+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:41725 #5 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.213+0000 m31202| 2015-07-19T23:38:42.212+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31201 is now in state STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:42.213+0000 m31201| 2015-07-19T23:38:42.212+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31202 is now in state STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.207+0000 m31200| 2015-07-19T23:38:44.207+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31202 is now in state STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.208+0000 m31200| 2015-07-19T23:38:44.207+0000 I REPL [ReplicationExecutor] Standing for election [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.208+0000 m31200| 2015-07-19T23:38:44.207+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31201 is now in state STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.208+0000 m31200| 2015-07-19T23:38:44.207+0000 I REPL [ReplicationExecutor] running for election [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.208+0000 m31201| 2015-07-19T23:38:44.207+0000 I REPL [ReplicationExecutor] replSetElect voting yea for ip-10-139-123-131:31200 (0) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.208+0000 m31202| 2015-07-19T23:38:44.208+0000 I REPL [ReplicationExecutor] replSetElect voting yea for ip-10-139-123-131:31200 (0) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.208+0000 m31200| 2015-07-19T23:38:44.208+0000 I REPL [ReplicationExecutor] received vote: 1 votes from ip-10-139-123-131:31201 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.208+0000 m31200| 2015-07-19T23:38:44.208+0000 I REPL [ReplicationExecutor] election succeeded, assuming primary role [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.208+0000 m31200| 2015-07-19T23:38:44.208+0000 I REPL [ReplicationExecutor] transition to PRIMARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.211+0000 m31202| 2015-07-19T23:38:44.211+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31200 is now in state PRIMARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.212+0000 m31201| 2015-07-19T23:38:44.212+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31200 is now in state PRIMARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:44.227+0000 m31200| 2015-07-19T23:38:44.227+0000 I REPL [rsSync] transition to primary complete; database writes are now permitted [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.211+0000 m31202| 2015-07-19T23:38:47.211+0000 I REPL [rsSync] ****** 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.211+0000 m31202| 2015-07-19T23:38:47.211+0000 I REPL [rsSync] creating replication oplog of size: 1024MB... [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.211+0000 m31201| 2015-07-19T23:38:47.211+0000 I REPL [rsSync] ****** [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.212+0000 m31201| 2015-07-19T23:38:47.211+0000 I REPL [rsSync] creating replication oplog of size: 1024MB... [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.213+0000 m31202| 2015-07-19T23:38:47.212+0000 I STORAGE [rsSync] Starting WiredTigerRecordStoreThread local.oplog.rs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.213+0000 m31201| 2015-07-19T23:38:47.213+0000 I STORAGE [rsSync] Starting WiredTigerRecordStoreThread local.oplog.rs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.267+0000 m31202| 2015-07-19T23:38:47.267+0000 I REPL [rsSync] ****** [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.267+0000 m31202| 2015-07-19T23:38:47.267+0000 I REPL [rsSync] initial sync pending [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.272+0000 m31201| 2015-07-19T23:38:47.272+0000 I REPL [rsSync] ****** [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.272+0000 m31201| 2015-07-19T23:38:47.272+0000 I REPL [rsSync] initial sync pending [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.273+0000 m31202| 2015-07-19T23:38:47.272+0000 I REPL [ReplicationExecutor] syncing from: ip-10-139-123-131:31200 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.273+0000 m31200| 2015-07-19T23:38:47.273+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39328 #6 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.276+0000 m31202| 2015-07-19T23:38:47.275+0000 I REPL [rsSync] initial sync drop all databases [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.276+0000 m31202| 2015-07-19T23:38:47.275+0000 I STORAGE [rsSync] dropAllDatabasesExceptLocal 1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.276+0000 m31202| 2015-07-19T23:38:47.276+0000 I REPL [rsSync] initial sync clone all databases [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.276+0000 m31202| 2015-07-19T23:38:47.276+0000 I REPL [rsSync] initial sync data copy, starting syncup [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.276+0000 m31202| 2015-07-19T23:38:47.276+0000 I REPL [rsSync] oplog sync 1 of 3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.277+0000 m31202| 2015-07-19T23:38:47.277+0000 I REPL [rsSync] oplog sync 2 of 3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.278+0000 m31201| 2015-07-19T23:38:47.277+0000 I REPL [ReplicationExecutor] syncing from: ip-10-139-123-131:31200 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.278+0000 m31202| 2015-07-19T23:38:47.278+0000 I REPL [rsSync] initial sync building indexes [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.278+0000 m31202| 2015-07-19T23:38:47.278+0000 I REPL [rsSync] oplog sync 3 of 3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.278+0000 m31200| 2015-07-19T23:38:47.278+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39329 #7 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.280+0000 m31202| 2015-07-19T23:38:47.280+0000 I REPL [rsSync] initial sync finishing up [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.280+0000 m31202| 2015-07-19T23:38:47.280+0000 I REPL [rsSync] set 
minValid=(term: -1, timestamp: Jul 19 23:38:40:1) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.280+0000 m31202| 2015-07-19T23:38:47.280+0000 I REPL [rsSync] initial sync done [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.280+0000 m31201| 2015-07-19T23:38:47.280+0000 I REPL [rsSync] initial sync drop all databases [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.281+0000 m31201| 2015-07-19T23:38:47.280+0000 I STORAGE [rsSync] dropAllDatabasesExceptLocal 1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.281+0000 m31201| 2015-07-19T23:38:47.280+0000 I REPL [rsSync] initial sync clone all databases [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.281+0000 m31201| 2015-07-19T23:38:47.281+0000 I REPL [rsSync] initial sync data copy, starting syncup [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.281+0000 m31201| 2015-07-19T23:38:47.281+0000 I REPL [rsSync] oplog sync 1 of 3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.281+0000 m31201| 2015-07-19T23:38:47.281+0000 I REPL [rsSync] oplog sync 2 of 3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.283+0000 m31202| 2015-07-19T23:38:47.282+0000 I REPL [ReplicationExecutor] transition to RECOVERING [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.283+0000 m31200| 2015-07-19T23:38:47.282+0000 I NETWORK [conn6] end connection 10.139.123.131:39328 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.283+0000 m31201| 2015-07-19T23:38:47.281+0000 I REPL [rsSync] initial sync building indexes [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.283+0000 m31201| 2015-07-19T23:38:47.281+0000 I REPL [rsSync] oplog sync 3 of 3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.283+0000 m31201| 2015-07-19T23:38:47.283+0000 I REPL [rsSync] initial sync finishing up [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.283+0000 m31201| 2015-07-19T23:38:47.283+0000 I REPL [rsSync] set minValid=(term: -1, timestamp: Jul 19 23:38:40:1) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.284+0000 m31201| 2015-07-19T23:38:47.283+0000 I REPL [rsSync] initial sync done [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.284+0000 m31202| 2015-07-19T23:38:47.284+0000 I REPL [ReplicationExecutor] transition to SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.286+0000 m31201| 2015-07-19T23:38:47.285+0000 I REPL [ReplicationExecutor] transition to RECOVERING [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.286+0000 m31200| 2015-07-19T23:38:47.285+0000 I NETWORK [conn7] end connection 10.139.123.131:39329 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.287+0000 m31201| 2015-07-19T23:38:47.287+0000 I REPL [ReplicationExecutor] transition to SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.490+0000 2015-07-19T23:38:47.490+0000 I NETWORK [main] starting new replica set monitor for replica set test-rs0 with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.491+0000 2015-07-19T23:38:47.490+0000 I NETWORK [main] ip-10-139-123-131:31100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.491+0000 2015-07-19T23:38:47.490+0000 I NETWORK [main] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.491+0000 2015-07-19T23:38:47.490+0000 I NETWORK [main] ip-10-139-123-131:31101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.491+0000 2015-07-19T23:38:47.490+0000 I NETWORK [main] , [js_test:fsm_all_sharded_replication] 
2015-07-19T23:38:47.491+0000 2015-07-19T23:38:47.490+0000 I NETWORK [main] ip-10-139-123-131:31102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.491+0000 2015-07-19T23:38:47.490+0000 I NETWORK [ReplicaSetMonitorWatcher] starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.491+0000 m31100| 2015-07-19T23:38:47.491+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47494 #8 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.498+0000 2015-07-19T23:38:47.498+0000 I NETWORK [main] starting new replica set monitor for replica set test-rs1 with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.498+0000 2015-07-19T23:38:47.498+0000 I NETWORK [main] ip-10-139-123-131:31200 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.498+0000 2015-07-19T23:38:47.498+0000 I NETWORK [main] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.499+0000 2015-07-19T23:38:47.498+0000 I NETWORK [main] ip-10-139-123-131:31201 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.499+0000 2015-07-19T23:38:47.498+0000 I NETWORK [main] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.499+0000 2015-07-19T23:38:47.498+0000 I NETWORK [main] ip-10-139-123-131:31202 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.499+0000 m31200| 2015-07-19T23:38:47.499+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39331 #8 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.499+0000 ReplSetTest Starting Set [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.499+0000 ReplSetTest n is : 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.500+0000 ReplSetTest n: 0 ports: [ 29000 ] 29000 number [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.506+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.506+0000 "useHostName" : true, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.506+0000 "oplogSize" : 40, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.506+0000 "keyFile" : undefined, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.506+0000 "port" : 29000, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.506+0000 "noprealloc" : "", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.506+0000 "smallfiles" : "", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.507+0000 "rest" : "", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.507+0000 "replSet" : "test-configRS", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.507+0000 "dbpath" : "$set-$node", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.507+0000 "pathOpts" : { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.507+0000 "testName" : "test", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.507+0000 "node" : 0, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.507+0000 "set" : "test-configRS" [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.507+0000 }, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.508+0000 "configsvr" : "", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.508+0000 "noJournalPrealloc" : undefined, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.508+0000 "restart" : undefined [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.508+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.508+0000 ReplSetTest Starting.... 
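Two details in this options document differ from the shard nodes above: "oplogSize" is 40 (MB) rather than 1024, and the "configsvr" : "" flag becomes --configsvr on the command line below, so this single node is started as the test-configRS config server. Once the node is up, the resulting capped oplog can be confirmed from a shell on port 29000; a small sketch using the standard collStats fields:

    // Check the capped oplog on the config server (mongo --port 29000).
    var stats = db.getSiblingDB("local").oplog.rs.stats();
    print("capped: " + stats.capped);                       // true
    print("max size MB: " + stats.maxSize / (1024 * 1024)); // ~40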
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.508+0000 Resetting db path '/data/db/job0/mongorunner/test-configRS-0' [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.508+0000 2015-07-19T23:38:47.508+0000 I - [main] shell: started program (sh3662): /data/mci/src/mongod --oplogSize 40 --port 29000 --noprealloc --smallfiles --rest --replSet test-configRS --dbpath /data/db/job0/mongorunner/test-configRS-0 --configsvr --setParameter enableTestCommands=1 --storageEngine wiredTiger [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.509+0000 2015-07-19T23:38:47.508+0000 W NETWORK [main] Failed to connect to 127.0.0.1:29000, reason: errno:111 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.519+0000 m29000| 2015-07-19T23:38:47.519+0000 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.519+0000 m29000| 2015-07-19T23:38:47.519+0000 I CONTROL [main] ** enabling http interface [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.520+0000 m29000| note: noprealloc may hurt performance in many applications [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.545+0000 m29000| 2015-07-19T23:38:47.545+0000 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=3G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0), [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.583+0000 m29000| 2015-07-19T23:38:47.582+0000 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.583+0000 m29000| 2015-07-19T23:38:47.582+0000 I CONTROL [initandlisten] MongoDB starting : pid=3662 port=29000 dbpath=/data/db/job0/mongorunner/test-configRS-0 master=1 64-bit host=ip-10-139-123-131 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.583+0000 m29000| 2015-07-19T23:38:47.582+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.583+0000 m29000| 2015-07-19T23:38:47.583+0000 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.583+0000 m29000| 2015-07-19T23:38:47.583+0000 I CONTROL [initandlisten] ** Not recommended for production. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.584+0000 m29000| 2015-07-19T23:38:47.583+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.584+0000 m29000| 2015-07-19T23:38:47.583+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.584+0000 m29000| 2015-07-19T23:38:47.583+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.584+0000 m29000| 2015-07-19T23:38:47.583+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never' [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.584+0000 m29000| 2015-07-19T23:38:47.583+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.584+0000 m29000| 2015-07-19T23:38:47.583+0000 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.584+0000 m29000| 2015-07-19T23:38:47.584+0000 I CONTROL [initandlisten] ** We suggest setting it to 'never' [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.585+0000 m29000| 2015-07-19T23:38:47.584+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.585+0000 m29000| 2015-07-19T23:38:47.584+0000 I CONTROL [initandlisten] ** WARNING: soft rlimits too low. rlimits set to 1024 processes, 64000 files. Number of processes should be at least 32000 : 0.5 times number of files. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.585+0000 m29000| 2015-07-19T23:38:47.584+0000 I CONTROL [initandlisten] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.585+0000 m29000| 2015-07-19T23:38:47.584+0000 I CONTROL [initandlisten] db version v3.1.6-pre- [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.585+0000 m29000| 2015-07-19T23:38:47.584+0000 I CONTROL [initandlisten] git version: 92c6ee5cedf09928720b79592981dc1c6aeb0482 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.585+0000 m29000| 2015-07-19T23:38:47.584+0000 I CONTROL [initandlisten] OpenSSL version: OpenSSL 1.0.1e-fips 11 Feb 2013 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.585+0000 m29000| 2015-07-19T23:38:47.584+0000 I CONTROL [initandlisten] allocator: tcmalloc [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.586+0000 m29000| 2015-07-19T23:38:47.584+0000 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 29000 }, replication: { oplogSizeMB: 40, replSet: "test-configRS" }, setParameter: { enableTestCommands: "1" }, sharding: { clusterRole: "configsvr" }, storage: { dbPath: "/data/db/job0/mongorunner/test-configRS-0", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.586+0000 m29000| 2015-07-19T23:38:47.584+0000 I NETWORK [websvr] admin web console waiting for connections on port 30000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.588+0000 m29000| 2015-07-19T23:38:47.588+0000 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.588+0000 m29000| 2015-07-19T23:38:47.588+0000 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.591+0000 m29000| 2015-07-19T23:38:47.591+0000 I NETWORK [initandlisten] waiting for connections on port 29000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.709+0000 m29000| 2015-07-19T23:38:47.709+0000 I NETWORK [initandlisten] connection accepted from 127.0.0.1:54348 #1 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.710+0000 [ connection to ip-10-139-123-131:29000 ] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.710+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.710+0000 "replSetInitiate" : { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.711+0000 "_id" : "test-configRS", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.711+0000 "members" : [ [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.711+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.711+0000 "_id" 
: 0, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.711+0000 "host" : "ip-10-139-123-131:29000" [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.711+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.711+0000 ] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.711+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.711+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.712+0000 m29000| 2015-07-19T23:38:47.711+0000 I REPL [conn1] replSetInitiate admin command received from client [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.712+0000 m29000| 2015-07-19T23:38:47.712+0000 I REPL [conn1] replSetInitiate config object with 1 members parses ok [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.716+0000 m29000| 2015-07-19T23:38:47.716+0000 I REPL [ReplicationExecutor] New replica set config in use: { _id: "test-configRS", version: 1, members: [ { _id: 0, host: "ip-10-139-123-131:29000", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.716+0000 m29000| 2015-07-19T23:38:47.716+0000 I REPL [ReplicationExecutor] This node is ip-10-139-123-131:29000 in the config [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.716+0000 m29000| 2015-07-19T23:38:47.716+0000 I REPL [ReplicationExecutor] transition to STARTUP2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.716+0000 m29000| 2015-07-19T23:38:47.716+0000 I REPL [conn1] ****** [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.717+0000 m29000| 2015-07-19T23:38:47.716+0000 I REPL [conn1] creating replication oplog of size: 40MB... 
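The single-member initiate above is the same command one can issue by hand against a fresh node; a minimal shell equivalent built from the document shown in the log:

    // Manual equivalent of the replSetInitiate the harness just sent:
    db.adminCommand({
        replSetInitiate: {
            _id: "test-configRS",
            members: [{_id: 0, host: "ip-10-139-123-131:29000"}]
        }
    });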
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.718+0000 m29000| 2015-07-19T23:38:47.718+0000 I STORAGE [conn1] Starting WiredTigerRecordStoreThread local.oplog.rs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.733+0000 m29000| 2015-07-19T23:38:47.733+0000 I REPL [conn1] ****** [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.734+0000 m29000| 2015-07-19T23:38:47.733+0000 I REPL [conn1] Starting replication applier threads [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.734+0000 m29000| 2015-07-19T23:38:47.734+0000 I REPL [ReplicationExecutor] transition to RECOVERING [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.736+0000 m29000| 2015-07-19T23:38:47.736+0000 I REPL [ReplicationExecutor] transition to SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:47.736+0000 m29000| 2015-07-19T23:38:47.736+0000 I REPL [ReplicationExecutor] transition to PRIMARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.209+0000 m31200| 2015-07-19T23:38:48.208+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31201 is now in state SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.209+0000 m31200| 2015-07-19T23:38:48.209+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31202 is now in state SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.212+0000 m31202| 2015-07-19T23:38:48.211+0000 I REPL [ReplicationExecutor] could not find member to sync from [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.212+0000 m31201| 2015-07-19T23:38:48.212+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31202 is now in state SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.212+0000 m31202| 2015-07-19T23:38:48.212+0000 I REPL [ReplicationExecutor] Member ip-10-139-123-131:31201 is now in state SECONDARY [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.213+0000 m31201| 2015-07-19T23:38:48.212+0000 I REPL [ReplicationExecutor] syncing from: ip-10-139-123-131:31200 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.213+0000 m31200| 2015-07-19T23:38:48.213+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39334 #9 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.278+0000 m31201| 2015-07-19T23:38:48.278+0000 I REPL [SyncSourceFeedback] setting syncSourceFeedback to ip-10-139-123-131:31200 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.279+0000 m31200| 2015-07-19T23:38:48.278+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39335 #10 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.734+0000 m29000| 2015-07-19T23:38:48.734+0000 I REPL [rsSync] transition to primary complete; database writes are now permitted [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.738+0000 "config servers: test-configRS/ip-10-139-123-131:29000" [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.739+0000 2015-07-19T23:38:48.738+0000 I NETWORK [main] starting new replica set monitor for replica set test-configRS with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.739+0000 2015-07-19T23:38:48.738+0000 I NETWORK [main] ip-10-139-123-131:29000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.739+0000 m29000| 2015-07-19T23:38:48.739+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55234 #2 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.740+0000 ShardingTest test : 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.740+0000 { [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.740+0000 "config" : "test-configRS/ip-10-139-123-131:29000", [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.740+0000 "shards" : [ [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.740+0000 connection to test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102, [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.741+0000 connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.741+0000 ] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.741+0000 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.742+0000 2015-07-19T23:38:48.742+0000 I - [main] shell: started program (sh3729): /data/mci/src/mongos --port 30999 --configdb test-configRS/ip-10-139-123-131:29000 --chunkSize 50 --setParameter enableTestCommands=1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.742+0000 2015-07-19T23:38:48.742+0000 W NETWORK [main] Failed to connect to 127.0.0.1:30999, reason: errno:111 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.750+0000 m30999| 2015-07-19T23:38:48.750+0000 W SHARDING [main] running with less than 3 config servers should be done only for testing purposes and is not recommended for production [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.754+0000 m30999| 2015-07-19T23:38:48.754+0000 I CONTROL [main] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.755+0000 m30999| 2015-07-19T23:38:48.754+0000 I CONTROL [main] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.755+0000 m30999| 2015-07-19T23:38:48.754+0000 I CONTROL [main] ** Not recommended for production. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.755+0000 m30999| 2015-07-19T23:38:48.754+0000 I CONTROL [main] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.755+0000 m30999| 2015-07-19T23:38:48.755+0000 I SHARDING [mongosMain] MongoS version 3.1.6-pre- starting: pid=3729 port=30999 64-bit host=ip-10-139-123-131 (--help for usage) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.755+0000 m30999| 2015-07-19T23:38:48.755+0000 I CONTROL [mongosMain] db version v3.1.6-pre- [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.756+0000 m30999| 2015-07-19T23:38:48.755+0000 I CONTROL [mongosMain] git version: 92c6ee5cedf09928720b79592981dc1c6aeb0482 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.756+0000 m30999| 2015-07-19T23:38:48.755+0000 I CONTROL [mongosMain] OpenSSL version: OpenSSL 1.0.1e-fips 11 Feb 2013 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.756+0000 m30999| 2015-07-19T23:38:48.755+0000 I CONTROL [mongosMain] allocator: tcmalloc [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.756+0000 m30999| 2015-07-19T23:38:48.755+0000 I CONTROL [mongosMain] options: { net: { port: 30999 }, setParameter: { enableTestCommands: "1" }, sharding: { chunkSize: 50, configDB: "test-configRS/ip-10-139-123-131:29000" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.756+0000 m30999| 2015-07-19T23:38:48.756+0000 I NETWORK [mongosMain] starting new replica set monitor for replica set test-configRS with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.756+0000 m30999| 2015-07-19T23:38:48.756+0000 I NETWORK [mongosMain] ip-10-139-123-131:29000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.756+0000 m30999| 2015-07-19T23:38:48.756+0000 I NETWORK [ReplicaSetMonitorWatcher] starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.757+0000 m29000| 2015-07-19T23:38:48.757+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55236 #3 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.758+0000 m29000| 2015-07-19T23:38:48.758+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55237 #4 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.758+0000 m29000| 2015-07-19T23:38:48.758+0000 I COMMAND [conn4] CMD fsync: sync:1 lock:0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.771+0000 m29000| 2015-07-19T23:38:48.771+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55238 #5 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.773+0000 m30999| 2015-07-19T23:38:48.773+0000 I SHARDING [LockPinger] creating distributed lock ping thread for test-configRS/ip-10-139-123-131:29000 and process ip-10-139-123-131:30999:1437349128:1804289383 (sleeping for 30000ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.773+0000 m29000| 2015-07-19T23:38:48.773+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55239 #6 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.779+0000 m30999| 2015-07-19T23:38:48.779+0000 I SHARDING [mongosMain] distributed lock 'configUpgrade/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3508d2c1f750d1548347 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.779+0000 m30999| 2015-07-19T23:38:48.779+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:38:48.773+0000 by distributed lock pinger 
'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:30999:1437349128:1804289383', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.779+0000 m30999| 2015-07-19T23:38:48.779+0000 I SHARDING [mongosMain] starting upgrade of config server from v0 to v7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.780+0000 m30999| 2015-07-19T23:38:48.779+0000 I SHARDING [mongosMain] starting next upgrade step from v0 to v7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.782+0000 m30999| 2015-07-19T23:38:48.782+0000 I SHARDING [mongosMain] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:48.782+0000-55ac3508d2c1f750d1548348", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349128782), what: "starting upgrade of config database", ns: "config.version", details: { from: 0, to: 7 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.782+0000 m29000| 2015-07-19T23:38:48.782+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55240 #7 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.833+0000 m29000| 2015-07-19T23:38:48.833+0000 I SHARDING [conn7] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.834+0000 m29000| 2015-07-19T23:38:48.834+0000 I NETWORK [conn7] starting new replica set monitor for replica set test-configRS with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.834+0000 m29000| 2015-07-19T23:38:48.834+0000 I NETWORK [conn7] ip-10-139-123-131:29000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.834+0000 m29000| 2015-07-19T23:38:48.834+0000 I NETWORK [ReplicaSetMonitorWatcher] starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.835+0000 m30999| 2015-07-19T23:38:48.834+0000 I SHARDING [mongosMain] writing initial config version at v7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.889+0000 m30999| 2015-07-19T23:38:48.889+0000 I SHARDING [mongosMain] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:48.889+0000-55ac3508d2c1f750d154834a", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349128889), what: "finished upgrade of config database", ns: "config.version", details: { from: 0, to: 7 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.940+0000 m30999| 2015-07-19T23:38:48.940+0000 I SHARDING [mongosMain] upgrade of config server to v7 successful [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.940+0000 m29000| 2015-07-19T23:38:48.940+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55241 #8 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.941+0000 m30999| 2015-07-19T23:38:48.941+0000 I SHARDING [mongosMain] distributed lock 'configUpgrade/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
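The two distributed-lock events above bracket the config metadata bootstrap: the mongos on port 30999 took the 'configUpgrade' lock, wrote the initial config version at v7 into the config server replica set (test-configRS, a single-member set here), then released the lock. A minimal shell sketch of inspecting that marker afterwards; the exact field names inside the version document are an assumption for this 3.1.x line:

    // connect to the config server, e.g. mongo --port 29000
    var configDB = db.getSiblingDB("config");
    // the upgrade above wrote this document; in this era it carries
    // (assumed) fields such as minCompatibleVersion and currentVersion: 7
    printjson(configDB.getCollection("version").findOne());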
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.942+0000 m29000| 2015-07-19T23:38:48.941+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55242 #9 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:48.943+0000 2015-07-19T23:38:48.943+0000 W NETWORK [main] Failed to connect to 127.0.0.1:30999, reason: errno:111 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.050+0000 m29000| 2015-07-19T23:38:49.050+0000 I INDEX [conn7] build index on: config.chunks properties: { v: 1, unique: true, key: { ns: 1, min: 1 }, name: "ns_1_min_1", ns: "config.chunks" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.050+0000 m29000| 2015-07-19T23:38:49.050+0000 I INDEX [conn7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.051+0000 m29000| 2015-07-19T23:38:49.051+0000 I INDEX [conn7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.104+0000 m29000| 2015-07-19T23:38:49.103+0000 I INDEX [conn7] build index on: config.chunks properties: { v: 1, unique: true, key: { ns: 1, shard: 1, min: 1 }, name: "ns_1_shard_1_min_1", ns: "config.chunks" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.104+0000 m29000| 2015-07-19T23:38:49.103+0000 I INDEX [conn7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.105+0000 m29000| 2015-07-19T23:38:49.104+0000 I INDEX [conn7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.143+0000 2015-07-19T23:38:49.143+0000 W NETWORK [main] Failed to connect to 127.0.0.1:30999, reason: errno:111 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.157+0000 m29000| 2015-07-19T23:38:49.157+0000 I INDEX [conn7] build index on: config.chunks properties: { v: 1, unique: true, key: { ns: 1, lastmod: 1 }, name: "ns_1_lastmod_1", ns: "config.chunks" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.157+0000 m29000| 2015-07-19T23:38:49.157+0000 I INDEX [conn7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.158+0000 m29000| 2015-07-19T23:38:49.158+0000 I INDEX [conn7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.212+0000 m31202| 2015-07-19T23:38:49.212+0000 I REPL [ReplicationExecutor] syncing from: ip-10-139-123-131:31200 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.213+0000 m31200| 2015-07-19T23:38:49.212+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39347 #11 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.213+0000 m29000| 2015-07-19T23:38:49.213+0000 I INDEX [conn7] build index on: config.shards properties: { v: 1, unique: true, key: { host: 1 }, name: "host_1", ns: "config.shards" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.213+0000 m29000| 2015-07-19T23:38:49.213+0000 I INDEX [conn7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.214+0000 m29000| 2015-07-19T23:38:49.214+0000 I INDEX [conn7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.216+0000 m31200| 2015-07-19T23:38:49.216+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.266+0000 m29000| 2015-07-19T23:38:49.266+0000 I INDEX [conn7] build index on: config.locks properties: { v: 1, key: { ts: 1 }, name: "ts_1", ns: "config.locks" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.267+0000 m29000| 2015-07-19T23:38:49.266+0000 I INDEX [conn7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.268+0000 m29000| 2015-07-19T23:38:49.268+0000 I INDEX [conn7] build index done. scanned 1 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.273+0000 m31202| 2015-07-19T23:38:49.273+0000 I REPL [SyncSourceFeedback] setting syncSourceFeedback to ip-10-139-123-131:31200 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.274+0000 m31200| 2015-07-19T23:38:49.273+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39348 #12 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.320+0000 m29000| 2015-07-19T23:38:49.320+0000 I INDEX [conn7] build index on: config.locks properties: { v: 1, key: { state: 1, process: 1 }, name: "state_1_process_1", ns: "config.locks" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.320+0000 m29000| 2015-07-19T23:38:49.320+0000 I INDEX [conn7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.321+0000 m29000| 2015-07-19T23:38:49.321+0000 I INDEX [conn7] build index done. scanned 1 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.344+0000 2015-07-19T23:38:49.344+0000 W NETWORK [main] Failed to connect to 127.0.0.1:30999, reason: errno:111 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.374+0000 m29000| 2015-07-19T23:38:49.373+0000 I INDEX [conn7] build index on: config.lockpings properties: { v: 1, key: { ping: 1 }, name: "ping_1", ns: "config.lockpings" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.374+0000 m29000| 2015-07-19T23:38:49.373+0000 I INDEX [conn7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.375+0000 m29000| 2015-07-19T23:38:49.375+0000 I INDEX [conn7] build index done. scanned 1 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.430+0000 m29000| 2015-07-19T23:38:49.429+0000 I INDEX [conn7] build index on: config.tags properties: { v: 1, unique: true, key: { ns: 1, min: 1 }, name: "ns_1_min_1", ns: "config.tags" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.430+0000 m29000| 2015-07-19T23:38:49.430+0000 I INDEX [conn7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.431+0000 m29000| 2015-07-19T23:38:49.431+0000 I INDEX [conn7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.432+0000 m30999| 2015-07-19T23:38:49.432+0000 I SHARDING [Balancer] about to contact config servers and shards [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.432+0000 m30999| 2015-07-19T23:38:49.432+0000 I SHARDING [Balancer] config servers and shards contacted successfully [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.432+0000 m30999| 2015-07-19T23:38:49.432+0000 I SHARDING [Balancer] balancer id: ip-10-139-123-131:30999 started [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.451+0000 m30999| 2015-07-19T23:38:49.451+0000 I NETWORK [mongosMain] waiting for connections on port 30999 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.488+0000 m30999| 2015-07-19T23:38:49.488+0000 I SHARDING [Balancer] distributed lock 'balancer/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3509d2c1f750d154834c [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.542+0000 m30999| 2015-07-19T23:38:49.542+0000 I SHARDING [Balancer] distributed lock 'balancer/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.545+0000 m30999| 2015-07-19T23:38:49.544+0000 I NETWORK [mongosMain] connection accepted from 127.0.0.1:47275 #1 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.547+0000 2015-07-19T23:38:49.546+0000 I - [main] shell: started program (sh3776): /data/mci/src/mongos --port 30998 --configdb test-configRS/ip-10-139-123-131:29000 --chunkSize 50 --setParameter enableTestCommands=1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.547+0000 2015-07-19T23:38:49.547+0000 W NETWORK [main] Failed to connect to 127.0.0.1:30998, reason: errno:111 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.554+0000 m30998| 2015-07-19T23:38:49.554+0000 W SHARDING [main] running with less than 3 config servers should be done only for testing purposes and is not recommended for production [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.559+0000 m30998| 2015-07-19T23:38:49.558+0000 I CONTROL [main] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.559+0000 m30998| 2015-07-19T23:38:49.559+0000 I CONTROL [main] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.559+0000 m30998| 2015-07-19T23:38:49.559+0000 I CONTROL [main] ** Not recommended for production. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.559+0000 m30998| 2015-07-19T23:38:49.559+0000 I CONTROL [main] [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.559+0000 m30998| 2015-07-19T23:38:49.559+0000 I SHARDING [mongosMain] MongoS version 3.1.6-pre- starting: pid=3776 port=30998 64-bit host=ip-10-139-123-131 (--help for usage) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.560+0000 m30998| 2015-07-19T23:38:49.559+0000 I CONTROL [mongosMain] db version v3.1.6-pre- [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.560+0000 m30998| 2015-07-19T23:38:49.559+0000 I CONTROL [mongosMain] git version: 92c6ee5cedf09928720b79592981dc1c6aeb0482 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.560+0000 m30998| 2015-07-19T23:38:49.559+0000 I CONTROL [mongosMain] OpenSSL version: OpenSSL 1.0.1e-fips 11 Feb 2013 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.560+0000 m30998| 2015-07-19T23:38:49.559+0000 I CONTROL [mongosMain] allocator: tcmalloc [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.560+0000 m30998| 2015-07-19T23:38:49.559+0000 I CONTROL [mongosMain] options: { net: { port: 30998 }, setParameter: { enableTestCommands: "1" }, sharding: { chunkSize: 50, configDB: "test-configRS/ip-10-139-123-131:29000" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.560+0000 m30998| 2015-07-19T23:38:49.560+0000 I NETWORK [mongosMain] starting new replica set monitor for replica set test-configRS with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.561+0000 m30998| 2015-07-19T23:38:49.560+0000 I NETWORK [mongosMain] ip-10-139-123-131:29000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.561+0000 m30998| 2015-07-19T23:38:49.560+0000 I NETWORK [ReplicaSetMonitorWatcher] starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.561+0000 m29000| 2015-07-19T23:38:49.560+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55250 #10 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.561+0000 m29000| 2015-07-19T23:38:49.561+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55251 #11 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.563+0000 m29000| 2015-07-19T23:38:49.562+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55252 #12 (12 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.564+0000 m29000| 2015-07-19T23:38:49.563+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55253 #13 (13 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.748+0000 2015-07-19T23:38:49.747+0000 W NETWORK [main] Failed to connect to 127.0.0.1:30998, reason: errno:111 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.894+0000 m31102| 2015-07-19T23:38:49.894+0000 I REPL [ReplicationExecutor] syncing from: ip-10-139-123-131:31100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.896+0000 m31101| 2015-07-19T23:38:49.894+0000 I REPL [ReplicationExecutor] syncing from: ip-10-139-123-131:31100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.896+0000 m31100| 2015-07-19T23:38:49.894+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47521 #9 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.896+0000 m31100| 2015-07-19T23:38:49.896+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47522 #10 (6 connections now 
open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.948+0000 2015-07-19T23:38:49.948+0000 W NETWORK [main] Failed to connect to 127.0.0.1:30998, reason: errno:111 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.949+0000 m30998| 2015-07-19T23:38:49.948+0000 I SHARDING [Balancer] about to contact config servers and shards [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.950+0000 m29000| 2015-07-19T23:38:49.949+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55258 #14 (14 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.950+0000 m30998| 2015-07-19T23:38:49.950+0000 I SHARDING [Balancer] config servers and shards contacted successfully [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.950+0000 m30998| 2015-07-19T23:38:49.950+0000 I SHARDING [Balancer] balancer id: ip-10-139-123-131:30998 started [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:49.969+0000 m30998| 2015-07-19T23:38:49.968+0000 I NETWORK [mongosMain] waiting for connections on port 30998 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.002+0000 m29000| 2015-07-19T23:38:50.002+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55259 #15 (15 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.004+0000 m30998| 2015-07-19T23:38:50.004+0000 I SHARDING [LockPinger] creating distributed lock ping thread for test-configRS/ip-10-139-123-131:29000 and process ip-10-139-123-131:30998:1437349129:1804289383 (sleeping for 30000ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.005+0000 m30998| 2015-07-19T23:38:50.005+0000 I SHARDING [Balancer] distributed lock 'balancer/ip-10-139-123-131:30998:1437349129:1804289383' acquired, ts : 55ac350a230355f00547ef1f [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.006+0000 m30998| 2015-07-19T23:38:50.005+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:38:50.004+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:30998:1437349129:1804289383', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.057+0000 m29000| 2015-07-19T23:38:50.057+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55260 #16 (16 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.058+0000 m30998| 2015-07-19T23:38:50.057+0000 I SHARDING [Balancer] distributed lock 'balancer/ip-10-139-123-131:30998:1437349129:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.149+0000 m30998| 2015-07-19T23:38:50.149+0000 I NETWORK [mongosMain] connection accepted from 127.0.0.1:52198 #1 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.204+0000 m29000| 2015-07-19T23:38:50.204+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55262 #17 (17 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.204+0000 Waiting for active hosts... [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.204+0000 Waiting for the balancer lock... [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.205+0000 Waiting again for active hosts after balancer is off... 
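At this point both routers are up (30999 and 30998), each has started its Balancer thread and LockPinger, and the harness waits for active hosts, checks the balancer lock, and turns the balancer off so that chunk placement stays under the test's control rather than the background balancer's. A rough equivalent from a shell connected to either mongos, using the standard sh.* helpers:

    // connect to a mongos, e.g. mongo --port 30999
    sh.stopBalancer();              // persists "balancer off" in config.settings
    print(sh.getBalancerState());   // false once the change is visible
    // the lock the harness waits on lives in config.locks
    printjson(db.getSiblingDB("config").locks.findOne({ _id: "balancer" }));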
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.205+0000 ShardingTest undefined going to add shard : test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.206+0000 m30999| 2015-07-19T23:38:50.206+0000 I NETWORK [conn1] starting new replica set monitor for replica set test-rs0 with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.206+0000 m30999| 2015-07-19T23:38:50.206+0000 I NETWORK [conn1] ip-10-139-123-131:31100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.206+0000 m30999| 2015-07-19T23:38:50.206+0000 I NETWORK [conn1] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.206+0000 m30999| 2015-07-19T23:38:50.206+0000 I NETWORK [conn1] ip-10-139-123-131:31101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.206+0000 m30999| 2015-07-19T23:38:50.206+0000 I NETWORK [conn1] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.206+0000 m30999| 2015-07-19T23:38:50.206+0000 I NETWORK [conn1] ip-10-139-123-131:31102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.206+0000 m31101| 2015-07-19T23:38:50.206+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46461 #6 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.207+0000 m31100| 2015-07-19T23:38:50.207+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47530 #11 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.208+0000 m31100| 2015-07-19T23:38:50.208+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47531 #12 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.209+0000 m30999| 2015-07-19T23:38:50.209+0000 I SHARDING [conn1] going to add shard: { _id: "test-rs0", host: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.216+0000 m31200| 2015-07-19T23:38:50.215+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.219+0000 m31200| 2015-07-19T23:38:50.218+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.261+0000 m30999| 2015-07-19T23:38:50.261+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:50.260+0000-55ac350ad2c1f750d154834e", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349130261), what: "addShard", ns: "", details: { name: "test-rs0", host: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.312+0000 { "shardAdded" : "test-rs0", "ok" : 1 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.312+0000 ShardingTest undefined going to add shard : test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.312+0000 m30999| 2015-07-19T23:38:50.312+0000 I NETWORK [conn1] starting new replica set monitor for replica set test-rs1 with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.313+0000 m30999| 2015-07-19T23:38:50.312+0000 I NETWORK [conn1] ip-10-139-123-131:31200 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.313+0000 m30999| 2015-07-19T23:38:50.312+0000 I NETWORK [conn1] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.313+0000 m30999| 2015-07-19T23:38:50.312+0000 I NETWORK [conn1] ip-10-139-123-131:31201 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.313+0000 m30999| 2015-07-19T23:38:50.312+0000 I NETWORK [conn1] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.313+0000 m30999| 2015-07-19T23:38:50.312+0000 I NETWORK [conn1] ip-10-139-123-131:31202 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.313+0000 m31201| 2015-07-19T23:38:50.313+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:35733 #6 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.314+0000 m31200| 2015-07-19T23:38:50.314+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39369 #13 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.315+0000 m31200| 2015-07-19T23:38:50.315+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39370 #14 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.316+0000 m30999| 2015-07-19T23:38:50.316+0000 I SHARDING [conn1] going to add shard: { _id: "test-rs1", host: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.344+0000 m31102| 2015-07-19T23:38:50.343+0000 I REPL [SyncSourceFeedback] setting syncSourceFeedback to ip-10-139-123-131:31100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.344+0000 m31100| 2015-07-19T23:38:50.344+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47535 #13 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.367+0000 m31101| 2015-07-19T23:38:50.366+0000 I REPL [SyncSourceFeedback] setting syncSourceFeedback to ip-10-139-123-131:31100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.367+0000 m30999| 2015-07-19T23:38:50.367+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:50.367+0000-55ac350ad2c1f750d154834f", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349130367), what: "addShard", ns: "", details: { name: "test-rs1", host: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.367+0000 m31100| 2015-07-19T23:38:50.367+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47536 #14 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.418+0000 { "shardAdded" : "test-rs1", "ok" : 1 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.470+0000 Waiting for active hosts... [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.471+0000 Waiting for the balancer lock... [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.471+0000 Waiting again for active hosts after balancer is off... 
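Both shardAdded acknowledgements came back with ok: 1, so the cluster now holds two replica-set shards plus the test-configRS config server. The manual equivalent of what ShardingTest just did, using the same seed-list syntax (setName/host:port,...) the log shows:

    // from a mongos shell
    sh.addShard("test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102");
    sh.addShard("test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202");
    sh.status();   // should list both shards under the "shards" section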
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.477+0000 setting random seed: 1437349130477 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.478+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.478+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.478+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.478+0000 jstests/concurrency/fsm_workloads/yield_id_hack.js [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.478+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.479+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.479+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.481+0000 m30999| 2015-07-19T23:38:50.481+0000 I SHARDING [conn1] distributed lock 'db0/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac350ad2c1f750d1548350 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.482+0000 m30999| 2015-07-19T23:38:50.482+0000 I SHARDING [conn1] Placing [db0] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.483+0000 m30999| 2015-07-19T23:38:50.482+0000 I SHARDING [conn1] Enabling sharding for database [db0] in config db [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.537+0000 m30999| 2015-07-19T23:38:50.536+0000 I SHARDING [conn1] distributed lock 'db0/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.538+0000 m31100| 2015-07-19T23:38:50.538+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47537 #15 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.539+0000 m31100| 2015-07-19T23:38:50.539+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47538 #16 (12 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.571+0000 m31100| 2015-07-19T23:38:50.571+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 673ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.572+0000 m31100| 2015-07-19T23:38:50.572+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 672ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.573+0000 m31100| 2015-07-19T23:38:50.573+0000 I INDEX [conn16] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.573+0000 m31100| 2015-07-19T23:38:50.573+0000 I INDEX [conn16] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.576+0000 m31100| 2015-07-19T23:38:50.576+0000 I INDEX [conn16] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.577+0000 m30999| 2015-07-19T23:38:50.577+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db0.coll0", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.578+0000 m30999| 2015-07-19T23:38:50.578+0000 I SHARDING [conn1] distributed lock 'db0.coll0/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac350ad2c1f750d1548351 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.578+0000 m31102| 2015-07-19T23:38:50.578+0000 I INDEX [repl writer worker 2] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.578+0000 m31102| 2015-07-19T23:38:50.578+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.579+0000 m30999| 2015-07-19T23:38:50.579+0000 I SHARDING [conn1] enable sharding on: db0.coll0 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.579+0000 m30999| 2015-07-19T23:38:50.579+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:50.579+0000-55ac350ad2c1f750d1548352", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349130579), what: "shardCollection.start", ns: "db0.coll0", details: { shardKey: { _id: "hashed" }, collection: "db0.coll0", primary: "test-rs0:test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.579+0000 m31101| 2015-07-19T23:38:50.579+0000 I INDEX [repl writer worker 2] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.579+0000 m31101| 2015-07-19T23:38:50.579+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.580+0000 m31102| 2015-07-19T23:38:50.580+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.584+0000 m31101| 2015-07-19T23:38:50.584+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.630+0000 m30999| 2015-07-19T23:38:50.630+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db0.coll0 using new epoch 55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.733+0000 m30999| 2015-07-19T23:38:50.732+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db0.coll0: 0ms sequenceNumber: 2 version: 1|1||55ac350ad2c1f750d1548353 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.787+0000 m30999| 2015-07-19T23:38:50.787+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db0.coll0: 0ms sequenceNumber: 3 version: 1|1||55ac350ad2c1f750d1548353 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.787+0000 m31100| 2015-07-19T23:38:50.787+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47539 #17 (13 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.788+0000 m31100| 2015-07-19T23:38:50.788+0000 I SHARDING [conn17] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.789+0000 m31100| 2015-07-19T23:38:50.788+0000 I NETWORK [conn17] starting new replica set monitor for replica set test-configRS with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.789+0000 m31100| 2015-07-19T23:38:50.788+0000 I NETWORK [conn17] ip-10-139-123-131:29000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.789+0000 m31100| 2015-07-19T23:38:50.789+0000 I NETWORK [ReplicaSetMonitorWatcher] starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.789+0000 m31100| 2015-07-19T23:38:50.789+0000 I SHARDING [conn17] remote client 10.139.123.131:47539 initialized this host (test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102) as shard test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.789+0000 m31100| 2015-07-19T23:38:50.789+0000 I SHARDING [conn17] remotely refreshing metadata for db0.coll0 with requested shard version 1|1||55ac350ad2c1f750d1548353, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.790+0000 m29000| 2015-07-19T23:38:50.789+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55274 #18 (18 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.790+0000 m29000| 2015-07-19T23:38:50.790+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55275 #19 (19 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.791+0000 m31100| 2015-07-19T23:38:50.791+0000 I SHARDING [conn17] collection db0.coll0 was previously unsharded, new metadata loaded with shard version 1|1||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.791+0000 m31100| 2015-07-19T23:38:50.791+0000 I SHARDING [conn17] collection version was loaded at version 1|1||55ac350ad2c1f750d1548353, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.791+0000 m30999| 2015-07-19T23:38:50.791+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:50.791+0000-55ac350ad2c1f750d1548354", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349130791), what: "shardCollection", ns: "db0.coll0", details: { version: "1|1||55ac350ad2c1f750d1548353" } } 
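The first workload (jstests/concurrency/fsm_workloads/yield_id_hack.js) begins here: the harness creates db0 with test-rs0 as its primary shard, enables sharding on it, and shards db0.coll0 on a hashed _id key, creating two initial chunks under epoch 55ac350ad2c1f750d1548353. A sketch of the shell equivalent of those two steps:

    // from a mongos shell
    sh.enableSharding("db0");                            // "Enabling sharding for database [db0]"
    sh.shardCollection("db0.coll0", { _id: "hashed" });  // builds the _id_hashed index on the
                                                         // primary shard and creates the initial
                                                         // chunks, which the entries below then
                                                         // move and split across the two shards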
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.843+0000 m30999| 2015-07-19T23:38:50.842+0000 I SHARDING [conn1] distributed lock 'db0.coll0/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.843+0000 m30999| 2015-07-19T23:38:50.843+0000 I SHARDING [conn1] moving chunk ns: db0.coll0 moving ( ns: db0.coll0, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.843+0000 m31100| 2015-07-19T23:38:50.843+0000 I SHARDING [conn15] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.844+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] starting new replica set monitor for replica set test-rs0 with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.844+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] ip-10-139-123-131:31100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.844+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.844+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] ip-10-139-123-131:31101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.844+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.844+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] ip-10-139-123-131:31102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.845+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] starting new replica set monitor for replica set test-rs1 with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.845+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] ip-10-139-123-131:31200 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.845+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.845+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] ip-10-139-123-131:31201 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.845+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.845+0000 m31100| 2015-07-19T23:38:50.843+0000 I NETWORK [conn15] ip-10-139-123-131:31202 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.846+0000 m31100| 2015-07-19T23:38:50.843+0000 I SHARDING [conn15] received moveChunk request: { moveChunk: "db0.coll0", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", to: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac350ad2c1f750d1548353') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.846+0000 m29000| 2015-07-19T23:38:50.844+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55276 #20 (20 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.847+0000 m31100| 2015-07-19T23:38:50.846+0000 I SHARDING [LockPinger] creating distributed lock ping thread for test-configRS/ip-10-139-123-131:29000 and process ip-10-139-123-131:31100:1437349130:1993228155 (sleeping for 
30000ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.847+0000 m29000| 2015-07-19T23:38:50.847+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55277 #21 (21 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.848+0000 m31100| 2015-07-19T23:38:50.847+0000 I SHARDING [conn15] distributed lock 'db0.coll0/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac350a68c42881b59cba10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.848+0000 m31100| 2015-07-19T23:38:50.848+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:50.848+0000-55ac350a68c42881b59cba11", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349130848), what: "moveChunk.start", ns: "db0.coll0", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.848+0000 m29000| 2015-07-19T23:38:50.848+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55278 #22 (22 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.848+0000 m31100| 2015-07-19T23:38:50.848+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:38:50.846+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:31100:1437349130:1993228155', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.899+0000 m31100| 2015-07-19T23:38:50.899+0000 I SHARDING [conn15] remotely refreshing metadata for db0.coll0 based on current shard version 1|1||55ac350ad2c1f750d1548353, current metadata version is 1|1||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.900+0000 m31100| 2015-07-19T23:38:50.900+0000 I SHARDING [conn15] metadata of collection db0.coll0 already up to date (shard version : 1|1||55ac350ad2c1f750d1548353, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.900+0000 m31100| 2015-07-19T23:38:50.900+0000 I SHARDING [conn15] moveChunk request accepted at version 1|1||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.900+0000 m31100| 2015-07-19T23:38:50.900+0000 I SHARDING [conn15] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.901+0000 m31201| 2015-07-19T23:38:50.901+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:35746 #7 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.902+0000 m31200| 2015-07-19T23:38:50.902+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39382 #15 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.903+0000 m31200| 2015-07-19T23:38:50.902+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39383 #16 (12 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.903+0000 m31200| 2015-07-19T23:38:50.903+0000 I SHARDING [conn16] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.903+0000 m31200| 2015-07-19T23:38:50.903+0000 I NETWORK [conn16] starting new replica set monitor for replica set test-configRS with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.904+0000 m31200| 2015-07-19T23:38:50.903+0000 I NETWORK [conn16] ip-10-139-123-131:29000 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.904+0000 m31200| 2015-07-19T23:38:50.903+0000 I NETWORK [ReplicaSetMonitorWatcher] starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.904+0000 m31200| 2015-07-19T23:38:50.903+0000 I SHARDING [conn16] remote client 10.139.123.131:39383 initialized this host as shard test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.904+0000 m31200| 2015-07-19T23:38:50.904+0000 I SHARDING [conn16] remotely refreshing metadata for db0.coll0, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.904+0000 m29000| 2015-07-19T23:38:50.904+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55282 #23 (23 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.905+0000 m29000| 2015-07-19T23:38:50.905+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55283 #24 (24 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.906+0000 m31200| 2015-07-19T23:38:50.906+0000 I SHARDING [conn16] collection db0.coll0 was previously unsharded, new metadata loaded with shard version 0|0||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.906+0000 m31200| 2015-07-19T23:38:50.906+0000 I SHARDING [conn16] collection version was loaded at version 1|1||55ac350ad2c1f750d1548353, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.906+0000 m31200| 2015-07-19T23:38:50.906+0000 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db0.coll0 from test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102 at epoch 55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.906+0000 m31200| 2015-07-19T23:38:50.906+0000 I NETWORK [migrateThread] starting new replica set monitor for replica set test-rs0 with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.906+0000 m31200| 2015-07-19T23:38:50.906+0000 I NETWORK [migrateThread] ip-10-139-123-131:31100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.907+0000 m31200| 2015-07-19T23:38:50.906+0000 I NETWORK [migrateThread] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.907+0000 m31200| 2015-07-19T23:38:50.906+0000 I NETWORK [migrateThread] ip-10-139-123-131:31101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.907+0000 m31200| 2015-07-19T23:38:50.906+0000 I NETWORK [migrateThread] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.907+0000 m31200| 2015-07-19T23:38:50.906+0000 I NETWORK [migrateThread] ip-10-139-123-131:31102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.907+0000 m31101| 2015-07-19T23:38:50.906+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46482 #7 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.907+0000 m31100| 2015-07-19T23:38:50.907+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db0.coll0", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.907+0000 m31100| 2015-07-19T23:38:50.907+0000 I NETWORK 
[initandlisten] connection accepted from 10.139.123.131:47551 #18 (14 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.908+0000 m31100| 2015-07-19T23:38:50.908+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47552 #19 (15 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.910+0000 m31100| 2015-07-19T23:38:50.909+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db0.coll0", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.910+0000 m31200| 2015-07-19T23:38:50.910+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 689ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.911+0000 m31200| 2015-07-19T23:38:50.910+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 692ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.912+0000 m31200| 2015-07-19T23:38:50.912+0000 I INDEX [migrateThread] build index on: db0.coll0 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.912+0000 m31200| 2015-07-19T23:38:50.912+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.913+0000 m31200| 2015-07-19T23:38:50.913+0000 I INDEX [migrateThread] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.914+0000 m31200| 2015-07-19T23:38:50.913+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.914+0000 m31100| 2015-07-19T23:38:50.914+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db0.coll0", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.916+0000 m31200| 2015-07-19T23:38:50.915+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.916+0000 m31200| 2015-07-19T23:38:50.916+0000 I SHARDING [migrateThread] Deleter starting delete for: db0.coll0 from { _id: 0 } -> { _id: MaxKey }, with opId: 143 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.916+0000 m31200| 2015-07-19T23:38:50.916+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db0.coll0 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.920+0000 m31202| 2015-07-19T23:38:50.920+0000 I INDEX [repl writer worker 7] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.920+0000 m31202| 2015-07-19T23:38:50.920+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.921+0000 m31201| 2015-07-19T23:38:50.921+0000 I INDEX [repl writer worker 4] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.921+0000 m31201| 2015-07-19T23:38:50.921+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.921+0000 m31202| 2015-07-19T23:38:50.921+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.922+0000 m31200| 2015-07-19T23:38:50.921+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.922+0000 m31200| 2015-07-19T23:38:50.922+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db0.coll0' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.922+0000 m31201| 2015-07-19T23:38:50.922+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.922+0000 m31100| 2015-07-19T23:38:50.922+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db0.coll0", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.923+0000 m31100| 2015-07-19T23:38:50.922+0000 I SHARDING [conn15] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.923+0000 m31100| 2015-07-19T23:38:50.922+0000 I SHARDING [conn15] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.923+0000 m31100| 2015-07-19T23:38:50.922+0000 I SHARDING [conn15] moveChunk setting version to: 2|0||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.923+0000 m31200| 2015-07-19T23:38:50.923+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39389 #17 (13 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.932+0000 m31200| 2015-07-19T23:38:50.932+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db0.coll0' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.932+0000 m31200| 2015-07-19T23:38:50.932+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:50.932+0000-55ac350ad9a63f6196b1724d", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349130932), what: "moveChunk.to", ns: "db0.coll0", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 9, step 2 of 5: 5, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 10, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.933+0000 m29000| 2015-07-19T23:38:50.933+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55288 #25 (25 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.984+0000 m31100| 2015-07-19T23:38:50.984+0000 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db0.coll0", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.985+0000 m31100| 2015-07-19T23:38:50.984+0000 I SHARDING [conn15] moveChunk updating self version to: 2|1||55ac350ad2c1f750d1548353 through { _id: MinKey } -> { _id: 0 } for collection 'db0.coll0' [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:50.985+0000 m31100| 2015-07-19T23:38:50.985+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:50.985+0000-55ac350a68c42881b59cba12", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349130985), what: "moveChunk.commit", ns: "db0.coll0", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.036+0000 m31100| 2015-07-19T23:38:51.035+0000 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit 
critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.036+0000 m31100| 2015-07-19T23:38:51.036+0000 I SHARDING [conn15] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.036+0000 m31100| 2015-07-19T23:38:51.036+0000 I SHARDING [conn15] Deleter starting delete for: db0.coll0 from { _id: 0 } -> { _id: MaxKey }, with opId: 183 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.036+0000 m31100| 2015-07-19T23:38:51.036+0000 I SHARDING [conn15] rangeDeleter deleted 0 documents for db0.coll0 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.036+0000 m31100| 2015-07-19T23:38:51.036+0000 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.037+0000 m29000| 2015-07-19T23:38:51.036+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55289 #26 (26 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.037+0000 m31100| 2015-07-19T23:38:51.037+0000 I SHARDING [conn15] distributed lock 'db0.coll0/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.037+0000 m31100| 2015-07-19T23:38:51.037+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:51.037+0000-55ac350b68c42881b59cba13", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349131037), what: "moveChunk.from", ns: "db0.coll0", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 56, step 3 of 6: 6, step 4 of 6: 16, step 5 of 6: 113, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.088+0000 m31100| 2015-07-19T23:38:51.088+0000 I COMMAND [conn15] command db0.coll0 command: moveChunk { moveChunk: "db0.coll0", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", to: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac350ad2c1f750d1548353') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 245ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.089+0000 m30999| 2015-07-19T23:38:51.089+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db0.coll0: 0ms sequenceNumber: 4 version: 2|1||55ac350ad2c1f750d1548353 based on: 1|1||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.089+0000 m31100| 2015-07-19T23:38:51.089+0000 I SHARDING [conn15] received splitChunk request: { splitChunk: "db0.coll0", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac350ad2c1f750d1548353') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.090+0000 m31100| 2015-07-19T23:38:51.090+0000 I SHARDING [conn15] distributed lock 
'db0.coll0/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac350b68c42881b59cba14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.091+0000 m31100| 2015-07-19T23:38:51.090+0000 I SHARDING [conn15] remotely refreshing metadata for db0.coll0 based on current shard version 2|0||55ac350ad2c1f750d1548353, current metadata version is 2|0||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.091+0000 m31100| 2015-07-19T23:38:51.091+0000 I SHARDING [conn15] updating metadata for db0.coll0 from shard version 2|0||55ac350ad2c1f750d1548353 to shard version 2|1||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.091+0000 m31100| 2015-07-19T23:38:51.091+0000 I SHARDING [conn15] collection version was loaded at version 2|1||55ac350ad2c1f750d1548353, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.091+0000 m31100| 2015-07-19T23:38:51.091+0000 I SHARDING [conn15] splitChunk accepted at version 2|1||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.092+0000 m31100| 2015-07-19T23:38:51.091+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:51.091+0000-55ac350b68c42881b59cba15", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349131091), what: "split", ns: "db0.coll0", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac350ad2c1f750d1548353') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac350ad2c1f750d1548353') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.143+0000 m31100| 2015-07-19T23:38:51.143+0000 I SHARDING [conn15] distributed lock 'db0.coll0/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. 
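The sequence above is one complete migration round trip: the donor shard (test-rs0) commits the chunk to test-rs1, the recipient confirms, the distributed lock is released, and each shard then splits its remaining chunk at the midpoint of its hashed range (the splitKeys of roughly +/-2^62 are the quarter points of the signed 64-bit hash space, which is why every hashed collection in this suite splits at the same values). The same traffic can be produced by hand; the following is a minimal sketch, assuming a mongo shell connected to one of the mongos routers (the connection itself is not shown in the log), with the namespace, shard name, and split point taken from the entries above and _waitForDelete being the router-facing spelling of the waitForDelete option seen in the logged command:

    var admin = db.getSiblingDB("admin");

    // Move the upper hashed-key chunk to test-rs1 and wait for the range
    // deleter, as the logged moveChunk (waitForDelete: true) does. With a
    // hashed shard key the chunk is addressed by its exact bounds rather
    // than by an example document.
    assert.commandWorked(admin.runCommand({
        moveChunk: "db0.coll0",
        bounds: [ { _id: NumberLong(0) }, { _id: MaxKey } ],
        to: "test-rs1",
        _waitForDelete: true
    }));

    // Split the remaining lower chunk at the midpoint of its hashed range,
    // mirroring the splitChunk request the donor shard just serviced.
    assert.commandWorked(admin.runCommand({
        split: "db0.coll0",
        middle: { _id: NumberLong("-4611686018427387902") }
    }));
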
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.144+0000 m30999| 2015-07-19T23:38:51.143+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db0.coll0: 0ms sequenceNumber: 5 version: 2|3||55ac350ad2c1f750d1548353 based on: 2|1||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.144+0000 m31200| 2015-07-19T23:38:51.144+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39392 #18 (14 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.145+0000 m31200| 2015-07-19T23:38:51.145+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db0.coll0", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac350ad2c1f750d1548353') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.145+0000 m29000| 2015-07-19T23:38:51.145+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55291 #27 (27 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.147+0000 m31200| 2015-07-19T23:38:51.147+0000 I SHARDING [LockPinger] creating distributed lock ping thread for test-configRS/ip-10-139-123-131:29000 and process ip-10-139-123-131:31200:1437349131:182555922 (sleeping for 30000ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.148+0000 m29000| 2015-07-19T23:38:51.148+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55292 #28 (28 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.148+0000 m31200| 2015-07-19T23:38:51.148+0000 I SHARDING [conn18] distributed lock 'db0.coll0/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac350bd9a63f6196b1724e [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.149+0000 m31200| 2015-07-19T23:38:51.148+0000 I SHARDING [conn18] remotely refreshing metadata for db0.coll0 based on current shard version 0|0||55ac350ad2c1f750d1548353, current metadata version is 1|1||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.149+0000 m31200| 2015-07-19T23:38:51.149+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:38:51.147+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:31200:1437349131:182555922', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.149+0000 m31200| 2015-07-19T23:38:51.149+0000 I SHARDING [conn18] updating metadata for db0.coll0 from shard version 0|0||55ac350ad2c1f750d1548353 to shard version 2|0||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.149+0000 m31200| 2015-07-19T23:38:51.149+0000 I SHARDING [conn18] collection version was loaded at version 2|3||55ac350ad2c1f750d1548353, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.149+0000 m31200| 2015-07-19T23:38:51.149+0000 I SHARDING [conn18] splitChunk accepted at version 2|0||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.150+0000 m31200| 2015-07-19T23:38:51.149+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:51.149+0000-55ac350bd9a63f6196b1724f", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349131149), what: "split", ns: "db0.coll0", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: 
{ _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac350ad2c1f750d1548353') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac350ad2c1f750d1548353') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.201+0000 m29000| 2015-07-19T23:38:51.201+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55293 #29 (29 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.202+0000 m31200| 2015-07-19T23:38:51.201+0000 I SHARDING [conn18] distributed lock 'db0.coll0/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.202+0000 m30999| 2015-07-19T23:38:51.202+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db0.coll0: 0ms sequenceNumber: 6 version: 2|5||55ac350ad2c1f750d1548353 based on: 2|3||55ac350ad2c1f750d1548353 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.214+0000 m31100| 2015-07-19T23:38:51.214+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:170 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 635ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.215+0000 m31100| 2015-07-19T23:38:51.214+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:170 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 635ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.215+0000 m31200| 2015-07-19T23:38:51.214+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39396 #19 (15 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.216+0000 m31200| 2015-07-19T23:38:51.215+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:170 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 294ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.216+0000 m31200| 2015-07-19T23:38:51.215+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:318 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 294ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.252+0000 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.315+0000 m30998| 2015-07-19T23:38:51.315+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35636 #2 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.327+0000 m30998| 2015-07-19T23:38:51.327+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35637 #3 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.328+0000 m30999| 2015-07-19T23:38:51.327+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:56997 #2 (2 connections now open) 
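After the second split, db0.coll0 holds four chunks, two per shard, and the mongos ChunkManager has reloaded version 2|5. The layout those ChunkManager lines summarize is persisted on the config servers and can be read back directly; a small sketch, assuming a shell connected to a mongos (collection name from the log):

    var config = db.getSiblingDB("config");

    // One document per chunk: owning shard, bounds, and the lastmod that
    // the "version 2|5||..." log lines refer to.
    config.chunks.find({ ns: "db0.coll0" }).sort({ min: 1 }).forEach(function(c) {
        print(c.shard + "  " + tojson(c.min) + " -> " + tojson(c.max) +
              "  lastmod: " + tojson(c.lastmod));
    });

    // The moveChunk.* and split events that "about to log metadata event"
    // announces end up in the changelog collection.
    config.changelog.find({ ns: "db0.coll0" }, { what: 1, time: 1, _id: 0 })
          .sort({ time: 1 }).forEach(printjson);
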
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.333+0000 m30999| 2015-07-19T23:38:51.333+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:56998 #3 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.335+0000 m30999| 2015-07-19T23:38:51.335+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:56999 #4 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.341+0000 setting random seed: 1816296009346 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.341+0000 setting random seed: 2857075459323 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.342+0000 setting random seed: 4313179994933 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.342+0000 setting random seed: 7658807518891 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.343+0000 setting random seed: 8256903942674 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.344+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] starting new replica set monitor for replica set test-rs0 with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.345+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] ip-10-139-123-131:31100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.345+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.345+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] ip-10-139-123-131:31101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.345+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.345+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] ip-10-139-123-131:31102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.345+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] starting new replica set monitor for replica set test-rs1 with seeds [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.346+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] ip-10-139-123-131:31200 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.346+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.346+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] ip-10-139-123-131:31201 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.346+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] , [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.346+0000 m30998| 2015-07-19T23:38:51.344+0000 I NETWORK [conn3] ip-10-139-123-131:31202 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.346+0000 m30998| 2015-07-19T23:38:51.344+0000 I SHARDING [conn3] ChunkManager: time to load chunks for db0.coll0: 0ms sequenceNumber: 2 version: 2|5||55ac350ad2c1f750d1548353 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.347+0000 m31200| 2015-07-19T23:38:51.345+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39402 #20 (16 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.348+0000 m31100| 2015-07-19T23:38:51.347+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47567 #20 (16 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.349+0000 m31100| 2015-07-19T23:38:51.348+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 
ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.349+0000 m31100| 2015-07-19T23:38:51.348+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 112ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.350+0000 m31200| 2015-07-19T23:38:51.350+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39404 #21 (17 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.352+0000 m31200| 2015-07-19T23:38:51.352+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.357+0000 m31100| 2015-07-19T23:38:51.357+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47569 #21 (17 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.358+0000 m31200| 2015-07-19T23:38:51.358+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39406 #22 (18 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.359+0000 m31200| 2015-07-19T23:38:51.359+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39407 #23 (19 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.360+0000 m31200| 2015-07-19T23:38:51.360+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39408 #24 (20 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.370+0000 m31200| 2015-07-19T23:38:51.370+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39409 #25 (21 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.370+0000 m31100| 2015-07-19T23:38:51.370+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47574 #22 (18 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.382+0000 m31100| 2015-07-19T23:38:51.380+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47575 #23 (19 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.383+0000 m31100| 2015-07-19T23:38:51.380+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47576 #24 (20 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.394+0000 m31200| 2015-07-19T23:38:51.394+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39413 #26 (22 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.403+0000 m31100| 2015-07-19T23:38:51.403+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47578 #25 (21 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.599+0000 m31100| 2015-07-19T23:38:51.599+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 
writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.600+0000 m31100| 2015-07-19T23:38:51.599+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.920+0000 m31200| 2015-07-19T23:38:51.920+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 105ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:51.921+0000 m31200| 2015-07-19T23:38:51.920+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:52.091+0000 m31100| 2015-07-19T23:38:52.091+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47579 #26 (22 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:52.354+0000 m30998| 2015-07-19T23:38:52.354+0000 I NETWORK [conn3] end connection 10.139.123.131:35637 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:52.512+0000 m31200| 2015-07-19T23:38:52.512+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:52.513+0000 m31200| 2015-07-19T23:38:52.512+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:52.519+0000 m31100| 2015-07-19T23:38:52.519+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:52.519+0000 m31100| 2015-07-19T23:38:52.519+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:52.888+0000 m31200| 
2015-07-19T23:38:52.888+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 106ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:52.888+0000 m31200| 2015-07-19T23:38:52.888+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 106ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:52.961+0000 m30998| 2015-07-19T23:38:52.961+0000 I NETWORK [conn2] end connection 10.139.123.131:35636 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.005+0000 m30999| 2015-07-19T23:38:53.005+0000 I NETWORK [conn4] end connection 10.139.123.131:56999 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.163+0000 m31200| 2015-07-19T23:38:53.163+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 170ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.164+0000 m31200| 2015-07-19T23:38:53.163+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 170ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.174+0000 m30999| 2015-07-19T23:38:53.174+0000 I NETWORK [conn2] end connection 10.139.123.131:56997 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.330+0000 m31100| 2015-07-19T23:38:53.330+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 279ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.331+0000 m31100| 2015-07-19T23:38:53.330+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 279ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.355+0000 m30999| 2015-07-19T23:38:53.355+0000 I NETWORK [conn3] end connection 10.139.123.131:56998 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.377+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.377+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.377+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.377+0000 jstests/concurrency/fsm_workloads/yield_id_hack.js: Workload completed in 2122 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.377+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.377+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.377+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.377+0000 m30999| 2015-07-19T23:38:53.377+0000 I COMMAND [conn1] DROP: db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.378+0000 m30999| 2015-07-19T23:38:53.377+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:53.377+0000-55ac350dd2c1f750d1548355", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349133377), what: "dropCollection.start", ns: "db0.coll0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.429+0000 m30999| 2015-07-19T23:38:53.429+0000 I SHARDING [conn1] distributed lock 'db0.coll0/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac350dd2c1f750d1548356
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.429+0000 m31100| 2015-07-19T23:38:53.429+0000 I COMMAND [conn12] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.430+0000 m31200| 2015-07-19T23:38:53.430+0000 I COMMAND [conn14] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.432+0000 m31101| 2015-07-19T23:38:53.432+0000 I COMMAND [repl writer worker 4] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.433+0000 m31102| 2015-07-19T23:38:53.432+0000 I COMMAND [repl writer worker 4] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.434+0000 m31202| 2015-07-19T23:38:53.433+0000 I COMMAND [repl writer worker 11] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.434+0000 m31201| 2015-07-19T23:38:53.433+0000 I COMMAND [repl writer worker 10] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.483+0000 m31100| 2015-07-19T23:38:53.482+0000 I SHARDING [conn12] remotely refreshing metadata for db0.coll0 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac350ad2c1f750d1548353, current metadata version is 2|3||55ac350ad2c1f750d1548353
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.483+0000 m31100| 2015-07-19T23:38:53.483+0000 W SHARDING [conn12] no chunks found when reloading db0.coll0, previous version was 0|0||55ac350ad2c1f750d1548353, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.483+0000 m31100| 2015-07-19T23:38:53.483+0000 I SHARDING [conn12] dropping metadata for db0.coll0 at shard version 2|3||55ac350ad2c1f750d1548353, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.484+0000 m31200| 2015-07-19T23:38:53.483+0000 I SHARDING [conn14] remotely refreshing metadata for db0.coll0 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac350ad2c1f750d1548353, current metadata version is 2|5||55ac350ad2c1f750d1548353
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.484+0000 m31200| 2015-07-19T23:38:53.484+0000 W SHARDING [conn14] no chunks found when reloading db0.coll0, previous version was 0|0||55ac350ad2c1f750d1548353, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.484+0000 m31200| 2015-07-19T23:38:53.484+0000 I SHARDING [conn14] dropping metadata for db0.coll0 at shard version 2|5||55ac350ad2c1f750d1548353, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.484+0000 m30999| 2015-07-19T23:38:53.484+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:53.484+0000-55ac350dd2c1f750d1548357", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349133484), what: "dropCollection", ns: "db0.coll0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.536+0000 m30999| 2015-07-19T23:38:53.535+0000 I SHARDING [conn1] distributed lock 'db0.coll0/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.588+0000 m30999| 2015-07-19T23:38:53.587+0000 I COMMAND [conn1] DROP DATABASE: db0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.588+0000 m30999| 2015-07-19T23:38:53.587+0000 I SHARDING [conn1] DBConfig::dropDatabase: db0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.588+0000 m30999| 2015-07-19T23:38:53.587+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:53.587+0000-55ac350dd2c1f750d1548358", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349133587), what: "dropDatabase.start", ns: "db0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.689+0000 m30999| 2015-07-19T23:38:53.689+0000 I SHARDING [conn1] DBConfig::dropDatabase: db0 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.690+0000 m31100| 2015-07-19T23:38:53.690+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47580 #27 (23 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.690+0000 m31100| 2015-07-19T23:38:53.690+0000 I COMMAND [conn27] dropDatabase db0 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.690+0000 m31100| 2015-07-19T23:38:53.690+0000 I COMMAND [conn27] dropDatabase db0 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.691+0000 m30999| 2015-07-19T23:38:53.690+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:53.690+0000-55ac350dd2c1f750d1548359", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349133690), what: "dropDatabase", ns: "db0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.691+0000 m31100| 2015-07-19T23:38:53.690+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 256ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.691+0000 m31100| 2015-07-19T23:38:53.690+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 255ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.691+0000 m31101| 2015-07-19T23:38:53.691+0000 I COMMAND [repl writer worker 11] dropDatabase db0 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.691+0000 m31101| 2015-07-19T23:38:53.691+0000 I COMMAND [repl writer worker 11] dropDatabase db0 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.691+0000 m31102| 2015-07-19T23:38:53.691+0000 I COMMAND [repl writer worker 10] dropDatabase db0 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.692+0000 m31102| 2015-07-19T23:38:53.691+0000 I COMMAND [repl writer worker 10] dropDatabase db0 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.750+0000 m31100| 2015-07-19T23:38:53.750+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.753+0000 m31102| 2015-07-19T23:38:53.753+0000 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.753+0000 m31101| 2015-07-19T23:38:53.753+0000 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.756+0000 m31200| 2015-07-19T23:38:53.755+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 319ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.759+0000 m31200| 2015-07-19T23:38:53.755+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 319ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.760+0000 m31200| 2015-07-19T23:38:53.760+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.761+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.762+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.762+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.762+0000 jstests/concurrency/fsm_workloads/update_rename_noindex.js
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.762+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.762+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.762+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.763+0000 m31202| 2015-07-19T23:38:53.763+0000 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.765+0000 m31201| 2015-07-19T23:38:53.764+0000 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.765+0000 m30999| 2015-07-19T23:38:53.764+0000 I SHARDING [conn1] distributed lock 'db1/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac350dd2c1f750d154835a
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.766+0000 m30999| 2015-07-19T23:38:53.766+0000 I SHARDING [conn1] Placing [db1] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.766+0000 m30999| 2015-07-19T23:38:53.766+0000 I SHARDING [conn1] Enabling sharding for database [db1] in config db
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.817+0000 m30999| 2015-07-19T23:38:53.817+0000 I SHARDING [conn1] distributed lock 'db1/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
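The "----" banners above delimit workloads: the runner executes each file under jstests/concurrency/fsm_workloads/ as a finite state machine, with each of the shell threads ("Using 5 threads (requested 5)" earlier) walking the state graph independently, and the "setting random seed" lines coming from per-thread Random.setRandomSeed calls. A workload exports a $config object; the skeleton below is a hypothetical illustration of that shape, not the actual contents of update_rename_noindex.js, and the states and probabilities shown are invented:

    var $config = (function() {

        var states = {
            init: function init(db, collName) {
                // this.tid is the runner-assigned thread id; give each
                // thread its own document to fight over.
                db[collName].insert({ _id: this.tid, y: 0 });
            },
            update: function update(db, collName) {
                db[collName].update({ _id: this.tid },
                                    { $set: { y: Random.rand() } });
            },
            read: function read(db, collName) {
                db[collName].findOne({ _id: this.tid });
            }
        };

        // From each state, choose the next with the given probabilities.
        var transitions = {
            init:   { update: 1 },
            update: { read: 0.5, update: 0.5 },
            read:   { update: 1 }
        };

        return {
            threadCount: 5,      // cf. "Using 5 threads (requested 5)"
            iterations: 20,
            startState: 'init',
            states: states,
            transitions: transitions
        };
    })();
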
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.823+0000 m31100| 2015-07-19T23:38:53.823+0000 I INDEX [conn26] build index on: db1.coll1 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.824+0000 m31100| 2015-07-19T23:38:53.823+0000 I INDEX [conn26] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.825+0000 m31100| 2015-07-19T23:38:53.825+0000 I INDEX [conn26] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.826+0000 m30999| 2015-07-19T23:38:53.826+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db1.coll1", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.827+0000 m30999| 2015-07-19T23:38:53.827+0000 I SHARDING [conn1] distributed lock 'db1.coll1/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac350dd2c1f750d154835b [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.827+0000 m30999| 2015-07-19T23:38:53.827+0000 I SHARDING [conn1] enable sharding on: db1.coll1 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.828+0000 m30999| 2015-07-19T23:38:53.827+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:53.827+0000-55ac350dd2c1f750d154835c", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349133827), what: "shardCollection.start", ns: "db1.coll1", details: { shardKey: { _id: "hashed" }, collection: "db1.coll1", primary: "test-rs0:test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.828+0000 m31102| 2015-07-19T23:38:53.828+0000 I INDEX [repl writer worker 8] build index on: db1.coll1 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.829+0000 m31102| 2015-07-19T23:38:53.828+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.829+0000 m31101| 2015-07-19T23:38:53.829+0000 I INDEX [repl writer worker 5] build index on: db1.coll1 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.829+0000 m31101| 2015-07-19T23:38:53.829+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.831+0000 m31102| 2015-07-19T23:38:53.831+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.831+0000 m31101| 2015-07-19T23:38:53.831+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.878+0000 m30999| 2015-07-19T23:38:53.878+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db1.coll1 using new epoch 55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:53.980+0000 m30999| 2015-07-19T23:38:53.980+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db1.coll1: 0ms sequenceNumber: 7 version: 1|1||55ac350dd2c1f750d154835d based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.032+0000 m30999| 2015-07-19T23:38:54.032+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db1.coll1: 0ms sequenceNumber: 8 version: 1|1||55ac350dd2c1f750d154835d based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.032+0000 m31100| 2015-07-19T23:38:54.032+0000 I SHARDING [conn17] remotely refreshing metadata for db1.coll1 with requested shard version 1|1||55ac350dd2c1f750d154835d, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.033+0000 m31100| 2015-07-19T23:38:54.033+0000 I SHARDING [conn17] collection db1.coll1 was previously unsharded, new metadata loaded with shard version 1|1||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.033+0000 m31100| 2015-07-19T23:38:54.033+0000 I SHARDING [conn17] collection version was loaded at version 1|1||55ac350dd2c1f750d154835d, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.033+0000 m30999| 2015-07-19T23:38:54.033+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:54.033+0000-55ac350ed2c1f750d154835e", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349134033), what: "shardCollection", ns: "db1.coll1", details: { version: "1|1||55ac350dd2c1f750d154835d" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.084+0000 m30999| 2015-07-19T23:38:54.084+0000 I SHARDING [conn1] distributed lock 'db1.coll1/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
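Each workload gets a fresh database and collection prepared the same way (db1.coll1 here, db0.coll0 before): sharding is enabled on the database, then the collection is sharded on a hashed _id so both shards receive data. A sketch of the equivalent admin commands, assuming a shell connected to a mongos; numInitialChunks is an assumption matching the "numChunks: 2" detail in the shardCollection.start event above:

    var admin = db.getSiblingDB("admin");

    assert.commandWorked(admin.runCommand({ enableSharding: "db1" }));

    // Hashed _id shard key with two initial chunks; the moveChunk and
    // splitChunk sequence that follows in the log then places one range
    // per shard and quarters the hash space.
    assert.commandWorked(admin.runCommand({
        shardCollection: "db1.coll1",
        key: { _id: "hashed" },
        numInitialChunks: 2
    }));
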
[js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.085+0000 m30999| 2015-07-19T23:38:54.084+0000 I SHARDING [conn1] moving chunk ns: db1.coll1 moving ( ns: db1.coll1, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.085+0000 m31100| 2015-07-19T23:38:54.085+0000 I SHARDING [conn15] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.085+0000 m31100| 2015-07-19T23:38:54.085+0000 I SHARDING [conn15] received moveChunk request: { moveChunk: "db1.coll1", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", to: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac350dd2c1f750d154835d') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.086+0000 m31100| 2015-07-19T23:38:54.086+0000 I SHARDING [conn15] distributed lock 'db1.coll1/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac350e68c42881b59cba17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.086+0000 m31100| 2015-07-19T23:38:54.086+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:54.086+0000-55ac350e68c42881b59cba18", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349134086), what: "moveChunk.start", ns: "db1.coll1", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.137+0000 m31100| 2015-07-19T23:38:54.137+0000 I SHARDING [conn15] remotely refreshing metadata for db1.coll1 based on current shard version 1|1||55ac350dd2c1f750d154835d, current metadata version is 1|1||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.138+0000 m31100| 2015-07-19T23:38:54.137+0000 I SHARDING [conn15] metadata of collection db1.coll1 already up to date (shard version : 1|1||55ac350dd2c1f750d154835d, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.138+0000 m31100| 2015-07-19T23:38:54.137+0000 I SHARDING [conn15] moveChunk request accepted at version 1|1||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.138+0000 m31100| 2015-07-19T23:38:54.138+0000 I SHARDING [conn15] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.138+0000 m31200| 2015-07-19T23:38:54.138+0000 I SHARDING [conn16] remotely refreshing metadata for db1.coll1, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.139+0000 m31200| 2015-07-19T23:38:54.138+0000 I SHARDING [conn16] collection db1.coll1 was previously unsharded, new metadata loaded with shard version 0|0||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.139+0000 m31200| 2015-07-19T23:38:54.139+0000 I SHARDING [conn16] collection version was loaded at version 1|1||55ac350dd2c1f750d154835d, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.139+0000 m31200| 2015-07-19T23:38:54.139+0000 I SHARDING [migrateThread] starting receiving-end of migration of 
chunk { _id: 0 } -> { _id: MaxKey } for collection db1.coll1 from test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102 at epoch 55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.140+0000 m31100| 2015-07-19T23:38:54.140+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db1.coll1", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.141+0000 m31200| 2015-07-19T23:38:54.141+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 374ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.143+0000 m31200| 2015-07-19T23:38:54.141+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 374ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.144+0000 m31100| 2015-07-19T23:38:54.142+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db1.coll1", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.144+0000 m31200| 2015-07-19T23:38:54.143+0000 I INDEX [migrateThread] build index on: db1.coll1 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.144+0000 m31200| 2015-07-19T23:38:54.143+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.146+0000 m31200| 2015-07-19T23:38:54.145+0000 I INDEX [migrateThread] build index on: db1.coll1 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.146+0000 m31200| 2015-07-19T23:38:54.145+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.147+0000 m31100| 2015-07-19T23:38:54.147+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db1.coll1", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.151+0000 m31200| 2015-07-19T23:38:54.151+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.151+0000 m31200| 2015-07-19T23:38:54.151+0000 I SHARDING [migrateThread] Deleter starting delete for: db1.coll1 from { _id: 0 } -> { _id: MaxKey }, with opId: 10730 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.151+0000 m31200| 2015-07-19T23:38:54.151+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db1.coll1 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.155+0000 m31100| 2015-07-19T23:38:54.155+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db1.coll1", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.156+0000 m31202| 2015-07-19T23:38:54.156+0000 I INDEX [repl writer worker 3] build index on: db1.coll1 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.156+0000 m31202| 2015-07-19T23:38:54.156+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.156+0000 m31201| 2015-07-19T23:38:54.156+0000 I INDEX [repl writer worker 9] build index on: db1.coll1 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.157+0000 m31201| 2015-07-19T23:38:54.156+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.162+0000 m31202| 2015-07-19T23:38:54.161+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.162+0000 m31201| 2015-07-19T23:38:54.162+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.162+0000 m31200| 2015-07-19T23:38:54.162+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.162+0000 m31200| 2015-07-19T23:38:54.162+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db1.coll1' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.171+0000 m31100| 2015-07-19T23:38:54.171+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db1.coll1", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.172+0000 m31100| 2015-07-19T23:38:54.171+0000 I SHARDING [conn15] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.172+0000 m31100| 2015-07-19T23:38:54.171+0000 I SHARDING [conn15] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.172+0000 m31100| 2015-07-19T23:38:54.172+0000 I SHARDING [conn15] moveChunk setting version to: 2|0||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.172+0000 m31200| 2015-07-19T23:38:54.172+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db1.coll1' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.173+0000 m31200| 2015-07-19T23:38:54.172+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:54.172+0000-55ac350ed9a63f6196b17250", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349134172), what: "moveChunk.to", ns: "db1.coll1", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 12, step 2 of 5: 10, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 10, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.224+0000 m31100| 2015-07-19T23:38:54.223+0000 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db1.coll1", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.224+0000 m31100| 2015-07-19T23:38:54.223+0000 I SHARDING [conn15] moveChunk updating self version to: 2|1||55ac350dd2c1f750d154835d through { _id: MinKey } -> { _id: 0 } for collection 'db1.coll1' [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.224+0000 m31100| 2015-07-19T23:38:54.224+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:54.224+0000-55ac350e68c42881b59cba19", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349134224), what: "moveChunk.commit", ns: "db1.coll1", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.275+0000 m31100| 2015-07-19T23:38:54.275+0000 I SHARDING [conn15] MigrateFromStatus::done About to acquire global 
lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.275+0000 m31100| 2015-07-19T23:38:54.275+0000 I SHARDING [conn15] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.276+0000 m31100| 2015-07-19T23:38:54.275+0000 I SHARDING [conn15] Deleter starting delete for: db1.coll1 from { _id: 0 } -> { _id: MaxKey }, with opId: 8208 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.276+0000 m31100| 2015-07-19T23:38:54.275+0000 I SHARDING [conn15] rangeDeleter deleted 0 documents for db1.coll1 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.276+0000 m31100| 2015-07-19T23:38:54.275+0000 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.276+0000 m31100| 2015-07-19T23:38:54.275+0000 I SHARDING [conn15] distributed lock 'db1.coll1/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.276+0000 m31100| 2015-07-19T23:38:54.276+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:54.276+0000-55ac350e68c42881b59cba1a", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349134276), what: "moveChunk.from", ns: "db1.coll1", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 32, step 5 of 6: 103, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.327+0000 m31100| 2015-07-19T23:38:54.326+0000 I COMMAND [conn15] command db1.coll1 command: moveChunk { moveChunk: "db1.coll1", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", to: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac350dd2c1f750d154835d') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 241ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.327+0000 m30999| 2015-07-19T23:38:54.327+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db1.coll1: 0ms sequenceNumber: 9 version: 2|1||55ac350dd2c1f750d154835d based on: 1|1||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.328+0000 m31100| 2015-07-19T23:38:54.327+0000 I SHARDING [conn15] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac350dd2c1f750d154835d') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.329+0000 m31100| 2015-07-19T23:38:54.328+0000 I SHARDING [conn15] distributed lock 'db1.coll1/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac350e68c42881b59cba1b [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.329+0000 m31100| 2015-07-19T23:38:54.329+0000 I SHARDING [conn15] remotely 
refreshing metadata for db1.coll1 based on current shard version 2|0||55ac350dd2c1f750d154835d, current metadata version is 2|0||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.329+0000 m31100| 2015-07-19T23:38:54.329+0000 I SHARDING [conn15] updating metadata for db1.coll1 from shard version 2|0||55ac350dd2c1f750d154835d to shard version 2|1||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.329+0000 m31100| 2015-07-19T23:38:54.329+0000 I SHARDING [conn15] collection version was loaded at version 2|1||55ac350dd2c1f750d154835d, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.330+0000 m31100| 2015-07-19T23:38:54.329+0000 I SHARDING [conn15] splitChunk accepted at version 2|1||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.330+0000 m31100| 2015-07-19T23:38:54.330+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:54.330+0000-55ac350e68c42881b59cba1c", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349134330), what: "split", ns: "db1.coll1", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac350dd2c1f750d154835d') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac350dd2c1f750d154835d') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.381+0000 m31100| 2015-07-19T23:38:54.381+0000 I SHARDING [conn15] distributed lock 'db1.coll1/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.382+0000 m30999| 2015-07-19T23:38:54.381+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db1.coll1: 0ms sequenceNumber: 10 version: 2|3||55ac350dd2c1f750d154835d based on: 2|1||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.382+0000 m31200| 2015-07-19T23:38:54.382+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac350dd2c1f750d154835d') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.383+0000 m31200| 2015-07-19T23:38:54.383+0000 I SHARDING [conn18] distributed lock 'db1.coll1/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac350ed9a63f6196b17251 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.383+0000 m31200| 2015-07-19T23:38:54.383+0000 I SHARDING [conn18] remotely refreshing metadata for db1.coll1 based on current shard version 0|0||55ac350dd2c1f750d154835d, current metadata version is 1|1||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.383+0000 m31200| 2015-07-19T23:38:54.383+0000 I SHARDING [conn18] updating metadata for db1.coll1 from shard version 0|0||55ac350dd2c1f750d154835d to shard version 2|0||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.384+0000 m31200| 2015-07-19T23:38:54.383+0000 I SHARDING [conn18] collection version was loaded at version 2|3||55ac350dd2c1f750d154835d, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.384+0000 m31200| 2015-07-19T23:38:54.383+0000 I SHARDING [conn18] splitChunk accepted at version 
2|0||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.384+0000 m31200| 2015-07-19T23:38:54.384+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:38:54.384+0000-55ac350ed9a63f6196b17252", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349134384), what: "split", ns: "db1.coll1", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac350dd2c1f750d154835d') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac350dd2c1f750d154835d') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.435+0000 m31200| 2015-07-19T23:38:54.435+0000 I SHARDING [conn18] distributed lock 'db1.coll1/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.436+0000 m30999| 2015-07-19T23:38:54.436+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db1.coll1: 0ms sequenceNumber: 11 version: 2|5||55ac350dd2c1f750d154835d based on: 2|3||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.437+0000 m30999| 2015-07-19T23:38:54.437+0000 I SHARDING [conn1] sharded connection to test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.438+0000 m30999| 2015-07-19T23:38:54.437+0000 I SHARDING [conn1] retrying command: { listIndexes: "coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.438+0000 m31100| 2015-07-19T23:38:54.437+0000 I NETWORK [conn17] end connection 10.139.123.131:47539 (22 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.441+0000 m31200| 2015-07-19T23:38:54.441+0000 I INDEX [conn20] build index on: db1.coll1 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.441+0000 m31200| 2015-07-19T23:38:54.441+0000 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.442+0000 m31100| 2015-07-19T23:38:54.442+0000 I INDEX [conn24] build index on: db1.coll1 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.442+0000 m31100| 2015-07-19T23:38:54.442+0000 I INDEX [conn24] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.444+0000 m31200| 2015-07-19T23:38:54.444+0000 I INDEX [conn20] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.444+0000 m31200| 2015-07-19T23:38:54.444+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:181 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 288ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.445+0000 m31200| 2015-07-19T23:38:54.444+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:181 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 287ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.449+0000 m31100| 2015-07-19T23:38:54.447+0000 I INDEX [conn24] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.449+0000 m31100| 2015-07-19T23:38:54.448+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:181 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 620ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.450+0000 m31202| 2015-07-19T23:38:54.448+0000 I INDEX [repl writer worker 13] build index on: db1.coll1 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.450+0000 m31202| 2015-07-19T23:38:54.448+0000 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.450+0000 m31201| 2015-07-19T23:38:54.448+0000 I INDEX [repl writer worker 7] build index on: db1.coll1 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.450+0000 m31201| 2015-07-19T23:38:54.448+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.450+0000 m31100| 2015-07-19T23:38:54.448+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:181 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 620ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.453+0000 m31201| 2015-07-19T23:38:54.453+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.453+0000 m31200| 2015-07-19T23:38:54.453+0000 I INDEX [conn20] build index on: db1.coll1 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.454+0000 m31200| 2015-07-19T23:38:54.453+0000 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.455+0000 m31202| 2015-07-19T23:38:54.454+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.455+0000 m31101| 2015-07-19T23:38:54.454+0000 I INDEX [repl writer worker 12] build index on: db1.coll1 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.455+0000 m31101| 2015-07-19T23:38:54.454+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.455+0000 m31102| 2015-07-19T23:38:54.454+0000 I INDEX [repl writer worker 11] build index on: db1.coll1 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.456+0000 m31102| 2015-07-19T23:38:54.454+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.456+0000 m31100| 2015-07-19T23:38:54.454+0000 I INDEX [conn24] build index on: db1.coll1 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.457+0000 m31100| 2015-07-19T23:38:54.454+0000 I INDEX [conn24] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.458+0000 m31200| 2015-07-19T23:38:54.455+0000 I INDEX [conn20] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.458+0000 m31101| 2015-07-19T23:38:54.456+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.458+0000 m31100| 2015-07-19T23:38:54.456+0000 I INDEX [conn24] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.459+0000 m31201| 2015-07-19T23:38:54.459+0000 I INDEX [repl writer worker 6] build index on: db1.coll1 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.461+0000 m31201| 2015-07-19T23:38:54.459+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.462+0000 m31102| 2015-07-19T23:38:54.459+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.462+0000 m31101| 2015-07-19T23:38:54.460+0000 I INDEX [repl writer worker 14] build index on: db1.coll1 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.462+0000 m31101| 2015-07-19T23:38:54.460+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.464+0000 m31202| 2015-07-19T23:38:54.460+0000 I INDEX [repl writer worker 12] build index on: db1.coll1 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.466+0000 m31202| 2015-07-19T23:38:54.460+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.466+0000 m31201| 2015-07-19T23:38:54.460+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.467+0000 m31101| 2015-07-19T23:38:54.461+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.467+0000 m31202| 2015-07-19T23:38:54.461+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.467+0000 m31102| 2015-07-19T23:38:54.464+0000 I INDEX [repl writer worker 0] build index on: db1.coll1 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.467+0000 m31102| 2015-07-19T23:38:54.464+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.467+0000 m31200| 2015-07-19T23:38:54.465+0000 I COMMAND [conn18] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.467+0000 m31100| 2015-07-19T23:38:54.465+0000 I COMMAND [conn15] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.472+0000 m31102| 2015-07-19T23:38:54.466+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.472+0000 m31100| 2015-07-19T23:38:54.466+0000 I COMMAND [conn15] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.473+0000 m31200| 2015-07-19T23:38:54.466+0000 I COMMAND [conn18] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.473+0000 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.473+0000 m31102| 2015-07-19T23:38:54.468+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.473+0000 m31101| 2015-07-19T23:38:54.468+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.473+0000 m31201| 2015-07-19T23:38:54.468+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.474+0000 m31202| 2015-07-19T23:38:54.468+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.474+0000 m31101| 2015-07-19T23:38:54.469+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.474+0000 m31201| 2015-07-19T23:38:54.469+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.474+0000 m31102| 2015-07-19T23:38:54.469+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.474+0000 m31202| 2015-07-19T23:38:54.469+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.774+0000 m30999| 2015-07-19T23:38:54.774+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57015 #5 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.792+0000 m30998| 2015-07-19T23:38:54.792+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35657 #4 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.793+0000 m30999| 2015-07-19T23:38:54.793+0000 I NETWORK [mongosMain] connection accepted 
from 10.139.123.131:57017 #6 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.795+0000 m30999| 2015-07-19T23:38:54.795+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57018 #7 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.797+0000 m30998| 2015-07-19T23:38:54.797+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35660 #5 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.800+0000 m30999| 2015-07-19T23:38:54.800+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57021 #8 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.801+0000 m30998| 2015-07-19T23:38:54.800+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35661 #6 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.804+0000 m30998| 2015-07-19T23:38:54.804+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35663 #7 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.816+0000 m30999| 2015-07-19T23:38:54.815+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57023 #9 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.817+0000 m30998| 2015-07-19T23:38:54.817+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35665 #8 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.819+0000 m30999| 2015-07-19T23:38:54.819+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57025 #10 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.821+0000 m30999| 2015-07-19T23:38:54.821+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57026 #11 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.832+0000 m30998| 2015-07-19T23:38:54.832+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35668 #9 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.842+0000 m30998| 2015-07-19T23:38:54.842+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35669 #10 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.850+0000 m30999| 2015-07-19T23:38:54.849+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57029 #12 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.854+0000 m30999| 2015-07-19T23:38:54.854+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57030 #13 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.863+0000 m30998| 2015-07-19T23:38:54.863+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35672 #11 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.867+0000 m30999| 2015-07-19T23:38:54.867+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57032 #14 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.868+0000 m30998| 2015-07-19T23:38:54.868+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35674 #12 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.870+0000 m30998| 2015-07-19T23:38:54.870+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35675 #13 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.876+0000 
setting random seed: 3057416598312 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.876+0000 setting random seed: 6429491615854 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.877+0000 setting random seed: 2657038043253 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.879+0000 setting random seed: 8234675494022 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.879+0000 setting random seed: 8104688953608 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.879+0000 setting random seed: 8762703891843 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.887+0000 setting random seed: 1515208743512 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.887+0000 setting random seed: 4252377478405 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.887+0000 setting random seed: 3522174940444 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.887+0000 setting random seed: 2664698893204 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.887+0000 setting random seed: 7683548498898 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.888+0000 setting random seed: 3601957443170 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.888+0000 setting random seed: 3718434926122 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.899+0000 setting random seed: 442298036068 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.899+0000 m30998| 2015-07-19T23:38:54.887+0000 I SHARDING [conn4] ChunkManager: time to load chunks for db1.coll1: 0ms sequenceNumber: 3 version: 2|5||55ac350dd2c1f750d154835d based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.899+0000 setting random seed: 2753295591101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.899+0000 setting random seed: 6301418156363 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.899+0000 setting random seed: 3808313705958 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.900+0000 setting random seed: 3164692781865 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.914+0000 setting random seed: 7387952357530 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:54.918+0000 setting random seed: 3583109541796 [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:55.214+0000 m29000| 2015-07-19T23:38:55.214+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55335 #30 (30 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:55.471+0000 m31100| 2015-07-19T23:38:55.470+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:55.471+0000 m31100| 2015-07-19T23:38:55.470+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:55.471+0000 m31200| 2015-07-19T23:38:55.470+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 
locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:55.472+0000 m31200| 2015-07-19T23:38:55.470+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:56.473+0000 m31100| 2015-07-19T23:38:56.473+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:56.473+0000 m31200| 2015-07-19T23:38:56.473+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:56.474+0000 m31200| 2015-07-19T23:38:56.473+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:56.474+0000 m31100| 2015-07-19T23:38:56.473+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:57.476+0000 m31100| 2015-07-19T23:38:57.476+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:57.476+0000 m31200| 2015-07-19T23:38:57.476+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:57.476+0000 m31200| 2015-07-19T23:38:57.476+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:57.477+0000 m31100| 
2015-07-19T23:38:57.476+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:57.491+0000 m31102| 2015-07-19T23:38:57.491+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37320 #6 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:57.492+0000 m31101| 2015-07-19T23:38:57.492+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46536 #8 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:57.493+0000 m31202| 2015-07-19T23:38:57.493+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:41840 #6 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:57.494+0000 m31201| 2015-07-19T23:38:57.494+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:35807 #8 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:58.479+0000 m31200| 2015-07-19T23:38:58.478+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:58.479+0000 m31100| 2015-07-19T23:38:58.478+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:58.479+0000 m31200| 2015-07-19T23:38:58.479+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:58.480+0000 m31100| 2015-07-19T23:38:58.480+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:58.757+0000 m31102| 2015-07-19T23:38:58.757+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37324 #7 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:58.758+0000 m31202| 2015-07-19T23:38:58.758+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:41843 #7 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:58.835+0000 m29000| 2015-07-19T23:38:58.835+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55343 #31 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:59.481+0000 m31200| 2015-07-19T23:38:59.481+0000 I QUERY 
[conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:59.482+0000 m31100| 2015-07-19T23:38:59.481+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:59.482+0000 m31200| 2015-07-19T23:38:59.481+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:59.482+0000 m31100| 2015-07-19T23:38:59.482+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:59.561+0000 m31102| 2015-07-19T23:38:59.561+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37327 #8 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:59.562+0000 m31101| 2015-07-19T23:38:59.561+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46543 #9 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:59.563+0000 m31202| 2015-07-19T23:38:59.562+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:41847 #8 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:59.563+0000 m31201| 2015-07-19T23:38:59.563+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:35814 #9 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:38:59.980+0000 m29000| 2015-07-19T23:38:59.980+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55348 #32 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.484+0000 m31100| 2015-07-19T23:39:00.484+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.485+0000 m31200| 2015-07-19T23:39:00.484+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.485+0000 m31200| 2015-07-19T23:39:00.484+0000 I QUERY [conn9] getmore local.oplog.rs 
query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.485+0000 m31100| 2015-07-19T23:39:00.485+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.790+0000 m31101| 2015-07-19T23:39:00.789+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46547 #10 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.791+0000 m31100| 2015-07-19T23:39:00.790+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47616 #28 (23 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.791+0000 m31102| 2015-07-19T23:39:00.791+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37334 #9 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.792+0000 m31202| 2015-07-19T23:39:00.792+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:41853 #9 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.904+0000 m31102| 2015-07-19T23:39:00.904+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37336 #10 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.983+0000 m30998| 2015-07-19T23:39:00.978+0000 I NETWORK [conn10] end connection 10.139.123.131:35669 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.983+0000 m30998| 2015-07-19T23:39:00.979+0000 I NETWORK [conn8] end connection 10.139.123.131:35665 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.983+0000 m30998| 2015-07-19T23:39:00.980+0000 I NETWORK [conn9] end connection 10.139.123.131:35668 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.985+0000 m30999| 2015-07-19T23:39:00.985+0000 I NETWORK [conn11] end connection 10.139.123.131:57026 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.988+0000 m30999| 2015-07-19T23:39:00.988+0000 I NETWORK [conn8] end connection 10.139.123.131:57021 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:00.994+0000 m30998| 2015-07-19T23:39:00.994+0000 I NETWORK [conn11] end connection 10.139.123.131:35672 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.003+0000 m30999| 2015-07-19T23:39:00.991+0000 I NETWORK [conn7] end connection 10.139.123.131:57018 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.004+0000 m30999| 2015-07-19T23:39:00.993+0000 I NETWORK [conn5] end connection 10.139.123.131:57015 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.004+0000 m30998| 2015-07-19T23:39:00.997+0000 I NETWORK [conn6] end connection 10.139.123.131:35661 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.004+0000 m30998| 2015-07-19T23:39:00.998+0000 I NETWORK [conn7] end connection 10.139.123.131:35663 (5 connections now open) 
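
The burst of "end connection" messages above is the 20 FSM worker threads (split across the two mongos routers, m30998 and m30999) disconnecting as the workload winds down. Schematically, each worker does something like the following minimal sketch (host, database, and collection names taken from the log; the workload's FSM states are elided):

    // one FSM worker thread, roughly
    var conn = new Mongo('ip-10-139-123-131:30999');  // or :30998 -- workers spread across both routers
    var coll = conn.getDB('db1').getCollection('coll1');
    Random.setRandomSeed();   // source of the "setting random seed: N" lines above
    // ... run the workload's FSM states against coll ...
    // when the thread returns, its connection closes and mongos logs "end connection"
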
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.004+0000 m30998| 2015-07-19T23:39:00.999+0000 I NETWORK [conn12] end connection 10.139.123.131:35674 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.006+0000 m30998| 2015-07-19T23:39:01.006+0000 I NETWORK [conn5] end connection 10.139.123.131:35660 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.006+0000 m30998| 2015-07-19T23:39:01.006+0000 I NETWORK [conn13] end connection 10.139.123.131:35675 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.006+0000 m30998| 2015-07-19T23:39:01.006+0000 I NETWORK [conn4] end connection 10.139.123.131:35657 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.006+0000 m30999| 2015-07-19T23:39:01.006+0000 I NETWORK [conn13] end connection 10.139.123.131:57030 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.007+0000 m30999| 2015-07-19T23:39:01.007+0000 I NETWORK [conn10] end connection 10.139.123.131:57025 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.007+0000 m30999| 2015-07-19T23:39:01.007+0000 I NETWORK [conn14] end connection 10.139.123.131:57032 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.019+0000 m29000| 2015-07-19T23:39:01.019+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55354 #33 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.021+0000 m30999| 2015-07-19T23:39:01.021+0000 I NETWORK [conn9] end connection 10.139.123.131:57023 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.026+0000 m30999| 2015-07-19T23:39:01.026+0000 I NETWORK [conn6] end connection 10.139.123.131:57017 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.051+0000 m30999| 2015-07-19T23:39:01.050+0000 I NETWORK [conn12] end connection 10.139.123.131:57029 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.187+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.187+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.187+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.187+0000 jstests/concurrency/fsm_workloads/update_rename_noindex.js: Workload completed in 6720 ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.187+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.187+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.188+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.188+0000 m30999| 2015-07-19T23:39:01.187+0000 I COMMAND [conn1] DROP: db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.188+0000 m30999| 2015-07-19T23:39:01.187+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:01.187+0000-55ac3515d2c1f750d154835f", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349141187), what: "dropCollection.start", ns: "db1.coll1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.239+0000 m30999| 2015-07-19T23:39:01.239+0000 I SHARDING [conn1] distributed lock 'db1.coll1/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3515d2c1f750d1548360 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.240+0000 m31100| 2015-07-19T23:39:01.239+0000 I COMMAND [conn12] CMD: drop db1.coll1 [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:01.240+0000 m31100| 2015-07-19T23:39:01.240+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 753ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.240+0000 m31100| 2015-07-19T23:39:01.240+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 752ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.241+0000 m31200| 2015-07-19T23:39:01.241+0000 I COMMAND [conn14] CMD: drop db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.241+0000 m31200| 2015-07-19T23:39:01.241+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 754ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.242+0000 m31200| 2015-07-19T23:39:01.241+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 754ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.243+0000 m31102| 2015-07-19T23:39:01.242+0000 I COMMAND [repl writer worker 5] CMD: drop db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.243+0000 m31101| 2015-07-19T23:39:01.243+0000 I COMMAND [repl writer worker 15] CMD: drop db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.244+0000 m31202| 2015-07-19T23:39:01.244+0000 I COMMAND [repl writer worker 7] CMD: drop db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.244+0000 m31201| 2015-07-19T23:39:01.244+0000 I COMMAND [repl writer worker 11] CMD: drop db1.coll1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.293+0000 m31100| 2015-07-19T23:39:01.293+0000 I SHARDING [conn12] remotely refreshing metadata for db1.coll1 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac350dd2c1f750d154835d, current metadata version is 2|3||55ac350dd2c1f750d154835d [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.294+0000 m31100| 2015-07-19T23:39:01.293+0000 W SHARDING [conn12] no chunks found when reloading db1.coll1, previous version was 0|0||55ac350dd2c1f750d154835d, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.294+0000 m31100| 2015-07-19T23:39:01.293+0000 I SHARDING [conn12] dropping metadata for db1.coll1 at shard version 2|3||55ac350dd2c1f750d154835d, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.294+0000 m31200| 2015-07-19T23:39:01.294+0000 I SHARDING [conn14] remotely refreshing metadata for db1.coll1 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac350dd2c1f750d154835d, current metadata version is 2|5||55ac350dd2c1f750d154835d 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.295+0000 m31200| 2015-07-19T23:39:01.294+0000 W SHARDING [conn14] no chunks found when reloading db1.coll1, previous version was 0|0||55ac350dd2c1f750d154835d, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.295+0000 m31200| 2015-07-19T23:39:01.294+0000 I SHARDING [conn14] dropping metadata for db1.coll1 at shard version 2|5||55ac350dd2c1f750d154835d, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.295+0000 m30999| 2015-07-19T23:39:01.295+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:01.295+0000-55ac3515d2c1f750d1548361", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349141295), what: "dropCollection", ns: "db1.coll1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.346+0000 m30999| 2015-07-19T23:39:01.346+0000 I SHARDING [conn1] distributed lock 'db1.coll1/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.398+0000 m30999| 2015-07-19T23:39:01.398+0000 I COMMAND [conn1] DROP DATABASE: db1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.398+0000 m30999| 2015-07-19T23:39:01.398+0000 I SHARDING [conn1] DBConfig::dropDatabase: db1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.398+0000 m30999| 2015-07-19T23:39:01.398+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:01.398+0000-55ac3515d2c1f750d1548362", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349141398), what: "dropDatabase.start", ns: "db1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.500+0000 m30999| 2015-07-19T23:39:01.499+0000 I SHARDING [conn1] DBConfig::dropDatabase: db1 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.500+0000 m31100| 2015-07-19T23:39:01.499+0000 I COMMAND [conn27] dropDatabase db1 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.500+0000 m31100| 2015-07-19T23:39:01.500+0000 I COMMAND [conn27] dropDatabase db1 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.501+0000 m30999| 2015-07-19T23:39:01.500+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:01.500+0000-55ac3515d2c1f750d1548363", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349141500), what: "dropDatabase", ns: "db1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.501+0000 m31100| 2015-07-19T23:39:01.500+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.502+0000 m31100| 2015-07-19T23:39:01.500+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 255ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.502+0000 m31101| 2015-07-19T23:39:01.500+0000 I COMMAND [repl writer worker 10] dropDatabase db1 starting 
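
Between workloads the harness tears the test database down through a single mongos (conn1). The sequence above — DROP on the mongos, CMD: drop on each shard primary, the "no chunks found ... this is a drop" warning when each shard reloads its metadata, then the replicated dropDatabase on the secondaries — corresponds to this minimal sketch (host name from the log):

    var mongos = new Mongo('ip-10-139-123-131:30999');
    var db1 = mongos.getDB('db1');
    db1.coll1.drop();    // mongos takes the distributed lock, drops on both shards,
                         // and removes the collection's chunk metadata from the config servers
    db1.dropDatabase();  // secondaries apply this as "dropDatabase db1"
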
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.502+0000 m31101| 2015-07-19T23:39:01.500+0000 I COMMAND [repl writer worker 10] dropDatabase db1 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.502+0000 m31102| 2015-07-19T23:39:01.500+0000 I COMMAND [repl writer worker 6] dropDatabase db1 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.502+0000 m31102| 2015-07-19T23:39:01.500+0000 I COMMAND [repl writer worker 6] dropDatabase db1 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.560+0000 m31100| 2015-07-19T23:39:01.560+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.563+0000 m31101| 2015-07-19T23:39:01.563+0000 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.564+0000 m31102| 2015-07-19T23:39:01.563+0000 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.565+0000 m31200| 2015-07-19T23:39:01.565+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 318ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.567+0000 m31200| 2015-07-19T23:39:01.565+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:223 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 319ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.570+0000 m31200| 2015-07-19T23:39:01.570+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.571+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.571+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.571+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.572+0000 jstests/concurrency/fsm_workloads/update_replace_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.572+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.572+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.572+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.575+0000 m31201| 2015-07-19T23:39:01.573+0000 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.575+0000 m31202| 2015-07-19T23:39:01.573+0000 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.575+0000 m30999| 2015-07-19T23:39:01.574+0000 I SHARDING [conn1] distributed lock 'db2/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3515d2c1f750d1548364 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.576+0000 m30999| 2015-07-19T23:39:01.576+0000 I SHARDING [conn1] Placing [db2] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.576+0000 m30999| 2015-07-19T23:39:01.576+0000 I SHARDING [conn1] Enabling sharding for database [db2] in config db [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.627+0000 m30999| 2015-07-19T23:39:01.627+0000 I SHARDING [conn1] distributed lock 
'db2/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.633+0000 m31100| 2015-07-19T23:39:01.633+0000 I INDEX [conn26] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.634+0000 m31100| 2015-07-19T23:39:01.633+0000 I INDEX [conn26] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.635+0000 m31100| 2015-07-19T23:39:01.634+0000 I INDEX [conn26] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.635+0000 m30999| 2015-07-19T23:39:01.635+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db2.coll2", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.636+0000 m30999| 2015-07-19T23:39:01.636+0000 I SHARDING [conn1] distributed lock 'db2.coll2/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3515d2c1f750d1548365 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.636+0000 m30999| 2015-07-19T23:39:01.636+0000 I SHARDING [conn1] enable sharding on: db2.coll2 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.637+0000 m30999| 2015-07-19T23:39:01.636+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:01.636+0000-55ac3515d2c1f750d1548366", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349141636), what: "shardCollection.start", ns: "db2.coll2", details: { shardKey: { _id: "hashed" }, collection: "db2.coll2", primary: "test-rs0:test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.641+0000 m31102| 2015-07-19T23:39:01.640+0000 I INDEX [repl writer worker 10] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.641+0000 m31102| 2015-07-19T23:39:01.640+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.642+0000 m31102| 2015-07-19T23:39:01.642+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.643+0000 m31101| 2015-07-19T23:39:01.643+0000 I INDEX [repl writer worker 11] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.643+0000 m31101| 2015-07-19T23:39:01.643+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.644+0000 m31101| 2015-07-19T23:39:01.644+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.687+0000 m30999| 2015-07-19T23:39:01.687+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db2.coll2 using new epoch 55ac3515d2c1f750d1548367 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.789+0000 m30999| 2015-07-19T23:39:01.789+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 12 version: 1|1||55ac3515d2c1f750d1548367 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.841+0000 m30999| 2015-07-19T23:39:01.841+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 13 version: 1|1||55ac3515d2c1f750d1548367 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.842+0000 m31100| 2015-07-19T23:39:01.841+0000 I SHARDING [conn24] remotely refreshing metadata for db2.coll2 with requested shard version 1|1||55ac3515d2c1f750d1548367, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.842+0000 m31100| 2015-07-19T23:39:01.842+0000 I SHARDING [conn24] collection db2.coll2 was previously unsharded, new metadata loaded with shard version 1|1||55ac3515d2c1f750d1548367 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.842+0000 m31100| 2015-07-19T23:39:01.842+0000 I SHARDING [conn24] collection version was loaded at version 1|1||55ac3515d2c1f750d1548367, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.842+0000 m30999| 2015-07-19T23:39:01.842+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:01.842+0000-55ac3515d2c1f750d1548368", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349141842), what: "shardCollection", ns: "db2.coll2", details: { version: "1|1||55ac3515d2c1f750d1548367" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.893+0000 m30999| 2015-07-19T23:39:01.893+0000 I SHARDING [conn1] distributed lock 'db2.coll2/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
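
Setup for the next workload repeats the pattern used for db1: enable sharding on the database, build the hashed _id index on the primary shard, and shard the collection, which produces the two initial chunks logged above ("going to create 2 chunk(s) for: db2.coll2"). Issued by hand, the equivalent commands would be (a sketch; names taken from the log):

    var admin = new Mongo('ip-10-139-123-131:30999').getDB('admin');
    admin.runCommand({ enableSharding: 'db2' });   // "Enabling sharding for database [db2]"
    admin.runCommand({ shardCollection: 'db2.coll2',
                       key: { _id: 'hashed' } });  // creates the initial chunks at MinKey / 0 / MaxKey
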
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.894+0000 m30999| 2015-07-19T23:39:01.894+0000 I SHARDING [conn1] moving chunk ns: db2.coll2 moving ( ns: db2.coll2, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.894+0000 m31100| 2015-07-19T23:39:01.894+0000 I SHARDING [conn15] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.895+0000 m31100| 2015-07-19T23:39:01.894+0000 I SHARDING [conn15] received moveChunk request: { moveChunk: "db2.coll2", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", to: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3515d2c1f750d1548367') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.895+0000 m31100| 2015-07-19T23:39:01.895+0000 I SHARDING [conn15] distributed lock 'db2.coll2/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac351568c42881b59cba1e
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.896+0000 m31100| 2015-07-19T23:39:01.895+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:01.895+0000-55ac351568c42881b59cba1f", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349141895), what: "moveChunk.start", ns: "db2.coll2", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.946+0000 m31100| 2015-07-19T23:39:01.946+0000 I SHARDING [conn15] remotely refreshing metadata for db2.coll2 based on current shard version 1|1||55ac3515d2c1f750d1548367, current metadata version is 1|1||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.947+0000 m31100| 2015-07-19T23:39:01.947+0000 I SHARDING [conn15] metadata of collection db2.coll2 already up to date (shard version : 1|1||55ac3515d2c1f750d1548367, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.947+0000 m31100| 2015-07-19T23:39:01.947+0000 I SHARDING [conn15] moveChunk request accepted at version 1|1||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.947+0000 m31100| 2015-07-19T23:39:01.947+0000 I SHARDING [conn15] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.948+0000 m31200| 2015-07-19T23:39:01.947+0000 I SHARDING [conn16] remotely refreshing metadata for db2.coll2, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.948+0000 m31200| 2015-07-19T23:39:01.948+0000 I SHARDING [conn16] collection db2.coll2 was previously unsharded, new metadata loaded with shard version 0|0||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.948+0000 m31200| 2015-07-19T23:39:01.948+0000 I SHARDING [conn16] collection version was loaded at version 1|1||55ac3515d2c1f750d1548367, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.948+0000 m31200| 2015-07-19T23:39:01.948+0000 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db2.coll2 from test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102 at epoch 55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.949+0000 m31100| 2015-07-19T23:39:01.949+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db2.coll2", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.950+0000 m31200| 2015-07-19T23:39:01.950+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 374ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.951+0000 m31200| 2015-07-19T23:39:01.950+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 374ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.952+0000 m31100| 2015-07-19T23:39:01.951+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db2.coll2", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.953+0000 m31200| 2015-07-19T23:39:01.953+0000 I INDEX [migrateThread] build index on: db2.coll2 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.953+0000 m31200| 2015-07-19T23:39:01.953+0000 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.955+0000 m31200| 2015-07-19T23:39:01.954+0000 I INDEX [migrateThread] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.955+0000 m31200| 2015-07-19T23:39:01.954+0000 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.956+0000 m31100| 2015-07-19T23:39:01.956+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db2.coll2", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.957+0000 m31200| 2015-07-19T23:39:01.956+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.957+0000 m31200| 2015-07-19T23:39:01.957+0000 I SHARDING [migrateThread] Deleter starting delete for: db2.coll2 from { _id: 0 } -> { _id: MaxKey }, with opId: 10843
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.957+0000 m31200| 2015-07-19T23:39:01.957+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db2.coll2 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.960+0000 m31201| 2015-07-19T23:39:01.960+0000 I INDEX [repl writer worker 8] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.961+0000 m31201| 2015-07-19T23:39:01.960+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.961+0000 m31202| 2015-07-19T23:39:01.961+0000 I INDEX [repl writer worker 8] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.961+0000 m31202| 2015-07-19T23:39:01.961+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.962+0000 m31201| 2015-07-19T23:39:01.961+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.962+0000 m31200| 2015-07-19T23:39:01.962+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.962+0000 m31200| 2015-07-19T23:39:01.962+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db2.coll2' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.963+0000 m31202| 2015-07-19T23:39:01.963+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.964+0000 m31100| 2015-07-19T23:39:01.964+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db2.coll2", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.965+0000 m31100| 2015-07-19T23:39:01.964+0000 I SHARDING [conn15] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.965+0000 m31100| 2015-07-19T23:39:01.964+0000 I SHARDING [conn15] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.965+0000 m31100| 2015-07-19T23:39:01.964+0000 I SHARDING [conn15] moveChunk setting version to: 2|0||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.973+0000 m31200| 2015-07-19T23:39:01.972+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db2.coll2' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:01.973+0000 m31200| 2015-07-19T23:39:01.972+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:01.972+0000-55ac3515d9a63f6196b17253", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349141972), what: "moveChunk.to", ns: "db2.coll2", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 8, step 2 of 5: 5, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 10, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.024+0000 m31100| 2015-07-19T23:39:02.023+0000 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db2.coll2", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.024+0000 m31100| 2015-07-19T23:39:02.024+0000 I SHARDING [conn15] moveChunk updating self version to: 2|1||55ac3515d2c1f750d1548367 through { _id: MinKey } -> { _id: 0 } for collection 'db2.coll2'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.025+0000 m31100| 2015-07-19T23:39:02.024+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:02.024+0000-55ac351668c42881b59cba20", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349142024), what: "moveChunk.commit", ns: "db2.coll2", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.075+0000 m31100| 2015-07-19T23:39:02.075+0000 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.076+0000 m31100| 2015-07-19T23:39:02.075+0000 I SHARDING [conn15] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.076+0000 m31100| 2015-07-19T23:39:02.075+0000 I SHARDING [conn15] Deleter starting delete for: db2.coll2 from { _id: 0 } -> { _id: MaxKey }, with opId: 8320
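Everything from "received moveChunk request" to "Deleter starting delete" is a single donor-side migration: clone, catch-up to the "steady" state, critical section, commit at version 2|0, then inline range deletion because the request carried waitForDelete: true. Driven from a shell against the same mongos, the equivalent command would look roughly like this sketch (the harness issues it internally; _waitForDelete surfaces on the shard as the waitForDelete flag seen above):

    // ask mongos to move the [0, MaxKey) chunk from test-rs0 to test-rs1,
    // giving the chunk bounds directly since the shard key is hashed
    var admin = new Mongo("ip-10-139-123-131:30999").getDB("admin");
    assert.commandWorked(admin.runCommand({
        moveChunk: "db2.coll2",
        bounds: [ { _id: NumberLong("0") }, { _id: MaxKey } ],
        to: "test-rs1",
        _waitForDelete: true   // block until the donor's range deleter finishes
    }));
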
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.076+0000 m31100| 2015-07-19T23:39:02.075+0000 I SHARDING [conn15] rangeDeleter deleted 0 documents for db2.coll2 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.076+0000 m31100| 2015-07-19T23:39:02.075+0000 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.076+0000 m31100| 2015-07-19T23:39:02.076+0000 I SHARDING [conn15] distributed lock 'db2.coll2/ip-10-139-123-131:31100:1437349130:1993228155' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.077+0000 m31100| 2015-07-19T23:39:02.076+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:02.076+0000-55ac351668c42881b59cba21", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349142076), what: "moveChunk.from", ns: "db2.coll2", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 16, step 5 of 6: 111, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.127+0000 m31100| 2015-07-19T23:39:02.127+0000 I COMMAND [conn15] command db2.coll2 command: moveChunk { moveChunk: "db2.coll2", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", to: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3515d2c1f750d1548367') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 233ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.128+0000 m30999| 2015-07-19T23:39:02.128+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 14 version: 2|1||55ac3515d2c1f750d1548367 based on: 1|1||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.128+0000 m31100| 2015-07-19T23:39:02.128+0000 I SHARDING [conn15] received splitChunk request: { splitChunk: "db2.coll2", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3515d2c1f750d1548367') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.129+0000 m31100| 2015-07-19T23:39:02.129+0000 I SHARDING [conn15] distributed lock 'db2.coll2/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac351668c42881b59cba22
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.130+0000 m31100| 2015-07-19T23:39:02.129+0000 I SHARDING [conn15] remotely refreshing metadata for db2.coll2 based on current shard version 2|0||55ac3515d2c1f750d1548367, current metadata version is 2|0||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.130+0000 m31100| 2015-07-19T23:39:02.130+0000 I SHARDING [conn15] updating metadata for db2.coll2 from shard version 2|0||55ac3515d2c1f750d1548367 to shard version 2|1||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.130+0000 m31100| 2015-07-19T23:39:02.130+0000 I SHARDING [conn15] collection version was loaded at version 2|1||55ac3515d2c1f750d1548367, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.130+0000 m31100| 2015-07-19T23:39:02.130+0000 I SHARDING [conn15] splitChunk accepted at version 2|1||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.131+0000 m31100| 2015-07-19T23:39:02.130+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:02.130+0000-55ac351668c42881b59cba23", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349142130), what: "split", ns: "db2.coll2", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac3515d2c1f750d1548367') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac3515d2c1f750d1548367') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.182+0000 m31100| 2015-07-19T23:39:02.182+0000 I SHARDING [conn15] distributed lock 'db2.coll2/ip-10-139-123-131:31100:1437349130:1993228155' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.182+0000 m30999| 2015-07-19T23:39:02.182+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 15 version: 2|3||55ac3515d2c1f750d1548367 based on: 2|1||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.183+0000 m31200| 2015-07-19T23:39:02.182+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db2.coll2", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3515d2c1f750d1548367') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.184+0000 m31200| 2015-07-19T23:39:02.184+0000 I SHARDING [conn18] distributed lock 'db2.coll2/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3516d9a63f6196b17254
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.184+0000 m31200| 2015-07-19T23:39:02.184+0000 I SHARDING [conn18] remotely refreshing metadata for db2.coll2 based on current shard version 0|0||55ac3515d2c1f750d1548367, current metadata version is 1|1||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.184+0000 m31200| 2015-07-19T23:39:02.184+0000 I SHARDING [conn18] updating metadata for db2.coll2 from shard version 0|0||55ac3515d2c1f750d1548367 to shard version 2|0||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.184+0000 m31200| 2015-07-19T23:39:02.184+0000 I SHARDING [conn18] collection version was loaded at version 2|3||55ac3515d2c1f750d1548367, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.185+0000 m31200| 2015-07-19T23:39:02.184+0000 I SHARDING [conn18] splitChunk accepted at version 2|0||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.185+0000 m31200| 2015-07-19T23:39:02.185+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:02.185+0000-55ac3516d9a63f6196b17255", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349142185), what: "split", ns: "db2.coll2", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac3515d2c1f750d1548367') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac3515d2c1f750d1548367') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.236+0000 m31200| 2015-07-19T23:39:02.236+0000 I SHARDING [conn18] distributed lock 'db2.coll2/ip-10-139-123-131:31200:1437349131:182555922' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.237+0000 m30999| 2015-07-19T23:39:02.237+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 16 version: 2|5||55ac3515d2c1f750d1548367 based on: 2|3||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.238+0000 m30999| 2015-07-19T23:39:02.238+0000 I SHARDING [conn1] sharded connection to test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.238+0000 m31100| 2015-07-19T23:39:02.238+0000 I NETWORK [conn24] end connection 10.139.123.131:47576 (22 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.238+0000 m30999| 2015-07-19T23:39:02.238+0000 I SHARDING [conn1] retrying command: { listIndexes: "coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.242+0000 m31100| 2015-07-19T23:39:02.241+0000 I INDEX [conn23] build index on: db2.coll2 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.242+0000 m31100| 2015-07-19T23:39:02.241+0000 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.242+0000 m31200| 2015-07-19T23:39:02.241+0000 I INDEX [conn20] build index on: db2.coll2 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.242+0000 m31200| 2015-07-19T23:39:02.241+0000 I INDEX [conn20] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.249+0000 m31100| 2015-07-19T23:39:02.242+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.249+0000 m31200| 2015-07-19T23:39:02.242+0000 I INDEX [conn20] build index done. scanned 0 total records. 0 secs
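The two splitChunk requests divide each shard's remaining chunk at the midpoint of its half of the hashed key space, leaving versions 2|2-2|3 on test-rs0 and 2|4-2|5 on test-rs1. The same splits can be requested explicitly through mongos; a sketch, reusing the split keys from the log:

    // split each shard's chunk at the quarter points of the hashed range
    assert.commandWorked(admin.runCommand({
        split: "db2.coll2", middle: { _id: NumberLong("-4611686018427387902") } }));
    assert.commandWorked(admin.runCommand({
        split: "db2.coll2", middle: { _id: NumberLong("4611686018427387902") } }));
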
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.249+0000 m31200| 2015-07-19T23:39:02.243+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:153 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 283ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.249+0000 m31200| 2015-07-19T23:39:02.243+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:153 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 281ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.250+0000 m31100| 2015-07-19T23:39:02.243+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:153 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 606ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.250+0000 m31100| 2015-07-19T23:39:02.243+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:153 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 606ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.250+0000 m31202| 2015-07-19T23:39:02.245+0000 I INDEX [repl writer worker 3] build index on: db2.coll2 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.250+0000 m31202| 2015-07-19T23:39:02.245+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.250+0000 m31101| 2015-07-19T23:39:02.245+0000 I INDEX [repl writer worker 9] build index on: db2.coll2 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.251+0000 m31101| 2015-07-19T23:39:02.245+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.251+0000 m31102| 2015-07-19T23:39:02.245+0000 I INDEX [repl writer worker 12] build index on: db2.coll2 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.251+0000 m31102| 2015-07-19T23:39:02.245+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.251+0000 m31201| 2015-07-19T23:39:02.246+0000 I INDEX [repl writer worker 9] build index on: db2.coll2 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.251+0000 m31201| 2015-07-19T23:39:02.246+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.251+0000 m31202| 2015-07-19T23:39:02.246+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.252+0000 m31101| 2015-07-19T23:39:02.247+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.252+0000 m31102| 2015-07-19T23:39:02.250+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.254+0000 m31100| 2015-07-19T23:39:02.250+0000 I INDEX [conn23] build index on: db2.coll2 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.254+0000 m31100| 2015-07-19T23:39:02.250+0000 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.254+0000 m31200| 2015-07-19T23:39:02.251+0000 I INDEX [conn20] build index on: db2.coll2 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.254+0000 m31200| 2015-07-19T23:39:02.251+0000 I INDEX [conn20] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.254+0000 m31100| 2015-07-19T23:39:02.252+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.255+0000 m31201| 2015-07-19T23:39:02.252+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.255+0000 m31200| 2015-07-19T23:39:02.253+0000 I INDEX [conn20] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.255+0000 m31102| 2015-07-19T23:39:02.254+0000 I INDEX [repl writer worker 9] build index on: db2.coll2 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.255+0000 m31102| 2015-07-19T23:39:02.254+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.256+0000 m31101| 2015-07-19T23:39:02.254+0000 I INDEX [repl writer worker 2] build index on: db2.coll2 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.256+0000 m31101| 2015-07-19T23:39:02.254+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.257+0000 m31101| 2015-07-19T23:39:02.255+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.257+0000 m31102| 2015-07-19T23:39:02.255+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.258+0000 m31100| 2015-07-19T23:39:02.258+0000 I INDEX [conn23] build index on: db2.coll2 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.259+0000 m31100| 2015-07-19T23:39:02.258+0000 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.259+0000 m31202| 2015-07-19T23:39:02.258+0000 I INDEX [repl writer worker 13] build index on: db2.coll2 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.259+0000 m31202| 2015-07-19T23:39:02.258+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.259+0000 m31200| 2015-07-19T23:39:02.259+0000 I INDEX [conn20] build index on: db2.coll2 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.259+0000 m31200| 2015-07-19T23:39:02.259+0000 I INDEX [conn20] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.260+0000 m31201| 2015-07-19T23:39:02.259+0000 I INDEX [repl writer worker 7] build index on: db2.coll2 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.260+0000 m31201| 2015-07-19T23:39:02.259+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.263+0000 m31100| 2015-07-19T23:39:02.263+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.263+0000 m31200| 2015-07-19T23:39:02.263+0000 I INDEX [conn20] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.264+0000 m31202| 2015-07-19T23:39:02.263+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.264+0000 m31201| 2015-07-19T23:39:02.263+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.266+0000 m31100| 2015-07-19T23:39:02.266+0000 I INDEX [conn23] build index on: db2.coll2 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.266+0000 m31100| 2015-07-19T23:39:02.266+0000 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.267+0000 m31200| 2015-07-19T23:39:02.267+0000 I INDEX [conn20] build index on: db2.coll2 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.267+0000 m31200| 2015-07-19T23:39:02.267+0000 I INDEX [conn20] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.270+0000 m31200| 2015-07-19T23:39:02.269+0000 I INDEX [conn20] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.270+0000 m31101| 2015-07-19T23:39:02.269+0000 I INDEX [repl writer worker 13] build index on: db2.coll2 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.271+0000 m31101| 2015-07-19T23:39:02.269+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.271+0000 m31102| 2015-07-19T23:39:02.269+0000 I INDEX [repl writer worker 15] build index on: db2.coll2 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.271+0000 m31102| 2015-07-19T23:39:02.269+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.272+0000 m31201| 2015-07-19T23:39:02.269+0000 I INDEX [repl writer worker 6] build index on: db2.coll2 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.272+0000 m31201| 2015-07-19T23:39:02.269+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.272+0000 m31100| 2015-07-19T23:39:02.269+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.272+0000 m31202| 2015-07-19T23:39:02.270+0000 I INDEX [repl writer worker 12] build index on: db2.coll2 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.272+0000 m31202| 2015-07-19T23:39:02.270+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.272+0000 m31201| 2015-07-19T23:39:02.270+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.273+0000 m31101| 2015-07-19T23:39:02.271+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.274+0000 m31102| 2015-07-19T23:39:02.273+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.275+0000 m31201| 2015-07-19T23:39:02.274+0000 I INDEX [repl writer worker 5] build index on: db2.coll2 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.276+0000 m31201| 2015-07-19T23:39:02.274+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.276+0000 m31101| 2015-07-19T23:39:02.275+0000 I INDEX [repl writer worker 1] build index on: db2.coll2 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.276+0000 m31101| 2015-07-19T23:39:02.275+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.277+0000 m31102| 2015-07-19T23:39:02.275+0000 I INDEX [repl writer worker 14] build index on: db2.coll2 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.277+0000 m31102| 2015-07-19T23:39:02.275+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.278+0000 m31202| 2015-07-19T23:39:02.276+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.278+0000 m31100| 2015-07-19T23:39:02.276+0000 I COMMAND [conn15] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.278+0000 m31101| 2015-07-19T23:39:02.277+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.278+0000 m31200| 2015-07-19T23:39:02.277+0000 I COMMAND [conn18] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.278+0000 m31102| 2015-07-19T23:39:02.277+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.278+0000 m31100| 2015-07-19T23:39:02.278+0000 I COMMAND [conn15] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.279+0000 m31201| 2015-07-19T23:39:02.278+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.279+0000 m31200| 2015-07-19T23:39:02.278+0000 I COMMAND [conn18] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.281+0000 m31100| 2015-07-19T23:39:02.279+0000 I COMMAND [conn15] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.281+0000 m31200| 2015-07-19T23:39:02.279+0000 I COMMAND [conn18] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.281+0000 m31202| 2015-07-19T23:39:02.280+0000 I INDEX [repl writer worker 10] build index on: db2.coll2 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.282+0000 m31202| 2015-07-19T23:39:02.280+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.286+0000 m31100| 2015-07-19T23:39:02.280+0000 I COMMAND [conn15] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.286+0000 m31102| 2015-07-19T23:39:02.280+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.286+0000 m31200| 2015-07-19T23:39:02.281+0000 I COMMAND [conn18] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.286+0000 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.286+0000 m31201| 2015-07-19T23:39:02.281+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.286+0000 m31202| 2015-07-19T23:39:02.282+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.287+0000 m31102| 2015-07-19T23:39:02.282+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.287+0000 m31101| 2015-07-19T23:39:02.282+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.287+0000 m31201| 2015-07-19T23:39:02.283+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.287+0000 m31102| 2015-07-19T23:39:02.283+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.287+0000 m31101| 2015-07-19T23:39:02.284+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.287+0000 m31201| 2015-07-19T23:39:02.284+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.287+0000 m31102| 2015-07-19T23:39:02.284+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.288+0000 m31202| 2015-07-19T23:39:02.285+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.288+0000 m31101| 2015-07-19T23:39:02.285+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.288+0000 m31201| 2015-07-19T23:39:02.285+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.288+0000 m31202| 2015-07-19T23:39:02.286+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db2.coll2
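With the chunks in place, the workload's setup builds its secondary indexes (a_1, b_1, x_1, y_1) through both shards, each build replicating to the four secondaries, then drops them again, which is the CMD: dropIndexes storm above; the runner then starts its worker threads ("Using 10 threads (requested 10)"). In shell terms the index churn reduces to roughly the following sketch; the actual workload source lives at jstests/concurrency/fsm_workloads/update_replace_noindex.js and is not reproduced here:

    var coll = new Mongo("ip-10-139-123-131:30999").getDB("db2").coll2;
    // setup: one ascending index per updated field
    ["a", "b", "x", "y"].forEach(function(f) {
        var spec = {};
        spec[f] = 1;
        assert.commandWorked(coll.createIndex(spec));
    });
    // teardown for the "noindex" variant: drop them again; each drop
    // replicates to the secondaries as "CMD: dropIndexes"
    ["a_1", "b_1", "x_1", "y_1"].forEach(function(name) {
        coll.dropIndex(name);
    });
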
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.288+0000 m31101| 2015-07-19T23:39:02.287+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.288+0000 m31202| 2015-07-19T23:39:02.287+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.288+0000 m31202| 2015-07-19T23:39:02.288+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.393+0000 m30999| 2015-07-19T23:39:02.393+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57055 #15 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.404+0000 m30999| 2015-07-19T23:39:02.404+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57056 #16 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.433+0000 m30999| 2015-07-19T23:39:02.432+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57057 #17 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.447+0000 m30998| 2015-07-19T23:39:02.447+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35699 #14 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.449+0000 m30998| 2015-07-19T23:39:02.449+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35700 #15 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.453+0000 m30999| 2015-07-19T23:39:02.453+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57060 #18 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.454+0000 m30998| 2015-07-19T23:39:02.454+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35702 #16 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.456+0000 m30998| 2015-07-19T23:39:02.456+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35703 #17 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.459+0000 m30998| 2015-07-19T23:39:02.459+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35704 #18 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.473+0000 m30999| 2015-07-19T23:39:02.473+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57064 #19 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.477+0000 setting random seed: 8377407980151
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.479+0000 setting random seed: 6591294026002
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.479+0000 setting random seed: 1006192411296
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.479+0000 setting random seed: 3606938146986
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.479+0000 setting random seed: 2651590267196
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.479+0000 setting random seed: 6633999794721
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.482+0000 setting random seed: 4436709159053
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.485+0000 m31200| 2015-07-19T23:39:02.485+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:159 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 199ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.485+0000 m31200| 2015-07-19T23:39:02.485+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:159 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 199ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.485+0000 m30998| 2015-07-19T23:39:02.485+0000 I SHARDING [conn14] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 4 version: 2|5||55ac3515d2c1f750d1548367 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.495+0000 setting random seed: 634823199361
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.495+0000 setting random seed: 8055532565340
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.495+0000 setting random seed: 6690852008759
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.518+0000 m31100| 2015-07-19T23:39:02.518+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47631 #29 (23 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.519+0000 m31100| 2015-07-19T23:39:02.519+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47632 #30 (24 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.519+0000 m31200| 2015-07-19T23:39:02.519+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39469 #27 (23 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.520+0000 m31100| 2015-07-19T23:39:02.519+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47634 #31 (25 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.520+0000 m31200| 2015-07-19T23:39:02.520+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39471 #28 (24 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.530+0000 m30998| 2015-07-19T23:39:02.529+0000 I NETWORK [conn14] end connection 10.139.123.131:35699 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.531+0000 m31100| 2015-07-19T23:39:02.530+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47636 #32 (26 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.545+0000 m31200| 2015-07-19T23:39:02.545+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39473 #29 (25 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.550+0000 m30998| 2015-07-19T23:39:02.549+0000 I NETWORK [conn18] end connection 10.139.123.131:35704 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.559+0000 m30999| 2015-07-19T23:39:02.558+0000 I NETWORK [conn16] end connection 10.139.123.131:57056 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.564+0000 m30999| 2015-07-19T23:39:02.564+0000 I NETWORK [conn17] end connection 10.139.123.131:57057 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.571+0000 m30998| 2015-07-19T23:39:02.571+0000 I NETWORK [conn17] end connection 10.139.123.131:35703 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.587+0000 m30998| 2015-07-19T23:39:02.586+0000 I NETWORK [conn16] end connection 10.139.123.131:35702 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.595+0000 m30998| 2015-07-19T23:39:02.595+0000 I NETWORK [conn15] end connection 10.139.123.131:35700 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.597+0000 m30999| 2015-07-19T23:39:02.596+0000 I NETWORK [conn18] end connection 10.139.123.131:57060 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.614+0000 m30999| 2015-07-19T23:39:02.614+0000 I NETWORK [conn19] end connection 10.139.123.131:57064 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.617+0000 m30999| 2015-07-19T23:39:02.617+0000 I NETWORK [conn15] end connection 10.139.123.131:57055 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.673+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.673+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.673+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.674+0000 jstests/concurrency/fsm_workloads/update_replace_noindex.js: Workload completed in 392 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.674+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.674+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.674+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.674+0000 m30999| 2015-07-19T23:39:02.673+0000 I COMMAND [conn1] DROP: db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.674+0000 m30999| 2015-07-19T23:39:02.673+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:02.673+0000-55ac3516d2c1f750d1548369", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349142673), what: "dropCollection.start", ns: "db2.coll2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.725+0000 m30999| 2015-07-19T23:39:02.725+0000 I SHARDING [conn1] distributed lock 'db2.coll2/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3516d2c1f750d154836a
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.726+0000 m31100| 2015-07-19T23:39:02.725+0000 I COMMAND [conn12] CMD: drop db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.726+0000 m31100| 2015-07-19T23:39:02.726+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 441ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.726+0000 m31100| 2015-07-19T23:39:02.726+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 441ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.727+0000 m31200| 2015-07-19T23:39:02.727+0000 I COMMAND [conn14] CMD: drop db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.727+0000 m31200| 2015-07-19T23:39:02.727+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 117ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.728+0000 m31200| 2015-07-19T23:39:02.727+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 117ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.729+0000 m31101| 2015-07-19T23:39:02.728+0000 I COMMAND [repl writer worker 0] CMD: drop db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.729+0000 m31102| 2015-07-19T23:39:02.729+0000 I COMMAND [repl writer worker 7] CMD: drop db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.731+0000 m31202| 2015-07-19T23:39:02.730+0000 I COMMAND [repl writer worker 0] CMD: drop db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.732+0000 m31201| 2015-07-19T23:39:02.730+0000 I COMMAND [repl writer worker 15] CMD: drop db2.coll2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.779+0000 m31100| 2015-07-19T23:39:02.779+0000 I SHARDING [conn12] remotely refreshing metadata for db2.coll2 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac3515d2c1f750d1548367, current metadata version is 2|3||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.779+0000 m31100| 2015-07-19T23:39:02.779+0000 W SHARDING [conn12] no chunks found when reloading db2.coll2, previous version was 0|0||55ac3515d2c1f750d1548367, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.780+0000 m31100| 2015-07-19T23:39:02.779+0000 I SHARDING [conn12] dropping metadata for db2.coll2 at shard version 2|3||55ac3515d2c1f750d1548367, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.780+0000 m31200| 2015-07-19T23:39:02.780+0000 I SHARDING [conn14] remotely refreshing metadata for db2.coll2 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac3515d2c1f750d1548367, current metadata version is 2|5||55ac3515d2c1f750d1548367
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.780+0000 m31200| 2015-07-19T23:39:02.780+0000 W SHARDING [conn14] no chunks found when reloading db2.coll2, previous version was 0|0||55ac3515d2c1f750d1548367, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.780+0000 m31200| 2015-07-19T23:39:02.780+0000 I SHARDING [conn14] dropping metadata for db2.coll2 at shard version 2|5||55ac3515d2c1f750d1548367, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.781+0000 m30999| 2015-07-19T23:39:02.780+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:02.780+0000-55ac3516d2c1f750d154836b", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349142780), what: "dropCollection", ns: "db2.coll2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.832+0000 m30999| 2015-07-19T23:39:02.832+0000 I SHARDING [conn1] distributed lock 'db2.coll2/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
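After the workload completes, the harness drops the sharded collection through mongos: the router logs dropCollection.start, takes the distributed lock, issues CMD: drop on the primary of each shard (replicated to the secondaries), and each shard discards its chunk metadata ("no chunks found when reloading db2.coll2 ... this is a drop"). From a shell the whole sequence is one call (a sketch, reusing the mongos connection from the earlier snippets):

    // mongos coordinates the drop across test-rs0, test-rs1, and the config metadata
    new Mongo("ip-10-139-123-131:30999").getDB("db2").coll2.drop();
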
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.883+0000 m30999| 2015-07-19T23:39:02.883+0000 I COMMAND [conn1] DROP DATABASE: db2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.884+0000 m30999| 2015-07-19T23:39:02.883+0000 I SHARDING [conn1] DBConfig::dropDatabase: db2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.884+0000 m30999| 2015-07-19T23:39:02.883+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:02.883+0000-55ac3516d2c1f750d154836c", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349142883), what: "dropDatabase.start", ns: "db2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.985+0000 m30999| 2015-07-19T23:39:02.985+0000 I SHARDING [conn1] DBConfig::dropDatabase: db2 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.986+0000 m31100| 2015-07-19T23:39:02.985+0000 I COMMAND [conn27] dropDatabase db2 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.986+0000 m31100| 2015-07-19T23:39:02.985+0000 I COMMAND [conn27] dropDatabase db2 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.986+0000 m31100| 2015-07-19T23:39:02.985+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.986+0000 m31100| 2015-07-19T23:39:02.985+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.987+0000 m30999| 2015-07-19T23:39:02.985+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:02.985+0000-55ac3516d2c1f750d154836d", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349142985), what: "dropDatabase", ns: "db2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.987+0000 m31101| 2015-07-19T23:39:02.986+0000 I COMMAND [repl writer worker 15] dropDatabase db2 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.987+0000 m31101| 2015-07-19T23:39:02.986+0000 I COMMAND [repl writer worker 15] dropDatabase db2 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.987+0000 m31102| 2015-07-19T23:39:02.986+0000 I COMMAND [repl writer worker 5] dropDatabase db2 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:02.987+0000 m31102| 2015-07-19T23:39:02.986+0000 I COMMAND [repl writer worker 5] dropDatabase db2 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.045+0000 m31100| 2015-07-19T23:39:03.045+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.046+0000 m31102| 2015-07-19T23:39:03.045+0000 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.046+0000 m31101| 2015-07-19T23:39:03.045+0000 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown
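Database teardown follows the collection drop: DBConfig::dropDatabase removes the db2 entry from the config metadata (having found 0 remaining sharded collections), dropDatabase runs on the primary shard and replicates to its secondaries, and the harness drops its test.fsm_teardown scratch collection. The shell equivalent is a single command (sketch):

    // drop the now-empty database; mongos clears the config entry first,
    // then forwards dropDatabase to the primary shard test-rs0
    assert.commandWorked(new Mongo("ip-10-139-123-131:30999").getDB("db2").dropDatabase());
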
2015-07-19T23:39:03.050+0000 m31200| 2015-07-19T23:39:03.049+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 317ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.050+0000 m31200| 2015-07-19T23:39:03.049+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 317ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.055+0000 m31200| 2015-07-19T23:39:03.055+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.056+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.056+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.056+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.057+0000 jstests/concurrency/fsm_workloads/yield.js [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.057+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.057+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.057+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.059+0000 m31202| 2015-07-19T23:39:03.058+0000 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.060+0000 m31201| 2015-07-19T23:39:03.058+0000 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.060+0000 m30999| 2015-07-19T23:39:03.059+0000 I SHARDING [conn1] distributed lock 'db3/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3517d2c1f750d154836e [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.061+0000 m30999| 2015-07-19T23:39:03.061+0000 I SHARDING [conn1] Placing [db3] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.061+0000 m30999| 2015-07-19T23:39:03.061+0000 I SHARDING [conn1] Enabling sharding for database [db3] in config db [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.112+0000 m30999| 2015-07-19T23:39:03.112+0000 I SHARDING [conn1] distributed lock 'db3/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.118+0000 m31100| 2015-07-19T23:39:03.118+0000 I INDEX [conn16] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.118+0000 m31100| 2015-07-19T23:39:03.118+0000 I INDEX [conn16] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.119+0000 m31100| 2015-07-19T23:39:03.119+0000 I INDEX [conn16] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.120+0000 m30999| 2015-07-19T23:39:03.120+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db3.coll3", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.121+0000 m30999| 2015-07-19T23:39:03.121+0000 I SHARDING [conn1] distributed lock 'db3.coll3/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3517d2c1f750d154836f [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.121+0000 m30999| 2015-07-19T23:39:03.121+0000 I SHARDING [conn1] enable sharding on: db3.coll3 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.121+0000 m30999| 2015-07-19T23:39:03.121+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:03.121+0000-55ac3517d2c1f750d1548370", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349143121), what: "shardCollection.start", ns: "db3.coll3", details: { shardKey: { _id: "hashed" }, collection: "db3.coll3", primary: "test-rs0:test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.123+0000 m31102| 2015-07-19T23:39:03.123+0000 I INDEX [repl writer worker 4] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.123+0000 m31102| 2015-07-19T23:39:03.123+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.124+0000 m31101| 2015-07-19T23:39:03.124+0000 I INDEX [repl writer worker 4] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.124+0000 m31101| 2015-07-19T23:39:03.124+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.124+0000 m31102| 2015-07-19T23:39:03.124+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.125+0000 m31101| 2015-07-19T23:39:03.125+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.172+0000 m30999| 2015-07-19T23:39:03.172+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db3.coll3 using new epoch 55ac3517d2c1f750d1548371 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.274+0000 m30999| 2015-07-19T23:39:03.274+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 17 version: 1|1||55ac3517d2c1f750d1548371 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.326+0000 m30999| 2015-07-19T23:39:03.325+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 18 version: 1|1||55ac3517d2c1f750d1548371 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.326+0000 m31100| 2015-07-19T23:39:03.326+0000 I SHARDING [conn23] remotely refreshing metadata for db3.coll3 with requested shard version 1|1||55ac3517d2c1f750d1548371, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.327+0000 m31100| 2015-07-19T23:39:03.327+0000 I SHARDING [conn23] collection db3.coll3 was previously unsharded, new metadata loaded with shard version 1|1||55ac3517d2c1f750d1548371 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.327+0000 m31100| 2015-07-19T23:39:03.327+0000 I SHARDING [conn23] collection version was loaded at version 1|1||55ac3517d2c1f750d1548371, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.327+0000 m30999| 2015-07-19T23:39:03.327+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:03.327+0000-55ac3517d2c1f750d1548372", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349143327), what: "shardCollection", ns: "db3.coll3", details: { version: "1|1||55ac3517d2c1f750d1548371" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.378+0000 m30999| 2015-07-19T23:39:03.378+0000 I SHARDING [conn1] distributed lock 'db3.coll3/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.378+0000 m30999| 2015-07-19T23:39:03.378+0000 I SHARDING [conn1] moving chunk ns: db3.coll3 moving ( ns: db3.coll3, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.379+0000 m31100| 2015-07-19T23:39:03.378+0000 I SHARDING [conn15] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.379+0000 m31100| 2015-07-19T23:39:03.379+0000 I SHARDING [conn15] received moveChunk request: { moveChunk: "db3.coll3", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", to: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3517d2c1f750d1548371') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.380+0000 m31100| 2015-07-19T23:39:03.380+0000 I SHARDING [conn15] distributed lock 'db3.coll3/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac351768c42881b59cba25
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.380+0000 m31100| 2015-07-19T23:39:03.380+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:03.380+0000-55ac351768c42881b59cba26", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349143380), what: "moveChunk.start", ns: "db3.coll3", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.431+0000 m31100| 2015-07-19T23:39:03.431+0000 I SHARDING [conn15] remotely refreshing metadata for db3.coll3 based on current shard version 1|1||55ac3517d2c1f750d1548371, current metadata version is 1|1||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.431+0000 m31100| 2015-07-19T23:39:03.431+0000 I SHARDING [conn15] metadata of collection db3.coll3 already up to date (shard version : 1|1||55ac3517d2c1f750d1548371, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.431+0000 m31100| 2015-07-19T23:39:03.431+0000 I SHARDING [conn15] moveChunk request accepted at version 1|1||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.432+0000 m31100| 2015-07-19T23:39:03.432+0000 I SHARDING [conn15] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.432+0000 m31200| 2015-07-19T23:39:03.432+0000 I SHARDING [conn16] remotely refreshing metadata for db3.coll3, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.432+0000 m31200| 2015-07-19T23:39:03.432+0000 I SHARDING [conn16] collection db3.coll3 was previously unsharded, new metadata loaded with shard version 0|0||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.433+0000 m31200| 2015-07-19T23:39:03.432+0000 I SHARDING [conn16] collection version was loaded at version 1|1||55ac3517d2c1f750d1548371, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.433+0000 m31200| 2015-07-19T23:39:03.432+0000 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db3.coll3 from test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102 at epoch 55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.434+0000 m31100| 2015-07-19T23:39:03.434+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.435+0000 m31200| 2015-07-19T23:39:03.434+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 374ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.435+0000 m31200| 2015-07-19T23:39:03.434+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 374ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.436+0000 m31100| 2015-07-19T23:39:03.436+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.438+0000 m31200| 2015-07-19T23:39:03.437+0000 I INDEX [migrateThread] build index on: db3.coll3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.438+0000 m31200| 2015-07-19T23:39:03.437+0000 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.439+0000 m31200| 2015-07-19T23:39:03.439+0000 I INDEX [migrateThread] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.440+0000 m31200| 2015-07-19T23:39:03.440+0000 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.440+0000 m31100| 2015-07-19T23:39:03.440+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.442+0000 m31200| 2015-07-19T23:39:03.442+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.442+0000 m31200| 2015-07-19T23:39:03.442+0000 I SHARDING [migrateThread] Deleter starting delete for: db3.coll3 from { _id: 0 } -> { _id: MaxKey }, with opId: 11245
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.443+0000 m31200| 2015-07-19T23:39:03.442+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db3.coll3 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.447+0000 m31201| 2015-07-19T23:39:03.446+0000 I INDEX [repl writer worker 12] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.447+0000 m31201| 2015-07-19T23:39:03.446+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.447+0000 m31202| 2015-07-19T23:39:03.447+0000 I INDEX [repl writer worker 2] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.448+0000 m31202| 2015-07-19T23:39:03.447+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.448+0000 m31201| 2015-07-19T23:39:03.448+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.449+0000 m31200| 2015-07-19T23:39:03.448+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.449+0000 m31202| 2015-07-19T23:39:03.448+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.449+0000 m31200| 2015-07-19T23:39:03.448+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db3.coll3' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.449+0000 m31100| 2015-07-19T23:39:03.449+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "catchup", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.465+0000 m31100| 2015-07-19T23:39:03.465+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.465+0000 m31100| 2015-07-19T23:39:03.465+0000 I SHARDING [conn15] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.465+0000 m31100| 2015-07-19T23:39:03.465+0000 I SHARDING [conn15] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.465+0000 m31100| 2015-07-19T23:39:03.465+0000 I SHARDING [conn15] moveChunk setting version to: 2|0||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.469+0000 m31200| 2015-07-19T23:39:03.469+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db3.coll3' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.470+0000 m31200| 2015-07-19T23:39:03.469+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:03.469+0000-55ac3517d9a63f6196b17256", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349143469), what: "moveChunk.to", ns: "db3.coll3", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 9, step 2 of 5: 5, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 21, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.521+0000 m31100| 2015-07-19T23:39:03.520+0000 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db3.coll3", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.521+0000 m31100| 2015-07-19T23:39:03.520+0000 I SHARDING [conn15] moveChunk updating self version to: 2|1||55ac3517d2c1f750d1548371 through { _id: MinKey } -> { _id: 0 } for collection 'db3.coll3'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.522+0000 m31100| 2015-07-19T23:39:03.521+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:03.521+0000-55ac351768c42881b59cba27", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349143521), what: "moveChunk.commit", ns: "db3.coll3", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.572+0000 m31100| 2015-07-19T23:39:03.572+0000 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.572+0000 m31100| 2015-07-19T23:39:03.572+0000 I SHARDING [conn15] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.573+0000 m31100| 2015-07-19T23:39:03.572+0000 I SHARDING [conn15] Deleter starting delete for: db3.coll3 from { _id: 0 } -> { _id: MaxKey }, with opId: 8528
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.573+0000 m31100| 2015-07-19T23:39:03.572+0000 I SHARDING [conn15] rangeDeleter deleted 0 documents for db3.coll3 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.573+0000 m31100| 2015-07-19T23:39:03.572+0000 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.573+0000 m31100| 2015-07-19T23:39:03.573+0000 I SHARDING [conn15] distributed lock 'db3.coll3/ip-10-139-123-131:31100:1437349130:1993228155' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.573+0000 m31100| 2015-07-19T23:39:03.573+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:03.573+0000-55ac351768c42881b59cba28", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349143573), what: "moveChunk.from", ns: "db3.coll3", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 32, step 5 of 6: 107, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.624+0000 m31100| 2015-07-19T23:39:03.623+0000 I COMMAND [conn15] command db3.coll3 command: moveChunk { moveChunk: "db3.coll3", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", to: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3517d2c1f750d1548371') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 245ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.625+0000 m30999| 2015-07-19T23:39:03.624+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 19 version: 2|1||55ac3517d2c1f750d1548371 based on: 1|1||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.625+0000 m31100| 2015-07-19T23:39:03.625+0000 I SHARDING [conn15] received splitChunk request: { splitChunk: "db3.coll3", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3517d2c1f750d1548371') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.626+0000 m31100| 2015-07-19T23:39:03.626+0000 I SHARDING [conn15] distributed lock 'db3.coll3/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac351768c42881b59cba29
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.626+0000 m31100| 2015-07-19T23:39:03.626+0000 I SHARDING [conn15] remotely refreshing metadata for db3.coll3 based on current shard version 2|0||55ac3517d2c1f750d1548371, current metadata version is 2|0||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.626+0000 m31100| 2015-07-19T23:39:03.626+0000 I SHARDING [conn15] updating metadata for db3.coll3 from shard version 2|0||55ac3517d2c1f750d1548371 to shard version 2|1||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.626+0000 m31100| 2015-07-19T23:39:03.626+0000 I SHARDING [conn15] collection version was loaded at version 2|1||55ac3517d2c1f750d1548371, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.627+0000 m31100| 2015-07-19T23:39:03.626+0000 I SHARDING [conn15] splitChunk accepted at version 2|1||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.627+0000 m31100| 2015-07-19T23:39:03.627+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:03.627+0000-55ac351768c42881b59cba2a", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349143627), what: "split", ns: "db3.coll3", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac3517d2c1f750d1548371') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac3517d2c1f750d1548371') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.679+0000 m31100| 2015-07-19T23:39:03.678+0000 I SHARDING [conn15] distributed lock 'db3.coll3/ip-10-139-123-131:31100:1437349130:1993228155' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.679+0000 m30999| 2015-07-19T23:39:03.679+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 20 version: 2|3||55ac3517d2c1f750d1548371 based on: 2|1||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.679+0000 m31200| 2015-07-19T23:39:03.679+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db3.coll3", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3517d2c1f750d1548371') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.680+0000 m31200| 2015-07-19T23:39:03.680+0000 I SHARDING [conn18] distributed lock 'db3.coll3/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3517d9a63f6196b17257
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.681+0000 m31200| 2015-07-19T23:39:03.680+0000 I SHARDING [conn18] remotely refreshing metadata for db3.coll3 based on current shard version 0|0||55ac3517d2c1f750d1548371, current metadata version is 1|1||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.681+0000 m31200| 2015-07-19T23:39:03.681+0000 I SHARDING [conn18] updating metadata for db3.coll3 from shard version 0|0||55ac3517d2c1f750d1548371 to shard version 2|0||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.681+0000 m31200| 2015-07-19T23:39:03.681+0000 I SHARDING [conn18] collection version was loaded at version 2|3||55ac3517d2c1f750d1548371, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.681+0000 m31200| 2015-07-19T23:39:03.681+0000 I SHARDING [conn18] splitChunk accepted at version 2|0||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.682+0000 m31200| 2015-07-19T23:39:03.681+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:03.681+0000-55ac3517d9a63f6196b17258", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349143681), what: "split", ns: "db3.coll3", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac3517d2c1f750d1548371') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac3517d2c1f750d1548371') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.733+0000 m31200| 2015-07-19T23:39:03.733+0000 I SHARDING [conn18] distributed lock 'db3.coll3/ip-10-139-123-131:31200:1437349131:182555922' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.733+0000 m30999| 2015-07-19T23:39:03.733+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 21 version: 2|5||55ac3517d2c1f750d1548371 based on: 2|3||55ac3517d2c1f750d1548371
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.747+0000 m31100| 2015-07-19T23:39:03.745+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:170 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 623ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.747+0000 m31100| 2015-07-19T23:39:03.745+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:3 reslen:469 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 623ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.770+0000 m31200| 2015-07-19T23:39:03.748+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:25 reslen:3750 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 301ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.770+0000 m31200| 2015-07-19T23:39:03.748+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:25 reslen:3750 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 301ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.781+0000 Using 5 threads (requested 5)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.838+0000 m30998| 2015-07-19T23:39:03.838+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35713 #19 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.839+0000 m30999| 2015-07-19T23:39:03.839+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57073 #20 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.842+0000 m30998| 2015-07-19T23:39:03.842+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35715 #20 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.865+0000 m30998| 2015-07-19T23:39:03.865+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35716 #21 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.866+0000 m30999| 2015-07-19T23:39:03.866+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57076 #21 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.872+0000 setting random seed: 2389963348396
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.872+0000 setting random seed: 2052834434434
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.872+0000 setting random seed: 1386536732316
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.872+0000 setting random seed: 9749456210993
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.873+0000 setting random seed: 8428035061806
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.874+0000 m30998| 2015-07-19T23:39:03.874+0000 I SHARDING [conn19] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 5 version: 2|5||55ac3517d2c1f750d1548371 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.880+0000 m31100| 2015-07-19T23:39:03.879+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 104ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.880+0000 m31100| 2015-07-19T23:39:03.880+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 104ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.911+0000 m31100| 2015-07-19T23:39:03.911+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47643 #33 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.921+0000 m31100| 2015-07-19T23:39:03.921+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47644 #34 (28 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.922+0000 m31200| 2015-07-19T23:39:03.922+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39481 #30 (26 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.933+0000 m31100| 2015-07-19T23:39:03.933+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47646 #35 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.943+0000 m31200| 2015-07-19T23:39:03.942+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39483 #31 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.946+0000 m31100| 2015-07-19T23:39:03.945+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47648 #36 (30 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:03.948+0000 m31200| 2015-07-19T23:39:03.948+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39485 #32 (28 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.111+0000 m31100| 2015-07-19T23:39:04.110+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47650 #37 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.147+0000 m31100| 2015-07-19T23:39:04.146+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 207ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.147+0000 m31100| 2015-07-19T23:39:04.146+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 207ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.150+0000 m31200| 2015-07-19T23:39:04.149+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 213ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.150+0000 m31200| 2015-07-19T23:39:04.149+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 213ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.255+0000 m31100| 2015-07-19T23:39:04.254+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 102ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.255+0000 m31100| 2015-07-19T23:39:04.254+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 102ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.582+0000 m31100| 2015-07-19T23:39:04.582+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47651 #38 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.649+0000 m31100| 2015-07-19T23:39:04.648+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 329ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.649+0000 m31100| 2015-07-19T23:39:04.648+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 329ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.657+0000 m31200| 2015-07-19T23:39:04.656+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 247ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.657+0000 m31200| 2015-07-19T23:39:04.657+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 247ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.860+0000 m31200| 2015-07-19T23:39:04.860+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 200ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.860+0000 m31200| 2015-07-19T23:39:04.860+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 200ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.978+0000 m31100| 2015-07-19T23:39:04.977+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 321ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.978+0000 m31100| 2015-07-19T23:39:04.977+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 321ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.993+0000 m31200| 2015-07-19T23:39:04.992+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 103ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:04.993+0000 m31200| 2015-07-19T23:39:04.992+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 101ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.292+0000 m31100| 2015-07-19T23:39:05.292+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 179ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.292+0000 m31100| 2015-07-19T23:39:05.292+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 181ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.294+0000 m31200| 2015-07-19T23:39:05.294+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 296ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.294+0000 m31200| 2015-07-19T23:39:05.294+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 296ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.568+0000 m31100| 2015-07-19T23:39:05.567+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 270ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.568+0000 m31100| 2015-07-19T23:39:05.567+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 270ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.604+0000 m31200| 2015-07-19T23:39:05.604+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 222ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.605+0000 m31200| 2015-07-19T23:39:05.604+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 222ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.736+0000 m31200| 2015-07-19T23:39:05.735+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39488 #33 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.758+0000 m31200| 2015-07-19T23:39:05.758+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39489 #34 (30 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.799+0000 m31100| 2015-07-19T23:39:05.799+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 138ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:05.799+0000 m31100| 2015-07-19T23:39:05.799+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 138ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.022+0000 m31200| 2015-07-19T23:39:06.022+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 331ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.023+0000 m31200| 2015-07-19T23:39:06.022+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 330ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.062+0000 m31100| 2015-07-19T23:39:06.062+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 175ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.092+0000 m31100| 2015-07-19T23:39:06.062+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 178ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.311+0000 m31200| 2015-07-19T23:39:06.311+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 140ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.312+0000 m31200| 2015-07-19T23:39:06.311+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 140ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.331+0000 m31100| 2015-07-19T23:39:06.330+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 161ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.331+0000 m31100| 2015-07-19T23:39:06.330+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 161ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.604+0000 m31200| 2015-07-19T23:39:06.603+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 170ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.604+0000 m31200| 2015-07-19T23:39:06.603+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 170ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.712+0000 m31100| 2015-07-19T23:39:06.711+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 103ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.712+0000 m31100| 2015-07-19T23:39:06.711+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 104ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.816+0000 m31200| 2015-07-19T23:39:06.816+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 107ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.817+0000 m31200| 2015-07-19T23:39:06.816+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 107ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.965+0000 m31100| 2015-07-19T23:39:06.965+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 127ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.966+0000 m31100| 2015-07-19T23:39:06.965+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 127ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.969+0000 m31200| 2015-07-19T23:39:06.969+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 136ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:06.969+0000 m31200| 2015-07-19T23:39:06.969+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 136ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.162+0000 m31100| 2015-07-19T23:39:07.162+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 159ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.162+0000 m31100| 2015-07-19T23:39:07.162+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 159ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.203+0000 m31200| 2015-07-19T23:39:07.202+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 217ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.203+0000 m31200| 2015-07-19T23:39:07.202+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 217ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.355+0000 m31100| 2015-07-19T23:39:07.355+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 190ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.356+0000 m31100| 2015-07-19T23:39:07.355+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 188ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.458+0000 m31200| 2015-07-19T23:39:07.457+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 221ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.458+0000 m31200| 2015-07-19T23:39:07.457+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 221ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.461+0000 m31100| 2015-07-19T23:39:07.461+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 104ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.462+0000 m31100| 2015-07-19T23:39:07.461+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 103ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.647+0000 m31200| 2015-07-19T23:39:07.647+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 186ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.648+0000 m31200| 2015-07-19T23:39:07.647+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 186ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.648+0000 m31100| 2015-07-19T23:39:07.648+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 184ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.649+0000 m31100| 2015-07-19T23:39:07.648+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 184ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.992+0000 m31200| 2015-07-19T23:39:07.992+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 271ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.993+0000 m31200| 2015-07-19T23:39:07.992+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 271ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.996+0000 m31100| 2015-07-19T23:39:07.996+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 319ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:07.996+0000 m31100| 2015-07-19T23:39:07.996+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 319ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.191+0000 m31200| 2015-07-19T23:39:08.190+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 195ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.191+0000 m31200| 2015-07-19T23:39:08.190+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 193ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.307+0000 m31200| 2015-07-19T23:39:08.306+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.307+0000 m31200| 2015-07-19T23:39:08.306+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.311+0000 m31100| 2015-07-19T23:39:08.310+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 312ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.311+0000 m31100| 2015-07-19T23:39:08.310+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 309ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.414+0000 m30999| 2015-07-19T23:39:08.414+0000 I NETWORK [conn20] end connection 10.139.123.131:57073 (2 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:08.486+0000 m31100| 2015-07-19T23:39:08.486+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 159ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.487+0000 m31100| 2015-07-19T23:39:08.486+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 157ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.577+0000 m31200| 2015-07-19T23:39:08.576+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 215ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.577+0000 m31200| 2015-07-19T23:39:08.576+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 215ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.644+0000 m30999| 2015-07-19T23:39:08.643+0000 I NETWORK [conn21] end connection 10.139.123.131:57076 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.698+0000 m31200| 2015-07-19T23:39:08.698+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.699+0000 m31200| 2015-07-19T23:39:08.698+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.702+0000 m31100| 2015-07-19T23:39:08.702+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.703+0000 m31100| 2015-07-19T23:39:08.702+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.958+0000 m31100| 
2015-07-19T23:39:08.958+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 160ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.959+0000 m31100| 2015-07-19T23:39:08.958+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 160ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.965+0000 m31200| 2015-07-19T23:39:08.965+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 170ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:08.966+0000 m31200| 2015-07-19T23:39:08.965+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 167ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.086+0000 m31200| 2015-07-19T23:39:09.085+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 115ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.086+0000 m31200| 2015-07-19T23:39:09.085+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 115ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.132+0000 m31100| 2015-07-19T23:39:09.131+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 159ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.132+0000 m31100| 2015-07-19T23:39:09.131+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 159ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.169+0000 m30998| 2015-07-19T23:39:09.169+0000 I NETWORK [conn19] end connection 10.139.123.131:35713 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.238+0000 m31200| 2015-07-19T23:39:09.238+0000 I QUERY 
[conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 147ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.239+0000 m31200| 2015-07-19T23:39:09.238+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 147ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.241+0000 m31100| 2015-07-19T23:39:09.240+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.241+0000 m31100| 2015-07-19T23:39:09.240+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.370+0000 m31100| 2015-07-19T23:39:09.369+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 121ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.370+0000 m31100| 2015-07-19T23:39:09.369+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:104 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 121ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.495+0000 m31200| 2015-07-19T23:39:09.495+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 242ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.496+0000 m31200| 2015-07-19T23:39:09.495+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:135 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 242ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.530+0000 m30998| 2015-07-19T23:39:09.530+0000 I NETWORK [conn20] end connection 10.139.123.131:35715 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.673+0000 m30998| 2015-07-19T23:39:09.673+0000 I NETWORK [conn21] end connection 
10.139.123.131:35716 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.677+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.677+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.677+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.677+0000 jstests/concurrency/fsm_workloads/yield.js: Workload completed in 5893 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.677+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.677+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.678+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.678+0000 m30999| 2015-07-19T23:39:09.677+0000 I COMMAND [conn1] DROP: db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.678+0000 m30999| 2015-07-19T23:39:09.677+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:09.677+0000-55ac351dd2c1f750d1548373", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349149677), what: "dropCollection.start", ns: "db3.coll3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.697+0000 m29000| 2015-07-19T23:39:09.696+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55389 #34 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.732+0000 m30999| 2015-07-19T23:39:09.732+0000 I SHARDING [conn1] distributed lock 'db3.coll3/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac351dd2c1f750d1548374
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.733+0000 m31100| 2015-07-19T23:39:09.732+0000 I COMMAND [conn12] CMD: drop db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.733+0000 m31100| 2015-07-19T23:39:09.733+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 236ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.733+0000 m31100| 2015-07-19T23:39:09.733+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 236ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.733+0000 m31200| 2015-07-19T23:39:09.733+0000 I COMMAND [conn14] CMD: drop db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.734+0000 m31200| 2015-07-19T23:39:09.733+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 236ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.734+0000 m31200| 2015-07-19T23:39:09.734+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 236ms
[js_test:fsm_all_sharded_replication]
2015-07-19T23:39:09.736+0000 m31102| 2015-07-19T23:39:09.735+0000 I COMMAND [repl writer worker 12] CMD: drop db3.coll3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.736+0000 m31101| 2015-07-19T23:39:09.735+0000 I COMMAND [repl writer worker 14] CMD: drop db3.coll3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.737+0000 m31202| 2015-07-19T23:39:09.736+0000 I COMMAND [repl writer worker 5] CMD: drop db3.coll3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.739+0000 m31201| 2015-07-19T23:39:09.737+0000 I COMMAND [repl writer worker 14] CMD: drop db3.coll3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.786+0000 m31100| 2015-07-19T23:39:09.786+0000 I SHARDING [conn12] remotely refreshing metadata for db3.coll3 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac3517d2c1f750d1548371, current metadata version is 2|3||55ac3517d2c1f750d1548371 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.786+0000 m31100| 2015-07-19T23:39:09.786+0000 W SHARDING [conn12] no chunks found when reloading db3.coll3, previous version was 0|0||55ac3517d2c1f750d1548371, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.787+0000 m31100| 2015-07-19T23:39:09.786+0000 I SHARDING [conn12] dropping metadata for db3.coll3 at shard version 2|3||55ac3517d2c1f750d1548371, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.787+0000 m31200| 2015-07-19T23:39:09.787+0000 I SHARDING [conn14] remotely refreshing metadata for db3.coll3 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac3517d2c1f750d1548371, current metadata version is 2|5||55ac3517d2c1f750d1548371 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.788+0000 m31200| 2015-07-19T23:39:09.787+0000 W SHARDING [conn14] no chunks found when reloading db3.coll3, previous version was 0|0||55ac3517d2c1f750d1548371, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.788+0000 m31200| 2015-07-19T23:39:09.787+0000 I SHARDING [conn14] dropping metadata for db3.coll3 at shard version 2|5||55ac3517d2c1f750d1548371, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.788+0000 m30999| 2015-07-19T23:39:09.788+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:09.788+0000-55ac351dd2c1f750d1548375", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349149788), what: "dropCollection", ns: "db3.coll3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.839+0000 m30999| 2015-07-19T23:39:09.839+0000 I SHARDING [conn1] distributed lock 'db3.coll3/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
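
The sequence above is a sharded collection drop: mongos m30999 logs dropCollection.start, takes the distributed lock for db3.coll3, each shard primary runs the local drop, both shards discard their chunk metadata, and the lock is released after the final dropCollection event. Reduced to shell commands through any mongos, roughly (a sketch):

    // Sketch: the same drop issued by hand through a mongos.
    db.getSiblingDB("db3").coll3.drop();
    // The dropCollection.start / dropCollection events land in the config
    // server's changelog and can be read back afterwards:
    db.getSiblingDB("config").changelog.find({ ns: "db3.coll3" }).sort({ time: 1 }).forEach(printjson);
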
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.891+0000 m30999| 2015-07-19T23:39:09.890+0000 I COMMAND [conn1] DROP DATABASE: db3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.891+0000 m30999| 2015-07-19T23:39:09.890+0000 I SHARDING [conn1] DBConfig::dropDatabase: db3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.891+0000 m30999| 2015-07-19T23:39:09.890+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:09.890+0000-55ac351dd2c1f750d1548376", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349149890), what: "dropDatabase.start", ns: "db3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.992+0000 m30999| 2015-07-19T23:39:09.992+0000 I SHARDING [conn1] DBConfig::dropDatabase: db3 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.993+0000 m31100| 2015-07-19T23:39:09.992+0000 I COMMAND [conn27] dropDatabase db3 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.993+0000 m31100| 2015-07-19T23:39:09.992+0000 I COMMAND [conn27] dropDatabase db3 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.994+0000 m31100| 2015-07-19T23:39:09.992+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.994+0000 m30999| 2015-07-19T23:39:09.992+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:09.992+0000-55ac351dd2c1f750d1548377", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349149992), what: "dropDatabase", ns: "db3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.994+0000 m31100| 2015-07-19T23:39:09.992+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 149 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 255ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.994+0000 m31101| 2015-07-19T23:39:09.993+0000 I COMMAND [repl writer worker 9] dropDatabase db3 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.994+0000 m31101| 2015-07-19T23:39:09.993+0000 I COMMAND [repl writer worker 9] dropDatabase db3 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.994+0000 m31102| 2015-07-19T23:39:09.993+0000 I COMMAND [repl writer worker 5] dropDatabase db3 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:09.994+0000 m31102| 2015-07-19T23:39:09.993+0000 I COMMAND [repl writer worker 5] dropDatabase db3 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.054+0000 m31100| 2015-07-19T23:39:10.053+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.056+0000 m31101| 2015-07-19T23:39:10.056+0000 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.057+0000 m31102| 2015-07-19T23:39:10.057+0000 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown 
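
dropDatabase follows the same pattern one level up, and the m31101/m31102 lines show the secondaries applying the drop as it arrives through the oplog. A sketch of checking that directly, assuming the 3.x shell and the secondary address from the log:

    // Sketch: confirm the dropDatabase replicated to a test-rs0 secondary.
    db.getSiblingDB("db3").dropDatabase();             // via mongos, as conn1 does above
    var sec = new Mongo("ip-10-139-123-131:31101");    // secondary address from the log
    sec.setSlaveOk();                                  // allow reads on a secondary
    printjson(sec.getDB("db3").getCollectionNames());  // expect [] once the drop is applied
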
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.058+0000 m31200| 2015-07-19T23:39:10.058+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 319ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.060+0000 m31200| 2015-07-19T23:39:10.058+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 319ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.066+0000 m31200| 2015-07-19T23:39:10.066+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.067+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.067+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.067+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.067+0000 jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.067+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.067+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.068+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.070+0000 m31201| 2015-07-19T23:39:10.069+0000 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.070+0000 m31202| 2015-07-19T23:39:10.069+0000 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.070+0000 m30999| 2015-07-19T23:39:10.070+0000 I SHARDING [conn1] distributed lock 'db4/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac351ed2c1f750d1548378 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.072+0000 m30999| 2015-07-19T23:39:10.072+0000 I SHARDING [conn1] Placing [db4] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.072+0000 m30999| 2015-07-19T23:39:10.072+0000 I SHARDING [conn1] Enabling sharding for database [db4] in config db [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.123+0000 m30999| 2015-07-19T23:39:10.123+0000 I SHARDING [conn1] distributed lock 'db4/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.129+0000 m31100| 2015-07-19T23:39:10.129+0000 I INDEX [conn16] build index on: db4.coll4 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db4.coll4" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.129+0000 m31100| 2015-07-19T23:39:10.129+0000 I INDEX [conn16] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.130+0000 m31100| 2015-07-19T23:39:10.130+0000 I INDEX [conn16] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.131+0000 m30999| 2015-07-19T23:39:10.131+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db4.coll4", key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.133+0000 m30999| 2015-07-19T23:39:10.132+0000 I SHARDING [conn1] distributed lock 'db4.coll4/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac351ed2c1f750d1548379 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.133+0000 m30999| 2015-07-19T23:39:10.133+0000 I SHARDING [conn1] enable sharding on: db4.coll4 with shard key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.134+0000 m30999| 2015-07-19T23:39:10.133+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:10.133+0000-55ac351ed2c1f750d154837a", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349150133), what: "shardCollection.start", ns: "db4.coll4", details: { shardKey: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, collection: "db4.coll4", primary: "test-rs0:test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.134+0000 m31102| 2015-07-19T23:39:10.133+0000 I INDEX [repl writer worker 2] build index on: db4.coll4 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db4.coll4" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.134+0000 m31102| 2015-07-19T23:39:10.133+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.135+0000 m31101| 2015-07-19T23:39:10.134+0000 I INDEX [repl writer worker 8] build index on: db4.coll4 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db4.coll4" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.135+0000 m31101| 2015-07-19T23:39:10.134+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.135+0000 m31102| 2015-07-19T23:39:10.135+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.135+0000 m31101| 2015-07-19T23:39:10.135+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.184+0000 m30999| 2015-07-19T23:39:10.184+0000 I SHARDING [conn1] going to create 1 chunk(s) for: db4.coll4 using new epoch 55ac351ed2c1f750d154837b [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.236+0000 m30999| 2015-07-19T23:39:10.236+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db4.coll4: 0ms sequenceNumber: 22 version: 1|0||55ac351ed2c1f750d154837b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.288+0000 m30999| 2015-07-19T23:39:10.288+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db4.coll4: 0ms sequenceNumber: 23 version: 1|0||55ac351ed2c1f750d154837b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.289+0000 m31100| 2015-07-19T23:39:10.289+0000 I SHARDING [conn23] remotely refreshing metadata for db4.coll4 with requested shard version 1|0||55ac351ed2c1f750d154837b, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.289+0000 m31100| 2015-07-19T23:39:10.289+0000 I SHARDING [conn23] collection db4.coll4 was previously unsharded, new metadata loaded with shard version 1|0||55ac351ed2c1f750d154837b [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.289+0000 m31100| 2015-07-19T23:39:10.289+0000 I SHARDING [conn23] collection version was loaded at version 1|0||55ac351ed2c1f750d154837b, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.290+0000 m30999| 2015-07-19T23:39:10.289+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:10.289+0000-55ac351ed2c1f750d154837c", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349150289), what: "shardCollection", ns: "db4.coll4", details: { version: "1|0||55ac351ed2c1f750d154837b" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.341+0000 m30999| 2015-07-19T23:39:10.341+0000 I SHARDING [conn1] distributed lock 'db4.coll4/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
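
This block is the setup for the next workload: mongos places db4 on test-rs0 and enables sharding, the shard-key index on the deliberately long field name is built on the primary and both secondaries, and db4.coll4 is sharded with a single initial chunk at version 1|0. The equivalent explicit steps, as a sketch (longField stands in for the x-padded name in the log; its length here is illustrative):

    // Sketch: the harness's setup for db4.coll4, spelled out.
    var longField = "indexed_insert_long_fieldname_" + new Array(101).join("x"); // length illustrative
    var key = {}; key[longField] = 1;
    sh.enableSharding("db4");                       // "Enabling sharding for database [db4]"
    db.getSiblingDB("db4").coll4.createIndex(key);  // index builds seen on m31100/m31101/m31102
    sh.shardCollection("db4.coll4", key);           // one chunk, new epoch, version 1|0
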
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.345+0000 m31200| 2015-07-19T23:39:10.344+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 273ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.346+0000 m31200| 2015-07-19T23:39:10.344+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 273ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.346+0000 m31200| 2015-07-19T23:39:10.346+0000 I INDEX [conn24] build index on: db4.coll4 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db4.coll4" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.346+0000 m31200| 2015-07-19T23:39:10.346+0000 I INDEX [conn24] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.348+0000 m31200| 2015-07-19T23:39:10.347+0000 I INDEX [conn24] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.349+0000 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.353+0000 m31201| 2015-07-19T23:39:10.352+0000 I INDEX [repl writer worker 1] build index on: db4.coll4 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db4.coll4" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.353+0000 m31201| 2015-07-19T23:39:10.352+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.354+0000 m31202| 2015-07-19T23:39:10.352+0000 I INDEX [repl writer worker 12] build index on: db4.coll4 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db4.coll4" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.354+0000 m31201| 2015-07-19T23:39:10.353+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.355+0000 m31202| 2015-07-19T23:39:10.352+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.355+0000 m31202| 2015-07-19T23:39:10.354+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.585+0000 m30999| 2015-07-19T23:39:10.585+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57090 #22 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.616+0000 m30998| 2015-07-19T23:39:10.616+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35732 #22 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.655+0000 m30999| 2015-07-19T23:39:10.655+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57092 #23 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.670+0000 m30999| 2015-07-19T23:39:10.669+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57093 #24 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.679+0000 m30998| 2015-07-19T23:39:10.679+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35735 #23 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.685+0000 m30998| 2015-07-19T23:39:10.685+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35736 #24 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.687+0000 m30998| 2015-07-19T23:39:10.686+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35737 #25 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.689+0000 m30999| 2015-07-19T23:39:10.688+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57097 #25 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.692+0000 m30999| 2015-07-19T23:39:10.692+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57098 #26 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.698+0000 m30998| 2015-07-19T23:39:10.698+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35740 #26 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.700+0000 m30999| 2015-07-19T23:39:10.700+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57100 #27 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.703+0000 m30998| 2015-07-19T23:39:10.703+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35742 #27 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.726+0000 m30998| 2015-07-19T23:39:10.726+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35743 #28 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.732+0000 m30999| 2015-07-19T23:39:10.731+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57103 #28 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.733+0000 m30998| 2015-07-19T23:39:10.733+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35745 #29 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.745+0000 m30998| 2015-07-19T23:39:10.744+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35746 #30 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.746+0000 m30999| 2015-07-19T23:39:10.746+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57106 #29 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.747+0000 m30999| 2015-07-19T23:39:10.747+0000 I 
NETWORK [mongosMain] connection accepted from 10.139.123.131:57107 #30 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.750+0000 m30999| 2015-07-19T23:39:10.750+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57108 #31 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.761+0000 m30998| 2015-07-19T23:39:10.761+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35750 #31 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.767+0000 setting random seed: 9848132268525
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.767+0000 setting random seed: 7455322216264
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.768+0000 setting random seed: 6494513587094
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.768+0000 setting random seed: 5909576583653
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.770+0000 setting random seed: 1367877908051
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.770+0000 setting random seed: 3739349106326
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.770+0000 setting random seed: 6857767445035
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.770+0000 setting random seed: 5680101211182
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.774+0000 setting random seed: 9884349503554
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.774+0000 setting random seed: 7839960833080
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.774+0000 setting random seed: 7839075434021
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.790+0000 setting random seed: 132944779470
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.790+0000 setting random seed: 5078454581089
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.791+0000 setting random seed: 2444321177899
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.791+0000 setting random seed: 849750936031
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.791+0000 m31100| 2015-07-19T23:39:10.779+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:218 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 646ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.791+0000 m31100| 2015-07-19T23:39:10.779+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:218 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 646ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.791+0000 setting random seed: 2249396643601
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.792+0000 m30998| 2015-07-19T23:39:10.786+0000 I SHARDING [conn26] ChunkManager: time to load chunks for db4.coll4: 0ms sequenceNumber: 6 version: 1|0||55ac351ed2c1f750d154837b based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.800+0000 m31100| 2015-07-19T23:39:10.800+0000 I SHARDING [conn12] request split points lookup for chunk db4.coll4 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.801+0000 m31100| 2015-07-19T23:39:10.801+0000 I SHARDING [conn15] received
splitChunk request: { splitChunk: "db4.coll4", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351ed2c1f750d154837b') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.802+0000 m31100| 2015-07-19T23:39:10.802+0000 I SHARDING [conn15] distributed lock 'db4.coll4/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac351e68c42881b59cba2c [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.802+0000 m31100| 2015-07-19T23:39:10.802+0000 I SHARDING [conn15] remotely refreshing metadata for db4.coll4 based on current shard version 1|0||55ac351ed2c1f750d154837b, current metadata version is 1|0||55ac351ed2c1f750d154837b [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.803+0000 m31100| 2015-07-19T23:39:10.802+0000 I SHARDING [conn15] metadata of collection db4.coll4 already up to date (shard version : 1|0||55ac351ed2c1f750d154837b, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.803+0000 m31100| 2015-07-19T23:39:10.802+0000 I SHARDING [conn15] splitChunk accepted at version 1|0||55ac351ed2c1f750d154837b [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.804+0000 m31100| 2015-07-19T23:39:10.803+0000 I SHARDING [conn12] request split points lookup for chunk db4.coll4 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.804+0000 m31100| 2015-07-19T23:39:10.803+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:10.803+0000-55ac351e68c42881b59cba2d", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349150803), what: "multi-split", ns: "db4.coll4", details: { before: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey } }, number: 1, of: 3, chunk: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('55ac351ed2c1f750d154837b') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.805+0000 m31100| 2015-07-19T23:39:10.804+0000 I SHARDING [conn12] request split points lookup for chunk db4.coll4 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.808+0000 m31100| 2015-07-19T23:39:10.807+0000 I SHARDING [conn12] request split points lookup for chunk db4.coll4 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.808+0000 m31100| 2015-07-19T23:39:10.807+0000 I SHARDING [conn12] request split points lookup for chunk db4.coll4 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:10.810+0000 m31100| 2015-07-19T23:39:10.810+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47676 #39 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.811+0000 m31100| 2015-07-19T23:39:10.811+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47677 #40 (34 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.811+0000 m31100| 2015-07-19T23:39:10.811+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47678 #41 (35 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.812+0000 m30998| 2015-07-19T23:39:10.811+0000 I SHARDING [conn25] ChunkManager: time to load chunks for db4.coll4: 0ms sequenceNumber: 7 version: 1|3||55ac351ed2c1f750d154837b based on: 1|0||55ac351ed2c1f750d154837b [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.815+0000 setting random seed: 4693906591273 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.815+0000 m30999| 2015-07-19T23:39:10.813+0000 I SHARDING [conn29] ChunkManager: time to load chunks for db4.coll4: 0ms sequenceNumber: 24 version: 1|3||55ac351ed2c1f750d154837b based on: 1|0||55ac351ed2c1f750d154837b [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.815+0000 setting random seed: 8815920571796 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.816+0000 setting random seed: 1847216873429 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.816+0000 setting random seed: 3418385894037 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.821+0000 m31100| 2015-07-19T23:39:10.821+0000 I SHARDING [conn41] received splitChunk request: { splitChunk: "db4.coll4", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351ed2c1f750d154837b') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.822+0000 m31100| 2015-07-19T23:39:10.822+0000 W SHARDING [conn41] could not acquire collection lock for db4.coll4 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db4.coll4 is taken. 
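
What follows is autosplit contention. As the 20 worker threads insert through both mongoses, each mongos asks the m31100 primary to split the single chunk of db4.coll4; the shard serializes these attempts behind a distributed collection lock, so exactly one multi-split proceeds. A manual equivalent of the winning split, as a sketch (longField again stands in for the padded key name, length illustrative):

    // Sketch: the split the mongoses are racing to perform, done by hand.
    var longField = "indexed_insert_long_fieldname_" + new Array(101).join("x"); // length illustrative
    var at = {}; at[longField] = 15;
    printjson(sh.splitAt("db4.coll4", at)); // concurrent losers fail with
                                            // code 125, as in the warnings below
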
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.827+0000 m30999| 2015-07-19T23:39:10.823+0000 W SHARDING [conn22] splitChunk failed - cmd: { splitChunk: "db4.coll4", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351ed2c1f750d154837b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db4.coll4 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.828+0000 m31100| 2015-07-19T23:39:10.823+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47679 #42 (36 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.828+0000 m31100| 2015-07-19T23:39:10.826+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47680 #43 (37 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.833+0000 m31100| 2015-07-19T23:39:10.833+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47681 #44 (38 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.833+0000 m31100| 2015-07-19T23:39:10.833+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47682 #45 (39 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.835+0000 m31100| 2015-07-19T23:39:10.834+0000 I SHARDING [conn40] received splitChunk request: { splitChunk: "db4.coll4", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351ed2c1f750d154837b') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.840+0000 m31100| 2015-07-19T23:39:10.838+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47683 #46 (40 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.842+0000 m31100| 
2015-07-19T23:39:10.839+0000 I SHARDING [conn42] received splitChunk request: { splitChunk: "db4.coll4", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351ed2c1f750d154837b') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.847+0000 m31100| 2015-07-19T23:39:10.847+0000 W SHARDING [conn42] could not acquire collection lock for db4.coll4 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db4.coll4 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.854+0000 m30999| 2015-07-19T23:39:10.848+0000 W SHARDING [conn27] splitChunk failed - cmd: { splitChunk: "db4.coll4", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351ed2c1f750d154837b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db4.coll4 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.855+0000 m31100| 2015-07-19T23:39:10.854+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:10.854+0000-55ac351e68c42881b59cba2e", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349150854), what: "multi-split", ns: "db4.coll4", details: { before: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey } }, number: 2, of: 3, chunk: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('55ac351ed2c1f750d154837b') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.871+0000 m31100| 2015-07-19T23:39:10.840+0000 W SHARDING [conn40] could not acquire 
collection lock for db4.coll4 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db4.coll4 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.871+0000 m31100| 2015-07-19T23:39:10.840+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47684 #47 (41 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.875+0000 m31100| 2015-07-19T23:39:10.869+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47685 #48 (42 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.885+0000 m30999| 2015-07-19T23:39:10.869+0000 W SHARDING [conn26] splitChunk failed - cmd: { splitChunk: "db4.coll4", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351ed2c1f750d154837b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db4.coll4 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.892+0000 m31100| 2015-07-19T23:39:10.845+0000 I SHARDING [conn43] received splitChunk request: { splitChunk: "db4.coll4", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351ed2c1f750d154837b') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.893+0000 m31100| 2015-07-19T23:39:10.871+0000 W SHARDING [conn43] could not acquire collection lock for db4.coll4 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db4.coll4 is taken. 
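
The repeated code-125 failures are expected and harmless: only one splitChunk can hold the db4.coll4 collection lock at a time, and a losing mongos simply retries on a later insert batch. A caller who wanted to insist could retry on that code explicitly; a sketch with a hypothetical helper (not part of the test), run against a mongos:

    // Sketch: retry a manual split while autosplit attempts hold the lock.
    function splitWithRetry(ns, middle, tries) {
        for (var i = 0; i < tries; i++) {
            var res = db.adminCommand({ split: ns, middle: middle });
            if (res.ok === 1 || res.code !== 125) {
                return res;           // success, or an unrelated error
            }
            sleep(100 * (i + 1));     // back off; let the in-flight split finish
        }
        return { ok: 0, errmsg: "lock still busy after " + tries + " attempts" };
    }
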
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.910+0000 m30999| 2015-07-19T23:39:10.871+0000 W SHARDING [conn23] splitChunk failed - cmd: { splitChunk: "db4.coll4", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351ed2c1f750d154837b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db4.coll4 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.910+0000 m31100| 2015-07-19T23:39:10.873+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47686 #49 (43 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.918+0000 m31100| 2015-07-19T23:39:10.905+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:10.905+0000-55ac351e68c42881b59cba2f", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349150905), what: "multi-split", ns: "db4.coll4", details: { before: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey } }, number: 3, of: 3, chunk: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('55ac351ed2c1f750d154837b') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.956+0000 m31100| 2015-07-19T23:39:10.956+0000 I SHARDING [conn15] distributed lock 'db4.coll4/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. 
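All but one contender fails with code 125 (LockBusy): splits of a namespace are serialized by a distributed lock, and only the holder proceeds through the "multi-split" metadata events above. The lock document lives in the config servers' config.locks collection and can be inspected through a mongos; a minimal sketch, assuming the 3.x-era lock encoding (state 0 = free, 2 = held):

    // Inspect the distributed lock that serializes splits of db4.coll4.
    var locks = db.getSiblingDB("config").locks;
    locks.find({ _id: "db4.coll4" }).pretty();
    // While the multi-split above is in flight, expect state: 2 and a
    // "why" field naming the chunk being split; state: 0 once unlocked.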
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.957+0000 m31100| 2015-07-19T23:39:10.956+0000 I COMMAND [conn15] command db4.coll4 command: splitChunk { splitChunk: "db4.coll4", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351ed2c1f750d154837b') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 } } } protocol:op_command 155ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:10.958+0000 m30999| 2015-07-19T23:39:10.957+0000 I SHARDING [conn24] autosplitted db4.coll4 shard: ns: db4.coll4, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.092+0000 m31100| 2015-07-19T23:39:11.091+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47687 #50 (44 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.101+0000 m31100| 2015-07-19T23:39:11.101+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47688 #51 (45 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.109+0000 m31100| 2015-07-19T23:39:11.109+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47689 #52 (46 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.160+0000 m31100| 2015-07-19T23:39:11.160+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47690 #53 (47 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.161+0000 m30999| 2015-07-19T23:39:11.161+0000 I NETWORK [conn27] end connection 10.139.123.131:57100 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.179+0000 m30999| 2015-07-19T23:39:11.179+0000 I NETWORK [conn25] end connection 10.139.123.131:57097 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.189+0000 m31100| 2015-07-19T23:39:11.188+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47691 #54 (48 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.190+0000 m30999| 2015-07-19T23:39:11.190+0000 I NETWORK [conn29] end connection 10.139.123.131:57106 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.196+0000 m31100| 2015-07-19T23:39:11.196+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47692 #55 (49 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.202+0000 m31100| 2015-07-19T23:39:11.202+0000 I NETWORK [initandlisten] 
connection accepted from 10.139.123.131:47693 #56 (50 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.208+0000 m30998| 2015-07-19T23:39:11.208+0000 I NETWORK [conn24] end connection 10.139.123.131:35736 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.211+0000 m31100| 2015-07-19T23:39:11.210+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47694 #57 (51 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.214+0000 m30999| 2015-07-19T23:39:11.214+0000 I NETWORK [conn23] end connection 10.139.123.131:57092 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.217+0000 m30998| 2015-07-19T23:39:11.217+0000 I NETWORK [conn29] end connection 10.139.123.131:35745 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.233+0000 m30999| 2015-07-19T23:39:11.232+0000 I NETWORK [conn26] end connection 10.139.123.131:57098 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.246+0000 m31100| 2015-07-19T23:39:11.245+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47695 #58 (52 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.250+0000 m30999| 2015-07-19T23:39:11.250+0000 I NETWORK [conn28] end connection 10.139.123.131:57103 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.258+0000 m30998| 2015-07-19T23:39:11.258+0000 I NETWORK [conn23] end connection 10.139.123.131:35735 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.264+0000 m30998| 2015-07-19T23:39:11.263+0000 I NETWORK [conn30] end connection 10.139.123.131:35746 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.270+0000 m30998| 2015-07-19T23:39:11.270+0000 I NETWORK [conn25] end connection 10.139.123.131:35737 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.276+0000 m30998| 2015-07-19T23:39:11.276+0000 I NETWORK [conn28] end connection 10.139.123.131:35743 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.295+0000 m30999| 2015-07-19T23:39:11.294+0000 I NETWORK [conn22] end connection 10.139.123.131:57090 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.303+0000 m30998| 2015-07-19T23:39:11.303+0000 I NETWORK [conn22] end connection 10.139.123.131:35732 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.319+0000 m30998| 2015-07-19T23:39:11.319+0000 I NETWORK [conn26] end connection 10.139.123.131:35740 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.321+0000 m30998| 2015-07-19T23:39:11.321+0000 I NETWORK [conn27] end connection 10.139.123.131:35742 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.340+0000 m30998| 2015-07-19T23:39:11.340+0000 I NETWORK [conn31] end connection 10.139.123.131:35750 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.345+0000 m30999| 2015-07-19T23:39:11.345+0000 I NETWORK [conn31] end connection 10.139.123.131:57108 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.352+0000 m31200| 2015-07-19T23:39:11.352+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ 
Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.353+0000 m31200| 2015-07-19T23:39:11.353+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.385+0000 m30999| 2015-07-19T23:39:11.385+0000 I NETWORK [conn24] end connection 10.139.123.131:57093 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.415+0000 m30999| 2015-07-19T23:39:11.415+0000 I NETWORK [conn30] end connection 10.139.123.131:57107 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.497+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.497+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.497+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.497+0000 jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js: Workload completed in 1149 ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.497+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.498+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.498+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.498+0000 m30999| 2015-07-19T23:39:11.497+0000 I COMMAND [conn1] DROP: db4.coll4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.498+0000 m30999| 2015-07-19T23:39:11.497+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:11.497+0000-55ac351fd2c1f750d154837d", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349151497), what: "dropCollection.start", ns: "db4.coll4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.550+0000 m30999| 2015-07-19T23:39:11.549+0000 I SHARDING [conn1] distributed lock 'db4.coll4/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac351fd2c1f750d154837e [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.550+0000 m31100| 2015-07-19T23:39:11.549+0000 I COMMAND [conn12] CMD: drop db4.coll4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.550+0000 m31100| 2015-07-19T23:39:11.550+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.550+0000 m31100| 2015-07-19T23:39:11.550+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.551+0000 m31200| 2015-07-19T23:39:11.551+0000 I COMMAND [conn14] CMD: drop db4.coll4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.551+0000 m31200| 2015-07-19T23:39:11.551+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } 
cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 196ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.551+0000 m31200| 2015-07-19T23:39:11.551+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 196ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.553+0000 m31101| 2015-07-19T23:39:11.553+0000 I COMMAND [repl writer worker 13] CMD: drop db4.coll4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.553+0000 m31102| 2015-07-19T23:39:11.553+0000 I COMMAND [repl writer worker 5] CMD: drop db4.coll4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.554+0000 m31201| 2015-07-19T23:39:11.554+0000 I COMMAND [repl writer worker 8] CMD: drop db4.coll4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.554+0000 m31202| 2015-07-19T23:39:11.554+0000 I COMMAND [repl writer worker 6] CMD: drop db4.coll4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.603+0000 m31100| 2015-07-19T23:39:11.603+0000 I SHARDING [conn12] remotely refreshing metadata for db4.coll4 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||55ac351ed2c1f750d154837b, current metadata version is 1|3||55ac351ed2c1f750d154837b [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.603+0000 m31100| 2015-07-19T23:39:11.603+0000 W SHARDING [conn12] no chunks found when reloading db4.coll4, previous version was 0|0||55ac351ed2c1f750d154837b, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.604+0000 m31100| 2015-07-19T23:39:11.603+0000 I SHARDING [conn12] dropping metadata for db4.coll4 at shard version 1|3||55ac351ed2c1f750d154837b, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.604+0000 m30999| 2015-07-19T23:39:11.604+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:11.604+0000-55ac351fd2c1f750d154837f", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349151604), what: "dropCollection", ns: "db4.coll4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.656+0000 m30999| 2015-07-19T23:39:11.655+0000 I SHARDING [conn1] distributed lock 'db4.coll4/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
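The drop above follows the coordinated path for sharded collections: mongos logs dropCollection.start, takes the distributed lock, issues the drop on every shard (the secondaries apply it via the oplog, as the repl writer entries show), clears the chunk metadata ("no chunks found when reloading ..., this is a drop"), logs dropCollection, and releases the lock. From the shell the whole sequence is triggered by one command; a minimal sketch:

    // Issued against a mongos; it coordinates the per-shard drops and
    // the config-metadata cleanup for the sharded namespace.
    var res = db.getSiblingDB("db4").runCommand({ drop: "coll4" });
    printjson(res);  // ok: 1 on success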
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.707+0000 m30999| 2015-07-19T23:39:11.707+0000 I COMMAND [conn1] DROP DATABASE: db4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.707+0000 m30999| 2015-07-19T23:39:11.707+0000 I SHARDING [conn1] DBConfig::dropDatabase: db4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.708+0000 m30999| 2015-07-19T23:39:11.707+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:11.707+0000-55ac351fd2c1f750d1548380", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349151707), what: "dropDatabase.start", ns: "db4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.809+0000 m30999| 2015-07-19T23:39:11.809+0000 I SHARDING [conn1] DBConfig::dropDatabase: db4 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.809+0000 m31100| 2015-07-19T23:39:11.809+0000 I COMMAND [conn27] dropDatabase db4 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.809+0000 m31100| 2015-07-19T23:39:11.809+0000 I COMMAND [conn27] dropDatabase db4 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.809+0000 m31100| 2015-07-19T23:39:11.809+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.810+0000 m30999| 2015-07-19T23:39:11.809+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:11.809+0000-55ac351fd2c1f750d1548381", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349151809), what: "dropDatabase", ns: "db4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.810+0000 m31100| 2015-07-19T23:39:11.809+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.811+0000 m31101| 2015-07-19T23:39:11.810+0000 I COMMAND [repl writer worker 4] dropDatabase db4 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.811+0000 m31101| 2015-07-19T23:39:11.810+0000 I COMMAND [repl writer worker 4] dropDatabase db4 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.811+0000 m31102| 2015-07-19T23:39:11.810+0000 I COMMAND [repl writer worker 4] dropDatabase db4 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.811+0000 m31102| 2015-07-19T23:39:11.810+0000 I COMMAND [repl writer worker 4] dropDatabase db4 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.870+0000 m31100| 2015-07-19T23:39:11.870+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.873+0000 m31102| 2015-07-19T23:39:11.872+0000 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.873+0000 m31101| 2015-07-19T23:39:11.872+0000 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.874+0000 
m31200| 2015-07-19T23:39:11.874+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 318ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.875+0000 m31200| 2015-07-19T23:39:11.874+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:223 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 317ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.880+0000 m31200| 2015-07-19T23:39:11.880+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.881+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.881+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.881+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.881+0000 jstests/concurrency/fsm_workloads/agg_sort.js [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.881+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.881+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.881+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.883+0000 m31202| 2015-07-19T23:39:11.882+0000 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.883+0000 m31201| 2015-07-19T23:39:11.882+0000 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.884+0000 m30999| 2015-07-19T23:39:11.884+0000 I SHARDING [conn1] distributed lock 'db5/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac351fd2c1f750d1548382 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.885+0000 m30999| 2015-07-19T23:39:11.885+0000 I SHARDING [conn1] Placing [db5] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.885+0000 m30999| 2015-07-19T23:39:11.885+0000 I SHARDING [conn1] Enabling sharding for database [db5] in config db [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.937+0000 m30999| 2015-07-19T23:39:11.936+0000 I SHARDING [conn1] distributed lock 'db5/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.943+0000 m31100| 2015-07-19T23:39:11.942+0000 I INDEX [conn16] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.943+0000 m31100| 2015-07-19T23:39:11.942+0000 I INDEX [conn16] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.947+0000 m31100| 2015-07-19T23:39:11.944+0000 I INDEX [conn16] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.948+0000 m30999| 2015-07-19T23:39:11.945+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db5.coll5", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.948+0000 m30999| 2015-07-19T23:39:11.946+0000 I SHARDING [conn1] distributed lock 'db5.coll5/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac351fd2c1f750d1548383 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.948+0000 m31101| 2015-07-19T23:39:11.947+0000 I INDEX [repl writer worker 15] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.948+0000 m31101| 2015-07-19T23:39:11.947+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.948+0000 m30999| 2015-07-19T23:39:11.947+0000 I SHARDING [conn1] enable sharding on: db5.coll5 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.949+0000 m30999| 2015-07-19T23:39:11.947+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:11.947+0000-55ac351fd2c1f750d1548384", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349151947), what: "shardCollection.start", ns: "db5.coll5", details: { shardKey: { _id: "hashed" }, collection: "db5.coll5", primary: "test-rs0:test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.949+0000 m31101| 2015-07-19T23:39:11.948+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.949+0000 m31102| 2015-07-19T23:39:11.948+0000 I INDEX [repl writer worker 10] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.949+0000 m31102| 2015-07-19T23:39:11.948+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.949+0000 m31102| 2015-07-19T23:39:11.949+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:11.998+0000 m30999| 2015-07-19T23:39:11.998+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db5.coll5 using new epoch 55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.100+0000 m30999| 2015-07-19T23:39:12.100+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 25 version: 1|1||55ac351fd2c1f750d1548385 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.152+0000 m30999| 2015-07-19T23:39:12.152+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 26 version: 1|1||55ac351fd2c1f750d1548385 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.152+0000 m31100| 2015-07-19T23:39:12.152+0000 I SHARDING [conn49] remotely refreshing metadata for db5.coll5 with requested shard version 1|1||55ac351fd2c1f750d1548385, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.153+0000 m31100| 2015-07-19T23:39:12.153+0000 I SHARDING [conn49] collection db5.coll5 was previously unsharded, new metadata loaded with shard version 1|1||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.153+0000 m31100| 2015-07-19T23:39:12.153+0000 I SHARDING [conn49] collection version was loaded at version 1|1||55ac351fd2c1f750d1548385, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.153+0000 m30999| 2015-07-19T23:39:12.153+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:12.153+0000-55ac3520d2c1f750d1548386", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349152153), what: "shardCollection", ns: "db5.coll5", details: { version: "1|1||55ac351fd2c1f750d1548385" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.204+0000 m30999| 2015-07-19T23:39:12.204+0000 I SHARDING [conn1] distributed lock 'db5.coll5/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
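The db5.coll5 setup above is the standard hashed-sharding bootstrap: mongos builds the { _id: "hashed" } index on the primary shard (test-rs0), registers the collection with two initial chunks (numChunks: 2 in the shardCollection.start event), and the index build replicates to the secondaries. The moveChunk immediately below then places one of the two chunks on test-rs1. A minimal shell sketch of the same setup:

    // Run against a mongos. Sharding an empty collection on a hashed
    // _id creates the hashed index and pre-splits the hash range.
    sh.enableSharding("db5");
    sh.shardCollection("db5.coll5", { _id: "hashed" });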
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.205+0000 m30999| 2015-07-19T23:39:12.204+0000 I SHARDING [conn1] moving chunk ns: db5.coll5 moving ( ns: db5.coll5, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.205+0000 m31100| 2015-07-19T23:39:12.204+0000 I SHARDING [conn15] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.205+0000 m31100| 2015-07-19T23:39:12.205+0000 I SHARDING [conn15] received moveChunk request: { moveChunk: "db5.coll5", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", to: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac351fd2c1f750d1548385') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.206+0000 m31100| 2015-07-19T23:39:12.206+0000 I SHARDING [conn15] distributed lock 'db5.coll5/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac352068c42881b59cba31 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.206+0000 m31100| 2015-07-19T23:39:12.206+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:12.206+0000-55ac352068c42881b59cba32", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349152206), what: "moveChunk.start", ns: "db5.coll5", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.257+0000 m31100| 2015-07-19T23:39:12.257+0000 I SHARDING [conn15] remotely refreshing metadata for db5.coll5 based on current shard version 1|1||55ac351fd2c1f750d1548385, current metadata version is 1|1||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.257+0000 m31100| 2015-07-19T23:39:12.257+0000 I SHARDING [conn15] metadata of collection db5.coll5 already up to date (shard version : 1|1||55ac351fd2c1f750d1548385, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.258+0000 m31100| 2015-07-19T23:39:12.257+0000 I SHARDING [conn15] moveChunk request accepted at version 1|1||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.258+0000 m31100| 2015-07-19T23:39:12.258+0000 I SHARDING [conn15] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.258+0000 m31200| 2015-07-19T23:39:12.258+0000 I SHARDING [conn16] remotely refreshing metadata for db5.coll5, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.259+0000 m31200| 2015-07-19T23:39:12.258+0000 I SHARDING [conn16] collection db5.coll5 was previously unsharded, new metadata loaded with shard version 0|0||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.259+0000 m31200| 2015-07-19T23:39:12.258+0000 I SHARDING [conn16] collection version was loaded at version 1|1||55ac351fd2c1f750d1548385, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.259+0000 m31200| 2015-07-19T23:39:12.259+0000 I SHARDING [migrateThread] starting receiving-end of migration of 
chunk { _id: 0 } -> { _id: MaxKey } for collection db5.coll5 from test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102 at epoch 55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.260+0000 m31100| 2015-07-19T23:39:12.260+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.261+0000 m31200| 2015-07-19T23:39:12.260+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 375ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.261+0000 m31200| 2015-07-19T23:39:12.261+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 375ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.264+0000 m31100| 2015-07-19T23:39:12.263+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.264+0000 m31200| 2015-07-19T23:39:12.264+0000 I INDEX [migrateThread] build index on: db5.coll5 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db5.coll5" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.266+0000 m31200| 2015-07-19T23:39:12.264+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.267+0000 m31100| 2015-07-19T23:39:12.267+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.267+0000 m31200| 2015-07-19T23:39:12.267+0000 I INDEX [migrateThread] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.267+0000 m31200| 2015-07-19T23:39:12.267+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.269+0000 m31200| 2015-07-19T23:39:12.269+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.269+0000 m31200| 2015-07-19T23:39:12.269+0000 I SHARDING [migrateThread] Deleter starting delete for: db5.coll5 from { _id: 0 } -> { _id: MaxKey }, with opId: 23392 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.270+0000 m31200| 2015-07-19T23:39:12.269+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db5.coll5 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.273+0000 m31202| 2015-07-19T23:39:12.273+0000 I INDEX [repl writer worker 2] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.274+0000 m31202| 2015-07-19T23:39:12.273+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.275+0000 m31201| 2015-07-19T23:39:12.274+0000 I INDEX [repl writer worker 6] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.276+0000 m31201| 2015-07-19T23:39:12.274+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.276+0000 m31100| 2015-07-19T23:39:12.275+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.276+0000 m31201| 2015-07-19T23:39:12.276+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.276+0000 m31202| 2015-07-19T23:39:12.276+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.277+0000 m31200| 2015-07-19T23:39:12.276+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.277+0000 m31200| 2015-07-19T23:39:12.277+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db5.coll5' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.292+0000 m31100| 2015-07-19T23:39:12.291+0000 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.292+0000 m31100| 2015-07-19T23:39:12.291+0000 I SHARDING [conn15] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.292+0000 m31100| 2015-07-19T23:39:12.292+0000 I SHARDING [conn15] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.292+0000 m31100| 2015-07-19T23:39:12.292+0000 I SHARDING [conn15] moveChunk setting version to: 2|0||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.297+0000 m31200| 2015-07-19T23:39:12.297+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db5.coll5' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.298+0000 m31200| 2015-07-19T23:39:12.297+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:12.297+0000-55ac3520d9a63f6196b17259", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349152297), what: "moveChunk.to", ns: "db5.coll5", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 10, step 2 of 5: 7, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 20, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.349+0000 m31100| 2015-07-19T23:39:12.348+0000 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db5.coll5", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.349+0000 m31100| 2015-07-19T23:39:12.348+0000 I SHARDING [conn15] moveChunk updating self version to: 2|1||55ac351fd2c1f750d1548385 through { _id: MinKey } -> { _id: 0 } for collection 'db5.coll5' [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.349+0000 m31100| 2015-07-19T23:39:12.349+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:12.349+0000-55ac352068c42881b59cba33", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349152349), what: "moveChunk.commit", ns: "db5.coll5", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.400+0000 m31100| 2015-07-19T23:39:12.400+0000 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock 
to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.400+0000 m31100| 2015-07-19T23:39:12.400+0000 I SHARDING [conn15] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.401+0000 m31100| 2015-07-19T23:39:12.400+0000 I SHARDING [conn15] Deleter starting delete for: db5.coll5 from { _id: 0 } -> { _id: MaxKey }, with opId: 22581 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.401+0000 m31100| 2015-07-19T23:39:12.400+0000 I SHARDING [conn15] rangeDeleter deleted 0 documents for db5.coll5 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.401+0000 m31100| 2015-07-19T23:39:12.400+0000 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.401+0000 m31100| 2015-07-19T23:39:12.400+0000 I SHARDING [conn15] distributed lock 'db5.coll5/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.401+0000 m31100| 2015-07-19T23:39:12.400+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:12.400+0000-55ac352068c42881b59cba34", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349152400), what: "moveChunk.from", ns: "db5.coll5", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 32, step 5 of 6: 108, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.452+0000 m31100| 2015-07-19T23:39:12.451+0000 I COMMAND [conn15] command db5.coll5 command: moveChunk { moveChunk: "db5.coll5", from: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", to: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac351fd2c1f750d1548385') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 246ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.452+0000 m30999| 2015-07-19T23:39:12.452+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 27 version: 2|1||55ac351fd2c1f750d1548385 based on: 1|1||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.453+0000 m31100| 2015-07-19T23:39:12.452+0000 I SHARDING [conn15] received splitChunk request: { splitChunk: "db5.coll5", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351fd2c1f750d1548385') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.454+0000 m31100| 2015-07-19T23:39:12.453+0000 I SHARDING [conn15] distributed lock 'db5.coll5/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac352068c42881b59cba35 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.454+0000 m31100| 2015-07-19T23:39:12.453+0000 I SHARDING [conn15] remotely 
refreshing metadata for db5.coll5 based on current shard version 2|0||55ac351fd2c1f750d1548385, current metadata version is 2|0||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.454+0000 m31100| 2015-07-19T23:39:12.454+0000 I SHARDING [conn15] updating metadata for db5.coll5 from shard version 2|0||55ac351fd2c1f750d1548385 to shard version 2|1||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.454+0000 m31100| 2015-07-19T23:39:12.454+0000 I SHARDING [conn15] collection version was loaded at version 2|1||55ac351fd2c1f750d1548385, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.455+0000 m31100| 2015-07-19T23:39:12.454+0000 I SHARDING [conn15] splitChunk accepted at version 2|1||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.455+0000 m31100| 2015-07-19T23:39:12.455+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:12.455+0000-55ac352068c42881b59cba36", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349152455), what: "split", ns: "db5.coll5", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac351fd2c1f750d1548385') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac351fd2c1f750d1548385') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.506+0000 m31100| 2015-07-19T23:39:12.506+0000 I SHARDING [conn15] distributed lock 'db5.coll5/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.507+0000 m30999| 2015-07-19T23:39:12.506+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 28 version: 2|3||55ac351fd2c1f750d1548385 based on: 2|1||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.507+0000 m31200| 2015-07-19T23:39:12.507+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db5.coll5", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac351fd2c1f750d1548385') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.508+0000 m31200| 2015-07-19T23:39:12.508+0000 I SHARDING [conn18] distributed lock 'db5.coll5/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3520d9a63f6196b1725a [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.508+0000 m31200| 2015-07-19T23:39:12.508+0000 I SHARDING [conn18] remotely refreshing metadata for db5.coll5 based on current shard version 0|0||55ac351fd2c1f750d1548385, current metadata version is 1|1||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.508+0000 m31200| 2015-07-19T23:39:12.508+0000 I SHARDING [conn18] updating metadata for db5.coll5 from shard version 0|0||55ac351fd2c1f750d1548385 to shard version 2|0||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.509+0000 m31200| 2015-07-19T23:39:12.508+0000 I SHARDING [conn18] collection version was loaded at version 2|3||55ac351fd2c1f750d1548385, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.509+0000 m31200| 2015-07-19T23:39:12.508+0000 I SHARDING [conn18] splitChunk accepted at version 
2|0||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.509+0000 m31200| 2015-07-19T23:39:12.509+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:12.509+0000-55ac3520d9a63f6196b1725b", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349152509), what: "split", ns: "db5.coll5", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac351fd2c1f750d1548385') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac351fd2c1f750d1548385') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.560+0000 m31200| 2015-07-19T23:39:12.560+0000 I SHARDING [conn18] distributed lock 'db5.coll5/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.561+0000 m30999| 2015-07-19T23:39:12.561+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 29 version: 2|5||55ac351fd2c1f750d1548385 based on: 2|3||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.871+0000 m31100| 2015-07-19T23:39:12.870+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 923ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.871+0000 m31100| 2015-07-19T23:39:12.870+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 923ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.885+0000 m31200| 2015-07-19T23:39:12.885+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 611ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.886+0000 m31200| 2015-07-19T23:39:12.885+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 611ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:12.992+0000 m31200| 2015-07-19T23:39:12.991+0000 I COMMAND [conn28] command db5.$cmd command: insert { insert: "coll5", documents: 482, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('55ac351fd2c1f750d1548385') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 488, w: 488 } }, Database: { acquireCount: { w: 488 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 482 } }, oplog: { acquireCount: { w: 482 } } } protocol:op_command 109ms 
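With the migration done, each shard's chunk is split once more at +/-4611686018427387902, i.e. just under 2^62, quartering the signed 64-bit hashed-key range [-2^63, 2^63) into four chunks at version 2|5. The 482-document insert above and the 518-document insert just below are the two shard-local halves of the workload's 1000-document load, spread roughly evenly by the hash. The resulting layout can be read straight from the config metadata; a minimal sketch:

    // Via mongos: list db5.coll5's chunks in shard-key order.
    db.getSiblingDB("config").chunks.find(
        { ns: "db5.coll5" },
        { _id: 0, min: 1, max: 1, shard: 1 }
    ).sort({ min: 1 }).forEach(printjson);
    // Expect four ranges bounded at about -2^62, 0, and 2^62: the
    // lower two on test-rs0, the upper two on test-rs1.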
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.049+0000 m31100| 2015-07-19T23:39:13.049+0000 I COMMAND [conn16] command db5.$cmd command: insert { insert: "coll5", documents: 518, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('55ac351fd2c1f750d1548385') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 527, w: 527 } }, Database: { acquireCount: { w: 527 } }, Collection: { acquireCount: { w: 9 } }, Metadata: { acquireCount: { w: 518 } }, oplog: { acquireCount: { w: 518 } } } protocol:op_command 180ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.129+0000 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.191+0000 m30998| 2015-07-19T23:39:13.191+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35771 #32 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.198+0000 m30998| 2015-07-19T23:39:13.197+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35772 #33 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.201+0000 m30999| 2015-07-19T23:39:13.201+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57132 #32 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.204+0000 m30999| 2015-07-19T23:39:13.204+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57133 #33 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.211+0000 m30999| 2015-07-19T23:39:13.211+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57134 #34 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.215+0000 setting random seed: 9136593649163 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.216+0000 setting random seed: 302934967912 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.216+0000 setting random seed: 9725018055178 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.217+0000 setting random seed: 9508422380313 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.218+0000 setting random seed: 5447630477137 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.218+0000 m30998| 2015-07-19T23:39:13.218+0000 I SHARDING [conn32] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 8 version: 2|5||55ac351fd2c1f750d1548385 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.221+0000 m31100| 2015-07-19T23:39:13.221+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:123 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 173ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.222+0000 m31100| 2015-07-19T23:39:13.221+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:123 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 172ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.222+0000 m31100| 2015-07-19T23:39:13.222+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47701 #59 (53 connections now open) 
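The agg_sort workload now fans out: five client threads (the five "setting random seed" lines above) each run an aggregation that sorts db5.coll5 and writes the result with $out. On a sharded source, each shard sorts its portion and the primary shard merges the pre-sorted shard cursors -- the $mergeCursors / $mergePresorted pipeline and the db5.tmp.agg_out.N staging collections that get renamed into place are visible in the entries below. A sketch of the user-level pipeline each thread issues ("rand" is the sort field named in the merged pipeline logged below):

    // One thread's operation, issued through a mongos; thread N writes
    // to its own coll5_out_agg_sort_N output collection.
    db.getSiblingDB("db5").coll5.aggregate([
        { $sort: { rand: 1 } },
        { $out: "coll5_out_agg_sort_0" }
    ]);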
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.224+0000 m31200| 2015-07-19T23:39:13.223+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39538 #35 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.230+0000 m31100| 2015-07-19T23:39:13.230+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47703 #60 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.231+0000 m31200| 2015-07-19T23:39:13.231+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39540 #36 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.234+0000 m31100| 2015-07-19T23:39:13.234+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47705 #61 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.237+0000 m31100| 2015-07-19T23:39:13.237+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47706 #62 (56 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.243+0000 m31200| 2015-07-19T23:39:13.242+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39543 #37 (33 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.254+0000 m31200| 2015-07-19T23:39:13.254+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39544 #38 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.256+0000 m31100| 2015-07-19T23:39:13.255+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47709 #63 (57 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.266+0000 m31200| 2015-07-19T23:39:13.266+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39546 #39 (35 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.554+0000 m31100| 2015-07-19T23:39:13.554+0000 I WRITE [conn49] insert db5.tmp.agg_out.1 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 511, w: 503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 1 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 274ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.567+0000 m31100| 2015-07-19T23:39:13.566+0000 I WRITE [conn52] insert db5.tmp.agg_out.3 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 511, w: 503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 3744, W: 397 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 233ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.580+0000 m31100| 2015-07-19T23:39:13.580+0000 I WRITE [conn53] insert db5.tmp.agg_out.2 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 511, w: 503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 236 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 295ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.581+0000 m31100| 2015-07-19T23:39:13.580+0000 I WRITE [conn58] insert db5.tmp.agg_out.5 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 511, w: 503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 1 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 8987, W: 1150 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 240ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.590+0000 m31100| 2015-07-19T23:39:13.589+0000 I WRITE [conn57] insert db5.tmp.agg_out.4 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 511, w: 503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 1 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 8942, W: 1218 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 268ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.671+0000 m31100| 2015-07-19T23:39:13.667+0000 I COMMAND [conn49] command db5.tmp.agg_out.1 command: renameCollection { renameCollection: "db5.tmp.agg_out.1", to: "db5.coll5_out_agg_sort_0", dropTarget: true } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:97 locks:{ Global: { acquireCount: { r: 513, w: 504, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 109080 } }, Database: { acquireCount: { r: 3, w: 503, R: 1, W: 1 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_query 112ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.671+0000 m31100| 2015-07-19T23:39:13.668+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.672+0000 m31100| 2015-07-19T23:39:13.668+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.672+0000 m31100| 2015-07-19T23:39:13.668+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_0 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73637833372 }, { host: "ip-10-139-123-131:31200", id: 73030560150 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 515, w: 504, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 109080 } }, Database: { acquireCount: { r: 4, w: 503, R: 1, W: 1 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_command 450ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.672+0000 m31100| 2015-07-19T23:39:13.668+0000 I NETWORK [conn59] end connection 10.139.123.131:47701 (56 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.672+0000 m31200| 2015-07-19T23:39:13.669+0000 I NETWORK [conn36] end connection 10.139.123.131:39540 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.673+0000 m31200| 2015-07-19T23:39:13.671+0000 I NETWORK [conn35] end connection 10.139.123.131:39538 (33 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.673+0000 m31100| 2015-07-19T23:39:13.668+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.673+0000 m31100| 2015-07-19T23:39:13.668+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.673+0000 m31100| 2015-07-19T23:39:13.668+0000 I COMMAND [conn53] command db5.coll5_out_agg_sort_3 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74606936004 }, { host: "ip-10-139-123-131:31200", id: 74580038288 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 515, w: 504, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 7284, W: 76416 } }, Database: { acquireCount: { r: 4, w: 503, R: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 236 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_command 446ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.674+0000 m31100| 2015-07-19T23:39:13.669+0000 I NETWORK [conn60] end connection 10.139.123.131:47703 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.674+0000 m31100| 2015-07-19T23:39:13.672+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.675+0000 m31100| 2015-07-19T23:39:13.672+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.676+0000 m31100| 2015-07-19T23:39:13.672+0000 I COMMAND [conn52] command db5.coll5_out_agg_sort_4 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74950894210 }, { host: "ip-10-139-123-131:31200", id: 73237264885 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 515, w: 504, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 7933, W: 93914 } }, Database: { acquireCount: { r: 4, w: 503, R: 1, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 3744, W: 397 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_command 448ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.676+0000 m31100| 2015-07-19T23:39:13.672+0000 I NETWORK [conn61] end connection 10.139.123.131:47705 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.676+0000 m31200| 2015-07-19T23:39:13.672+0000 I NETWORK [conn38] end connection 10.139.123.131:39544 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.676+0000 m31200| 2015-07-19T23:39:13.673+0000 I NETWORK [conn39] end connection 10.139.123.131:39546 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.677+0000 m31200| 2015-07-19T23:39:13.674+0000 I NETWORK [conn37] end connection 10.139.123.131:39543 (30 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.684+0000 m31100| 2015-07-19T23:39:13.673+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.684+0000 m31100| 2015-07-19T23:39:13.673+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.684+0000 m31100| 2015-07-19T23:39:13.673+0000 I COMMAND [conn58] command db5.coll5_out_agg_sort_1 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 75002259060 }, { host: "ip-10-139-123-131:31200", id: 74456043277 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 515, w: 504, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 17273, W: 8175 } }, Database: { acquireCount: { r: 4, w: 503, R: 1, W: 1 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 8987, W: 1150 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_command 445ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.685+0000 m31100| 2015-07-19T23:39:13.673+0000 I NETWORK [conn63] end connection 10.139.123.131:47709 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.685+0000 m31100| 2015-07-19T23:39:13.674+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.685+0000 m31100| 2015-07-19T23:39:13.674+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.685+0000 m31100| 2015-07-19T23:39:13.674+0000 I NETWORK [conn62] end connection 10.139.123.131:47706 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.686+0000 m31100| 2015-07-19T23:39:13.674+0000 I COMMAND [conn57] command db5.coll5_out_agg_sort_2 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73717484127 }, { host: "ip-10-139-123-131:31200", id: 73663955739 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 515, w: 504, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 16850, W: 66019 } }, Database: { acquireCount: { r: 4, w: 503, R: 1, W: 1 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 8942, W: 1218 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_command 452ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.686+0000 m31100| 2015-07-19T23:39:13.674+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:347 reslen:4195597 locks:{ Global: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 112311 } }, Database: { acquireCount: { r: 4 } }, oplog: { acquireCount: { r: 4 } } } 134ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.784+0000 m31100| 2015-07-19T23:39:13.784+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47711 #64 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.792+0000 m31200| 2015-07-19T23:39:13.792+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39548 #40 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.824+0000 m31100| 2015-07-19T23:39:13.823+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47713 #65 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.843+0000 m31200| 2015-07-19T23:39:13.843+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39550 #41 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.988+0000 m31100| 2015-07-19T23:39:13.988+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.989+0000 m31100| 2015-07-19T23:39:13.989+0000 I NETWORK [conn64] end connection 10.139.123.131:47711 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.989+0000 m31100| 2015-07-19T23:39:13.989+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.990+0000 m31200| 2015-07-19T23:39:13.989+0000 I NETWORK [conn40] end connection 10.139.123.131:39548 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.990+0000 m31100| 2015-07-19T23:39:13.990+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.991+0000 m31100| 2015-07-19T23:39:13.990+0000 I NETWORK [conn65] end connection 10.139.123.131:47713 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.991+0000 m31100| 2015-07-19T23:39:13.991+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47715 #66 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.993+0000 m31200| 2015-07-19T23:39:13.992+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1001ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:13.993+0000 m31200| 2015-07-19T23:39:13.992+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1001ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.019+0000 m31200| 2015-07-19T23:39:14.018+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39552 #42 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.042+0000 m31100| 2015-07-19T23:39:14.041+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.042+0000 m31200| 2015-07-19T23:39:14.042+0000 I NETWORK [conn41] end connection 10.139.123.131:39550 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.043+0000 m31100| 2015-07-19T23:39:14.043+0000 I COMMAND [conn53] command db5.coll5_out_agg_sort_1 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74613540824 }, { host: "ip-10-139-123-131:31200", id: 74216106076 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5705, W: 735 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 18751, w: 529 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 228ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.049+0000 m31100| 2015-07-19T23:39:14.049+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47717 #67 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.053+0000 m31100| 2015-07-19T23:39:14.053+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_4 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73474207511 }, { host: "ip-10-139-123-131:31200", id: 74043669912 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 34056 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 18092 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 277ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.080+0000 m31200| 2015-07-19T23:39:14.080+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39554 #43 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.104+0000 m31100| 2015-07-19T23:39:14.104+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47719 #68 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.116+0000 m31200| 2015-07-19T23:39:14.116+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39556 #44 (33 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.174+0000 m31101| 2015-07-19T23:39:14.174+0000 I COMMAND [repl writer worker 1] CMD: drop db5.coll5_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.191+0000 m31101| 2015-07-19T23:39:14.191+0000 I COMMAND [repl writer worker 12] CMD: drop db5.coll5_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.197+0000 m31102| 2015-07-19T23:39:14.196+0000 I COMMAND [repl writer worker 10] CMD: drop db5.coll5_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.215+0000 m31100| 2015-07-19T23:39:14.215+0000 I WRITE [conn57] insert db5.tmp.agg_out.8 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 33230 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 76503, W: 15723 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 124ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.217+0000 m31102| 2015-07-19T23:39:14.216+0000 I COMMAND [repl writer worker 8] CMD: drop db5.coll5_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.357+0000 m31100| 2015-07-19T23:39:14.356+0000 I COMMAND [conn58] command db5.tmp.agg_out.9 command: renameCollection { renameCollection: "db5.tmp.agg_out.9", to: "db5.coll5_out_agg_sort_3", dropTarget: true } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:97 locks:{ Global: { acquireCount: { r: 515, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 33047, W: 106728 } }, Database: { acquireCount: { r: 3, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 3830, R: 55734, W: 11342 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_query 108ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.358+0000 m31100| 2015-07-19T23:39:14.358+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.358+0000 m31100| 2015-07-19T23:39:14.358+0000 I NETWORK [conn68] end connection 10.139.123.131:47719 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.360+0000 m31200| 2015-07-19T23:39:14.359+0000 I NETWORK [conn42] end connection 10.139.123.131:39552 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.360+0000 m31100| 2015-07-19T23:39:14.358+0000 I COMMAND [conn57] command db5.tmp.agg_out.8 command: renameCollection { renameCollection: "db5.tmp.agg_out.8", to: "db5.coll5_out_agg_sort_2", dropTarget: true } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:97 locks:{ Global: { acquireCount: { r: 515, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 33230, W: 141033 } }, Database: { acquireCount: { r: 3, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 76503, W: 15723 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_query 142ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.360+0000 m31100| 2015-07-19T23:39:14.359+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.361+0000 m31100| 2015-07-19T23:39:14.359+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.361+0000 m31100| 2015-07-19T23:39:14.359+0000 I COMMAND [conn57] command db5.coll5_out_agg_sort_2 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74331127771 }, { host: "ip-10-139-123-131:31200", id: 73664464875 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 33230, W: 141033 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 76503, W: 15723 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 499ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.361+0000 m31100| 2015-07-19T23:39:14.359+0000 I NETWORK [conn66] end connection 10.139.123.131:47715 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.362+0000 m31100| 2015-07-19T23:39:14.362+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.362+0000 m31200| 2015-07-19T23:39:14.362+0000 I NETWORK [conn44] end connection 10.139.123.131:39556 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.363+0000 m31100| 2015-07-19T23:39:14.362+0000 I COMMAND [conn52] command db5.coll5_out_agg_sort_0 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74715094023 }, { host: "ip-10-139-123-131:31200", id: 73904842439 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3569, w: 32912, W: 6759 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, w: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 8417, w: 75892, R: 49533, W: 1644 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 475ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.364+0000 m31100| 2015-07-19T23:39:14.362+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.364+0000 m31100| 2015-07-19T23:39:14.363+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47721 #69 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.370+0000 m31100| 2015-07-19T23:39:14.363+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.371+0000 m31100| 2015-07-19T23:39:14.363+0000 I COMMAND [conn58] command db5.coll5_out_agg_sort_3 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74165012080 }, { host: "ip-10-139-123-131:31200", id: 73859508585 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 6478, w: 33047, W: 106728 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 3830, R: 55734, W: 11342 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 482ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.374+0000 m31100| 2015-07-19T23:39:14.363+0000 I NETWORK [conn67] end connection 10.139.123.131:47717 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.374+0000 m31200| 2015-07-19T23:39:14.366+0000 I NETWORK [conn43] end connection 10.139.123.131:39554 (30 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.404+0000 m31200| 2015-07-19T23:39:14.404+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39558 #45 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.424+0000 m31100| 2015-07-19T23:39:14.424+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47723 #70 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.485+0000 m31200| 2015-07-19T23:39:14.485+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39560 #46 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.499+0000 m31102| 2015-07-19T23:39:14.499+0000 I COMMAND [repl writer worker 11] CMD: drop db5.coll5_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.554+0000 m31100| 2015-07-19T23:39:14.554+0000 I WRITE [conn53] insert db5.tmp.agg_out.12 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 36480, w: 4729 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 77393, W: 74065 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 125ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.561+0000 m31102| 2015-07-19T23:39:14.560+0000 I COMMAND [repl writer worker 5] CMD: drop db5.coll5_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.563+0000 m31101| 2015-07-19T23:39:14.563+0000 I COMMAND [repl writer worker 10] CMD: drop db5.coll5_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.563+0000 m31102| 2015-07-19T23:39:14.563+0000 I COMMAND [repl writer worker 6] CMD: drop db5.coll5_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.602+0000 m31101| 2015-07-19T23:39:14.602+0000 I COMMAND [repl writer worker 2] CMD: drop db5.coll5_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.604+0000 m31101| 2015-07-19T23:39:14.604+0000 I COMMAND [repl writer worker 6] CMD: drop db5.coll5_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.650+0000 m31100| 2015-07-19T23:39:14.650+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.650+0000 m31100| 2015-07-19T23:39:14.650+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.651+0000 m31200| 2015-07-19T23:39:14.650+0000 I NETWORK [conn46] end connection 10.139.123.131:39560 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.651+0000 m31100| 2015-07-19T23:39:14.650+0000 I NETWORK [conn70] end connection 10.139.123.131:47723 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.651+0000 m31100| 2015-07-19T23:39:14.650+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_4 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74423039821 }, { host: "ip-10-139-123-131:31200", id: 74758625831 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 47782, w: 226, W: 921 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, w: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 6715, w: 2588, R: 82192, W: 74372 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 491ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.652+0000 m31100| 2015-07-19T23:39:14.651+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.652+0000 m31100| 2015-07-19T23:39:14.651+0000 I NETWORK [conn69] end connection 10.139.123.131:47721 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.652+0000 m31100| 2015-07-19T23:39:14.651+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.652+0000 m31200| 2015-07-19T23:39:14.651+0000 I NETWORK [conn45] end connection 10.139.123.131:39558 (30 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.653+0000 m31100| 2015-07-19T23:39:14.651+0000 I COMMAND [conn53] command db5.coll5_out_agg_sort_1 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74114788863 }, { host: "ip-10-139-123-131:31200", id: 74062261312 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 36480, w: 4729, W: 80730 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 7011, R: 77393, W: 74065 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 487ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.655+0000 m31100| 2015-07-19T23:39:14.654+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47725 #71 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.664+0000 m31200| 2015-07-19T23:39:14.663+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39562 #47 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.668+0000 m31100| 2015-07-19T23:39:14.668+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47727 #72 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.669+0000 m31100| 2015-07-19T23:39:14.669+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47728 #73 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.681+0000 m31200| 2015-07-19T23:39:14.681+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39565 #48 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.704+0000 m31200| 2015-07-19T23:39:14.703+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39566 #49 (33 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.776+0000 m31101| 2015-07-19T23:39:14.776+0000 I COMMAND [repl writer worker 9] CMD: drop db5.coll5_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.791+0000 m31102| 2015-07-19T23:39:14.790+0000 I COMMAND [repl writer worker 3] CMD: drop db5.coll5_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.797+0000 m31101| 2015-07-19T23:39:14.796+0000 I COMMAND [repl writer worker 5] CMD: drop db5.coll5_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.855+0000 m31102| 2015-07-19T23:39:14.854+0000 I COMMAND [repl writer worker 14] CMD: drop db5.coll5_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.868+0000 m31100| 2015-07-19T23:39:14.868+0000 I WRITE [conn52] insert db5.tmp.agg_out.13 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 64265 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 3252, R: 66524, W: 9722 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 154ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.929+0000 m31100| 2015-07-19T23:39:14.929+0000 I WRITE [conn58] insert db5.tmp.agg_out.14 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 64257 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 6460, R: 53419, W: 3158 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 153ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.976+0000 m31100| 2015-07-19T23:39:14.976+0000 I WRITE [conn57] insert db5.tmp.agg_out.15 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 64380 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 48677, W: 12347 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 264ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.977+0000 m31100| 2015-07-19T23:39:14.976+0000 I COMMAND [conn53] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 171817 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 172ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.978+0000 m31100| 2015-07-19T23:39:14.976+0000 I COMMAND [conn49] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 170372 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 170ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.995+0000 m31200| 2015-07-19T23:39:14.995+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:14.996+0000 m31200| 2015-07-19T23:39:14.995+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.016+0000 m31100| 2015-07-19T23:39:15.016+0000 I COMMAND [conn52] command db5.tmp.agg_out.13 command: renameCollection { renameCollection: "db5.tmp.agg_out.13", to: "db5.coll5_out_agg_sort_0", dropTarget: true } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:97 locks:{ Global: { acquireCount: { r: 515, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 64265, W: 145149 } }, Database: { acquireCount: { r: 3, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 3252, R: 66524, W: 9722 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_query 146ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.023+0000 m31200| 2015-07-19T23:39:15.023+0000 I NETWORK [conn49] end connection 10.139.123.131:39566 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.023+0000 m31200| 2015-07-19T23:39:15.023+0000 I NETWORK [conn47] end connection 10.139.123.131:39562 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.023+0000 m31100| 2015-07-19T23:39:15.022+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.024+0000 m31100| 2015-07-19T23:39:15.022+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.024+0000 m31100| 2015-07-19T23:39:15.022+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.024+0000 m31100| 2015-07-19T23:39:15.022+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.024+0000 m31100| 2015-07-19T23:39:15.022+0000 I COMMAND [conn52] command db5.coll5_out_agg_sort_0 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74661478715 }, { host: "ip-10-139-123-131:31200", id: 74027909263 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 64265, W: 145149 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 9835, R: 66524, W: 9722 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 516ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.025+0000 m31100| 2015-07-19T23:39:15.023+0000 I NETWORK [conn72] end connection 10.139.123.131:47727 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.025+0000 m31100| 2015-07-19T23:39:15.023+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.025+0000 m31100| 2015-07-19T23:39:15.023+0000 I NETWORK [conn71] end connection 10.139.123.131:47725 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.025+0000 m31100| 2015-07-19T23:39:15.023+0000 I NETWORK [conn73] end connection 10.139.123.131:47728 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.025+0000 m31100| 2015-07-19T23:39:15.024+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.026+0000 m31200| 2015-07-19T23:39:15.024+0000 I NETWORK [conn48] end connection 10.139.123.131:39565 (30 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.026+0000 m31100| 2015-07-19T23:39:15.024+0000 I COMMAND [conn57] command db5.coll5_out_agg_sort_2 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73062271697 }, { host: "ip-10-139-123-131:31200", id: 73253699991 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 1797, w: 64380, W: 34145 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 6748, R: 48677, W: 12347 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 500ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.036+0000 m31100| 2015-07-19T23:39:15.022+0000 I COMMAND [conn58] command db5.coll5_out_agg_sort_3 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74091015944 }, { host: "ip-10-139-123-131:31200", id: 73524837912 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3508, w: 64257, W: 49761 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 13191, R: 53419, W: 3158 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 503ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.036+0000 m31100| 2015-07-19T23:39:15.031+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47731 #74 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.076+0000 m31100| 2015-07-19T23:39:15.075+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47732 #75 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.076+0000 m31200| 2015-07-19T23:39:15.076+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39569 #50 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.103+0000 m31200| 2015-07-19T23:39:15.102+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39570 #51 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.129+0000 m31100| 2015-07-19T23:39:15.128+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 101ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.129+0000 m31100| 2015-07-19T23:39:15.128+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 103ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.133+0000 m31101| 2015-07-19T23:39:15.133+0000 I COMMAND [repl writer worker 3] CMD: drop db5.coll5_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.156+0000 m31101| 2015-07-19T23:39:15.156+0000 I COMMAND [repl writer worker 8] CMD: drop db5.coll5_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.165+0000 m31101| 2015-07-19T23:39:15.165+0000 I COMMAND [repl writer worker 4] CMD: drop db5.coll5_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.199+0000 m31102| 2015-07-19T23:39:15.198+0000 I COMMAND [repl writer worker 8] CMD: drop db5.coll5_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.219+0000 m31102| 2015-07-19T23:39:15.219+0000 I COMMAND [repl writer worker 14] CMD: drop db5.coll5_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.222+0000 m31102| 2015-07-19T23:39:15.222+0000 I COMMAND [repl writer worker 2] CMD: drop db5.coll5_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.262+0000 m31100| 2015-07-19T23:39:15.261+0000 I WRITE [conn53] insert db5.tmp.agg_out.16 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 38805 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 171817, W: 4120 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 122ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.293+0000 m31100| 2015-07-19T23:39:15.293+0000 I WRITE [conn49] insert db5.tmp.agg_out.17 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 38545 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 3089, R: 170372, W: 498 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 164ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.294+0000 m31100| 2015-07-19T23:39:15.293+0000 I COMMAND [conn57] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 133413 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 133ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.294+0000 m31100| 2015-07-19T23:39:15.293+0000 I COMMAND [conn58] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_3" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 143157 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 143ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.295+0000 m31100| 2015-07-19T23:39:15.293+0000 I COMMAND [conn52] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_0" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 125445 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 125ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.328+0000 m31100| 2015-07-19T23:39:15.327+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.328+0000 m31100| 2015-07-19T23:39:15.327+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.329+0000 m31100| 2015-07-19T23:39:15.327+0000 I COMMAND [conn53] command db5.coll5_out_agg_sort_1 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74490720410 }, { host: "ip-10-139-123-131:31200", id: 75005009850 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 38805, W: 51997 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 8689, R: 171817, W: 4120 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 523ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.329+0000 m31200| 2015-07-19T23:39:15.327+0000 I NETWORK [conn51] end connection 10.139.123.131:39570 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.329+0000 m31100| 2015-07-19T23:39:15.328+0000 I NETWORK [conn75] end connection 10.139.123.131:47732 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.338+0000 m31100| 2015-07-19T23:39:15.337+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.338+0000 m31100| 2015-07-19T23:39:15.338+0000 I NETWORK [conn74] end connection 10.139.123.131:47731 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.339+0000 m31100| 2015-07-19T23:39:15.339+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.340+0000 m31200| 2015-07-19T23:39:15.340+0000 I NETWORK [conn50] end connection 10.139.123.131:39569 (30 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.349+0000 m31100| 2015-07-19T23:39:15.349+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47735 #76 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.349+0000 m31100| 2015-07-19T23:39:15.349+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47736 #77 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.358+0000 m31200| 2015-07-19T23:39:15.358+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39573 #52 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.363+0000 m31101| 2015-07-19T23:39:15.362+0000 I COMMAND [repl writer worker 13] CMD: drop db5.coll5_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.364+0000 m31100| 2015-07-19T23:39:15.364+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47738 #78 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.375+0000 m31100| 2015-07-19T23:39:15.374+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_4 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73541050735 }, { host: "ip-10-139-123-131:31200", id: 74875905664 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3984, w: 38545, W: 1218 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 19352, R: 170372, W: 498 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 568ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.378+0000 m31200| 2015-07-19T23:39:15.378+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39575 #53 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.382+0000 m31200| 2015-07-19T23:39:15.382+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39576 #54 (33 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.385+0000 m31102| 2015-07-19T23:39:15.384+0000 I COMMAND [repl writer worker 12] CMD: drop db5.coll5_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.388+0000 m31101| 2015-07-19T23:39:15.388+0000 I COMMAND [repl writer worker 3] CMD: drop db5.coll5_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.416+0000 m31102| 2015-07-19T23:39:15.416+0000 I COMMAND [repl writer worker 8] CMD: drop db5.coll5_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.589+0000 m31100| 2015-07-19T23:39:15.589+0000 I WRITE [conn52] insert db5.tmp.agg_out.20 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 24898 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 1663, R: 125445, W: 16756 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 179ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.593+0000 m31100| 2015-07-19T23:39:15.592+0000 I WRITE [conn58] insert db5.tmp.agg_out.18 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 24876 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 13578, R: 143157, W: 4633 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 187ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.615+0000 m31100| 2015-07-19T23:39:15.615+0000 I WRITE [conn57] insert db5.tmp.agg_out.19 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 24842 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, w: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 7839, w: 9641, R: 133413, W: 12754 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 137ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.616+0000 m31100| 2015-07-19T23:39:15.615+0000 I COMMAND [conn53] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 155933 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 156ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.616+0000 m31100| 2015-07-19T23:39:15.615+0000 I COMMAND [conn49] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 124061 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 124ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.656+0000 m31100| 2015-07-19T23:39:15.655+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.656+0000 m31100| 2015-07-19T23:39:15.655+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.656+0000 m31100| 2015-07-19T23:39:15.655+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.657+0000 m31100| 2015-07-19T23:39:15.655+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31200 not being returned to the pool
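The db5.tmp.agg_out.N inserts and renameCollection commands above show how the primary shard materializes each $out: it stages the 500 sorted documents in a temporary collection, then atomically swaps that collection into place. A hedged shell equivalent of the final swap (the temp-collection and target names are copied from one log entry; the server issues this rename internally, so running it by hand is illustrative only):

// Sketch of the rename that completes a $out; dropTarget discards the
// previous round's output collection in the same step.
db.getSiblingDB('admin').runCommand({
    renameCollection: 'db5.tmp.agg_out.13',
    to: 'db5.coll5_out_agg_sort_0',
    dropTarget: true
});

The "CMD: drop" lines from the repl writer workers on m31101 and m31102 are the secondaries applying the implicit drop of the old target collection as each rename replicates.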
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.657+0000 m31100| 2015-07-19T23:39:15.655+0000 I COMMAND [conn58] command db5.coll5_out_agg_sort_3 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73973599040 }, { host: "ip-10-139-123-131:31200", id: 73749998668 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 1398, w: 24876, W: 52810 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 20477, R: 143157, W: 4633 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 506ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.658+0000 m31100| 2015-07-19T23:39:15.655+0000 I COMMAND [conn52] command db5.coll5_out_agg_sort_0 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73684327632 }, { host: "ip-10-139-123-131:31200", id: 74972088504 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 24898, W: 57489 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 8540, R: 125445, W: 16756 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 488ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.658+0000 m31200| 2015-07-19T23:39:15.656+0000 I NETWORK [conn52] end connection 10.139.123.131:39573 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.658+0000 m31200| 2015-07-19T23:39:15.656+0000 I NETWORK [conn53] end connection 10.139.123.131:39575 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.658+0000 m31200| 2015-07-19T23:39:15.658+0000 I NETWORK [conn54] end connection 10.139.123.131:39576 (30 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.661+0000 m31100| 2015-07-19T23:39:15.656+0000 I NETWORK [conn78] end connection 10.139.123.131:47738 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.661+0000 m31100| 2015-07-19T23:39:15.656+0000 I NETWORK [conn77] end connection 10.139.123.131:47736 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.661+0000 m31100| 2015-07-19T23:39:15.657+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.662+0000 m31100| 2015-07-19T23:39:15.658+0000 I NETWORK [conn76] end connection 10.139.123.131:47735 (52 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.662+0000 m31100| 2015-07-19T23:39:15.658+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.662+0000 m31100| 2015-07-19T23:39:15.658+0000 I COMMAND [conn57] 
command db5.coll5_out_agg_sort_2 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74065544778 }, { host: "ip-10-139-123-131:31200", id: 73143619256 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 2656, w: 24842, W: 1071 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, w: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 16618, w: 9641, R: 133413, W: 12754 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 499ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.688+0000 m31100| 2015-07-19T23:39:15.688+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47741 #79 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.689+0000 m31100| 2015-07-19T23:39:15.689+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47742 #80 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.699+0000 m31200| 2015-07-19T23:39:15.698+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39579 #55 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.724+0000 m31200| 2015-07-19T23:39:15.724+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39580 #56 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.753+0000 m31102| 2015-07-19T23:39:15.752+0000 I COMMAND [repl writer worker 7] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.765+0000 m31101| 2015-07-19T23:39:15.764+0000 I COMMAND [repl writer worker 8] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.813+0000 m31102| 2015-07-19T23:39:15.812+0000 I COMMAND [repl writer worker 11] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.814+0000 m31101| 2015-07-19T23:39:15.814+0000 I COMMAND [repl writer worker 7] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.815+0000 m31102| 2015-07-19T23:39:15.815+0000 I COMMAND [repl writer worker 13] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.816+0000 m31101| 2015-07-19T23:39:15.816+0000 I COMMAND [repl writer worker 4] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.893+0000 m31100| 2015-07-19T23:39:15.892+0000 I WRITE [conn49] insert db5.tmp.agg_out.22 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 33185 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 14131, R: 124061, W: 10597 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 146ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.918+0000 m31100| 2015-07-19T23:39:15.918+0000 I WRITE [conn53] insert db5.tmp.agg_out.21 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { 
acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 33562 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 155933, W: 20882 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 159ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.919+0000 m31100| 2015-07-19T23:39:15.918+0000 I COMMAND [conn57] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 126089 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 126ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.919+0000 m31100| 2015-07-19T23:39:15.918+0000 I COMMAND [conn52] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_0" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 113225 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 113ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.920+0000 m31100| 2015-07-19T23:39:15.919+0000 I COMMAND [conn58] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_3" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 119813 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 120ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.951+0000 m31100| 2015-07-19T23:39:15.950+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.951+0000 m31100| 2015-07-19T23:39:15.950+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.952+0000 m31100| 2015-07-19T23:39:15.951+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_4 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73448766068 }, { host: "ip-10-139-123-131:31200", id: 74266937000 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 2606, w: 33185, W: 25484 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 25722, R: 124061, W: 10597 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 460ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.952+0000 m31100| 2015-07-19T23:39:15.951+0000 I NETWORK [conn79] end connection 10.139.123.131:47741 (53 connections now open) 
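Annotation: the filtered listCollections commands above (filter: { name: "coll5_out_agg_sort_N" }) are single-collection lookups; the shape below is copied from the logged entries and is consistent with $out checking its target collection before replacing it. The host is db5's primary shard as shown in the log.

    // Hedged sketch; command shape copied from the logged entries.
    var shard0 = new Mongo("ip-10-139-123-131:31100");  // db5's primary shard, per the log
    shard0.getDB("db5").runCommand({
        listCollections: 1,
        filter: { name: "coll5_out_agg_sort_1" },
        cursor: {}
    });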
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.952+0000 m31200| 2015-07-19T23:39:15.951+0000 I NETWORK [conn56] end connection 10.139.123.131:39580 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.952+0000 m31100| 2015-07-19T23:39:15.952+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.952+0000 m31100| 2015-07-19T23:39:15.952+0000 I NETWORK [conn80] end connection 10.139.123.131:47742 (52 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.953+0000 m31100| 2015-07-19T23:39:15.952+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.953+0000 m31200| 2015-07-19T23:39:15.953+0000 I NETWORK [conn55] end connection 10.139.123.131:39579 (30 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.953+0000 m31100| 2015-07-19T23:39:15.953+0000 I COMMAND [conn53] command db5.coll5_out_agg_sort_1 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73529843690 }, { host: "ip-10-139-123-131:31200", id: 74159746064 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 33562, W: 16337 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 13345, R: 155933, W: 20882 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 494ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.956+0000 m31100| 2015-07-19T23:39:15.956+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47745 #81 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.988+0000 m31100| 2015-07-19T23:39:15.987+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47746 #82 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.998+0000 m31200| 2015-07-19T23:39:15.998+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.999+0000 m31200| 2015-07-19T23:39:15.998+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:15.999+0000 m31101| 2015-07-19T23:39:15.998+0000 I COMMAND [repl writer worker 9] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.001+0000 m31200| 2015-07-19T23:39:16.001+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39583 #57 (31 connections now open) 
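Annotation: the two 1000ms "getmore local.oplog.rs" entries on m31200 are replication tailing cursors, not slow queries; nreturned:0 with a round 1000ms means the awaitData wait expired with nothing new to ship. An illustrative shell equivalent follows, with the Timestamp and host copied from the log (a real secondary additionally sets the oplogReplay option):

    // Illustrative only; host and query shape are taken from the logged getmore.
    var shard1 = new Mongo("ip-10-139-123-131:31200");
    var oplogCur = shard1.getDB("local").oplog.rs
        .find({ ts: { $gte: Timestamp(1437349120, 1) } })
        .addOption(DBQuery.Option.tailable | DBQuery.Option.awaitData);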
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.007+0000 m31200| 2015-07-19T23:39:16.007+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39585 #58 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.007+0000 m31100| 2015-07-19T23:39:16.007+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47748 #83 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.009+0000 m31102| 2015-07-19T23:39:16.008+0000 I COMMAND [repl writer worker 13] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.027+0000 m31200| 2015-07-19T23:39:16.026+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39586 #59 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.058+0000 m31102| 2015-07-19T23:39:16.058+0000 I COMMAND [repl writer worker 15] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.059+0000 m31101| 2015-07-19T23:39:16.058+0000 I COMMAND [repl writer worker 14] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.237+0000 m31100| 2015-07-19T23:39:16.237+0000 I WRITE [conn58] insert db5.tmp.agg_out.24 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 19148 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 6239, R: 119813, W: 6066 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 202ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.258+0000 m31100| 2015-07-19T23:39:16.257+0000 I WRITE [conn57] insert db5.tmp.agg_out.23 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 19119 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 126089, W: 11922 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 205ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.266+0000 m31100| 2015-07-19T23:39:16.265+0000 I COMMAND [conn53] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 131664 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 132ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.268+0000 m31100| 2015-07-19T23:39:16.266+0000 I COMMAND [conn49] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 130329 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 131ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.272+0000 m31100| 2015-07-19T23:39:16.272+0000 I WRITE [conn52] insert db5.tmp.agg_out.25 
ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 19362 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 6180, R: 113225, W: 3382 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 244ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.306+0000 m31100| 2015-07-19T23:39:16.305+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.306+0000 m31100| 2015-07-19T23:39:16.305+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.306+0000 m31100| 2015-07-19T23:39:16.305+0000 I COMMAND [conn58] command db5.coll5_out_agg_sort_3 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73981600878 }, { host: "ip-10-139-123-131:31200", id: 73334140222 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 19148, W: 65647 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 6239, R: 119813, W: 6066 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 507ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.306+0000 m31200| 2015-07-19T23:39:16.305+0000 I NETWORK [conn57] end connection 10.139.123.131:39583 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.307+0000 m31100| 2015-07-19T23:39:16.305+0000 I NETWORK [conn82] end connection 10.139.123.131:47746 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.308+0000 m31200| 2015-07-19T23:39:16.307+0000 I NETWORK [conn59] end connection 10.139.123.131:39586 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.308+0000 m31100| 2015-07-19T23:39:16.307+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.308+0000 m31100| 2015-07-19T23:39:16.307+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.309+0000 m31100| 2015-07-19T23:39:16.307+0000 I COMMAND [conn57] command db5.coll5_out_agg_sort_2 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 75052435882 }, { host: "ip-10-139-123-131:31200", id: 73382196093 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3843, w: 19119, W: 43526 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 126089, W: 
11922 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 515ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.309+0000 m31100| 2015-07-19T23:39:16.307+0000 I NETWORK [conn83] end connection 10.139.123.131:47748 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.311+0000 m31100| 2015-07-19T23:39:16.311+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47752 #84 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.314+0000 m31100| 2015-07-19T23:39:16.313+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.314+0000 m31100| 2015-07-19T23:39:16.314+0000 I NETWORK [conn81] end connection 10.139.123.131:47745 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.314+0000 m31100| 2015-07-19T23:39:16.314+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.315+0000 m31200| 2015-07-19T23:39:16.315+0000 I NETWORK [conn58] end connection 10.139.123.131:39585 (30 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.316+0000 m31100| 2015-07-19T23:39:16.315+0000 I COMMAND [conn52] command db5.coll5_out_agg_sort_0 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73109654461 }, { host: "ip-10-139-123-131:31200", id: 74891535164 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 11252, w: 19362, W: 6210 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 6180, R: 113225, W: 3382 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 510ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.318+0000 m31100| 2015-07-19T23:39:16.316+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47753 #85 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.321+0000 m31200| 2015-07-19T23:39:16.321+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39590 #60 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.325+0000 m31101| 2015-07-19T23:39:16.325+0000 I COMMAND [repl writer worker 12] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.341+0000 m31200| 2015-07-19T23:39:16.340+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39591 #61 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.367+0000 m31101| 2015-07-19T23:39:16.367+0000 I COMMAND [repl writer worker 13] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.370+0000 m31101| 2015-07-19T23:39:16.370+0000 I COMMAND [repl writer worker 2] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.377+0000 m31102| 2015-07-19T23:39:16.377+0000 I COMMAND [repl writer 
worker 4] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.429+0000 m31102| 2015-07-19T23:39:16.429+0000 I COMMAND [repl writer worker 1] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.432+0000 m31102| 2015-07-19T23:39:16.431+0000 I COMMAND [repl writer worker 13] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.499+0000 m31100| 2015-07-19T23:39:16.499+0000 I WRITE [conn49] insert db5.tmp.agg_out.27 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 31039, w: 1458 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 130329 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 148ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.534+0000 m31100| 2015-07-19T23:39:16.533+0000 I WRITE [conn53] insert db5.tmp.agg_out.26 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 29956, w: 2276 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 131664, W: 8134 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 149ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.575+0000 m31200| 2015-07-19T23:39:16.575+0000 I NETWORK [conn60] end connection 10.139.123.131:39590 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.576+0000 m31100| 2015-07-19T23:39:16.575+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.576+0000 m31100| 2015-07-19T23:39:16.575+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.576+0000 m31100| 2015-07-19T23:39:16.575+0000 I NETWORK [conn84] end connection 10.139.123.131:47752 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.577+0000 m31100| 2015-07-19T23:39:16.575+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_4 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73931704898 }, { host: "ip-10-139-123-131:31200", id: 73669978630 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 31039, w: 1458, W: 73611 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 130329 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 440ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.577+0000 m31100| 2015-07-19T23:39:16.576+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.577+0000 
m31100| 2015-07-19T23:39:16.576+0000 I NETWORK [conn85] end connection 10.139.123.131:47753 (52 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.577+0000 m31100| 2015-07-19T23:39:16.576+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.577+0000 m31200| 2015-07-19T23:39:16.576+0000 I NETWORK [conn61] end connection 10.139.123.131:39591 (30 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.578+0000 m31100| 2015-07-19T23:39:16.576+0000 I COMMAND [conn53] command db5.coll5_out_agg_sort_1 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74340083449 }, { host: "ip-10-139-123-131:31200", id: 74517551563 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 32552, w: 2276, W: 16482 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 131664, W: 8134 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 443ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.590+0000 m31100| 2015-07-19T23:39:16.580+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47756 #86 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.590+0000 m31100| 2015-07-19T23:39:16.581+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47757 #87 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.591+0000 m31100| 2015-07-19T23:39:16.590+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47758 #88 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.596+0000 m31200| 2015-07-19T23:39:16.595+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39595 #62 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.612+0000 m31200| 2015-07-19T23:39:16.612+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39596 #63 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.618+0000 m31101| 2015-07-19T23:39:16.617+0000 I COMMAND [repl writer worker 6] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.627+0000 m31200| 2015-07-19T23:39:16.627+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39597 #64 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.646+0000 m31101| 2015-07-19T23:39:16.646+0000 I COMMAND [repl writer worker 1] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.663+0000 m31102| 2015-07-19T23:39:16.662+0000 I COMMAND [repl writer worker 4] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.676+0000 m31102| 2015-07-19T23:39:16.675+0000 I COMMAND [repl writer worker 6] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.791+0000 m31100| 2015-07-19T23:39:16.791+0000 I WRITE [conn58] insert db5.tmp.agg_out.30 ninserted:500 keyUpdates:0 
writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 27923, w: 1256 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 43339, W: 10113 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.792+0000 m31100| 2015-07-19T23:39:16.792+0000 I COMMAND [conn53] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 116291 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 116ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.793+0000 m31100| 2015-07-19T23:39:16.792+0000 I COMMAND [conn49] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 112843 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 113ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.793+0000 m31100| 2015-07-19T23:39:16.792+0000 I WRITE [conn57] insert db5.tmp.agg_out.29 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 32805, w: 94 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 46427 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 161ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.936+0000 m31100| 2015-07-19T23:39:16.936+0000 I COMMAND [conn57] command db5.tmp.agg_out.29 command: renameCollection { renameCollection: "db5.tmp.agg_out.29", to: "db5.coll5_out_agg_sort_2", dropTarget: true } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:97 locks:{ Global: { acquireCount: { r: 515, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 32805, w: 94, W: 142199 } }, Database: { acquireCount: { r: 3, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 46427 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_query 143ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.938+0000 m31100| 2015-07-19T23:39:16.937+0000 I COMMAND [conn58] command db5.tmp.agg_out.30 command: renameCollection { renameCollection: "db5.tmp.agg_out.30", to: "db5.coll5_out_agg_sort_3", dropTarget: true } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:97 locks:{ Global: { acquireCount: { r: 515, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 27923, w: 1256, W: 144662 } }, Database: { acquireCount: { r: 3, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 43339, W: 10113 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { 
w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_query 146ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.938+0000 m31100| 2015-07-19T23:39:16.937+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.938+0000 m31100| 2015-07-19T23:39:16.937+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.939+0000 m31100| 2015-07-19T23:39:16.937+0000 I NETWORK [conn86] end connection 10.139.123.131:47756 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.939+0000 m31100| 2015-07-19T23:39:16.938+0000 I COMMAND [conn58] command db5.coll5_out_agg_sort_3 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74481483655 }, { host: "ip-10-139-123-131:31200", id: 74331396131 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 27923, w: 1256, W: 144662 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 43339, W: 10113 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 448ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.939+0000 m31200| 2015-07-19T23:39:16.938+0000 I NETWORK [conn63] end connection 10.139.123.131:39596 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.940+0000 m31100| 2015-07-19T23:39:16.938+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.940+0000 m31100| 2015-07-19T23:39:16.938+0000 I NETWORK [conn88] end connection 10.139.123.131:47758 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.941+0000 m31200| 2015-07-19T23:39:16.941+0000 I NETWORK [conn62] end connection 10.139.123.131:39595 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.941+0000 m31100| 2015-07-19T23:39:16.940+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.942+0000 m31100| 2015-07-19T23:39:16.940+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.945+0000 m31100| 2015-07-19T23:39:16.941+0000 I NETWORK [conn87] end connection 10.139.123.131:47757 (52 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.945+0000 m31100| 2015-07-19T23:39:16.941+0000 I COMMAND [conn57] command db5.coll5_out_agg_sort_2 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73990746670 }, { host: "ip-10-139-123-131:31200", id: 73791805830 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, w: 1, W: 1 
}, timeAcquiringMicros: { r: 37206, w: 94, W: 142199 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 46427 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 454ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.945+0000 m31100| 2015-07-19T23:39:16.942+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47762 #89 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.974+0000 m31200| 2015-07-19T23:39:16.949+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39599 #65 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.974+0000 m31100| 2015-07-19T23:39:16.972+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.974+0000 m31200| 2015-07-19T23:39:16.973+0000 I NETWORK [conn64] end connection 10.139.123.131:39597 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:16.975+0000 m31100| 2015-07-19T23:39:16.973+0000 I COMMAND [conn52] command db5.coll5_out_agg_sort_0 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74188753463 }, { host: "ip-10-139-123-131:31200", id: 73936372383 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 34148, w: 335, W: 5329 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { w: 1, R: 1, W: 1 }, timeAcquiringMicros: { w: 83761, R: 74842, W: 8344 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 515ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.001+0000 m31200| 2015-07-19T23:39:17.000+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.001+0000 m31200| 2015-07-19T23:39:17.000+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.017+0000 m31100| 2015-07-19T23:39:17.016+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47764 #90 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.047+0000 m31200| 2015-07-19T23:39:17.047+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39601 #66 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.062+0000 m31100| 2015-07-19T23:39:17.062+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47766 #91 (55 connections now open) 
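Annotation: the renameCollection entries a few records above (for example db5.tmp.agg_out.29 to db5.coll5_out_agg_sort_2 with dropTarget: true) are the final step of each $out: once fully written, the temp collection is swapped over the output name in one operation. A sketch of that command as it could be issued by hand, with names taken from the log (renameCollection runs against the admin database):

    var shard0 = new Mongo("ip-10-139-123-131:31100");  // where the rename is logged
    shard0.getDB("admin").runCommand({
        renameCollection: "db5.tmp.agg_out.29",
        to: "db5.coll5_out_agg_sort_2",
        dropTarget: true  // atomically replaces the previous output collection
    });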
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.069+0000 m31101| 2015-07-19T23:39:17.069+0000 I COMMAND [repl writer worker 9] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.080+0000 m31200| 2015-07-19T23:39:17.080+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39603 #67 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.088+0000 m31101| 2015-07-19T23:39:17.088+0000 I COMMAND [repl writer worker 5] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.090+0000 m31101| 2015-07-19T23:39:17.090+0000 I COMMAND [repl writer worker 2] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.102+0000 m31102| 2015-07-19T23:39:17.102+0000 I COMMAND [repl writer worker 13] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.132+0000 m31102| 2015-07-19T23:39:17.131+0000 I COMMAND [repl writer worker 11] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.135+0000 m31102| 2015-07-19T23:39:17.134+0000 I COMMAND [repl writer worker 10] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.174+0000 m31100| 2015-07-19T23:39:17.171+0000 I WRITE [conn52] insert db5.system.indexes keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 11, w: 3 } }, Database: { acquireCount: { r: 3, w: 1, R: 1, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 111488 } }, Collection: { acquireCount: { r: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 111ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.179+0000 m31100| 2015-07-19T23:39:17.178+0000 I WRITE [conn49] insert db5.tmp.agg_out.32 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 68414, w: 79 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { w: 1, R: 1, W: 1 }, timeAcquiringMicros: { w: 1722, R: 112843, W: 74049 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 119ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.197+0000 m31100| 2015-07-19T23:39:17.197+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47768 #92 (56 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.210+0000 m31200| 2015-07-19T23:39:17.209+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39605 #68 (34 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.263+0000 m31100| 2015-07-19T23:39:17.262+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.263+0000 m31100| 2015-07-19T23:39:17.262+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.263+0000 m31100| 2015-07-19T23:39:17.262+0000 I NETWORK [conn89] end connection 10.139.123.131:47762 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.264+0000 m31100| 2015-07-19T23:39:17.262+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_4 command: aggregate { aggregate: "coll5", pipeline: [ { 
$mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73814354680 }, { host: "ip-10-139-123-131:31200", id: 74338332030 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 75187, w: 79, W: 63829 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { w: 1, R: 1, W: 1 }, timeAcquiringMicros: { w: 1722, R: 112843, W: 74049 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 583ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.264+0000 m31200| 2015-07-19T23:39:17.262+0000 I NETWORK [conn65] end connection 10.139.123.131:39599 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.274+0000 m31100| 2015-07-19T23:39:17.273+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.298+0000 m31100| 2015-07-19T23:39:17.297+0000 I NETWORK [conn90] end connection 10.139.123.131:47764 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.298+0000 m31100| 2015-07-19T23:39:17.298+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.299+0000 m31200| 2015-07-19T23:39:17.298+0000 I NETWORK [conn66] end connection 10.139.123.131:39601 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.301+0000 m31100| 2015-07-19T23:39:17.300+0000 I COMMAND [conn53] command db5.coll5_out_agg_sort_1 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73072385813 }, { host: "ip-10-139-123-131:31200", id: 73146586017 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 73758, w: 132, W: 7411 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { w: 1, R: 1, W: 1 }, timeAcquiringMicros: { w: 89391, R: 116291, W: 71057 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 625ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.359+0000 m31100| 2015-07-19T23:39:17.359+0000 I WRITE [conn58] insert db5.tmp.agg_out.33 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 60742 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.367+0000 m31101| 2015-07-19T23:39:17.366+0000 I COMMAND [repl writer worker 8] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.374+0000 m31102| 2015-07-19T23:39:17.373+0000 I COMMAND [repl writer worker 3] CMD: drop db5.coll5_out_agg_sort_4 
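Annotation: each rename with dropTarget: true replicates a drop of the old output collection, which is what the interleaved "[repl writer worker N] CMD: drop db5.coll5_out_agg_sort_N" lines on the secondaries m31101 and m31102 are applying. An illustrative query for those oplog command entries, using the standard oplog field names (op, ns, o):

    var rs0Primary = new Mongo("ip-10-139-123-131:31100");  // test-rs0 primary, per the log
    rs0Primary.getDB("local").oplog.rs
        .find({ op: "c", ns: "db5.$cmd", "o.drop": { $exists: true } })
        .sort({ $natural: -1 })
        .limit(5)
        .forEach(printjson);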
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.397+0000 m31100| 2015-07-19T23:39:17.397+0000 I COMMAND [conn49] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_0" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 130150 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 131ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.417+0000 m31102| 2015-07-19T23:39:17.417+0000 I COMMAND [repl writer worker 14] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.417+0000 m31101| 2015-07-19T23:39:17.417+0000 I COMMAND [repl writer worker 5] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.423+0000 m31100| 2015-07-19T23:39:17.423+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.423+0000 m31100| 2015-07-19T23:39:17.423+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.424+0000 m31100| 2015-07-19T23:39:17.423+0000 I COMMAND [conn58] command db5.coll5_out_agg_sort_3 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73749018392 }, { host: "ip-10-139-123-131:31200", id: 75089499306 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 60742, W: 59067 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 372ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.424+0000 m31100| 2015-07-19T23:39:17.423+0000 I NETWORK [conn91] end connection 10.139.123.131:47766 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.424+0000 m31200| 2015-07-19T23:39:17.423+0000 I NETWORK [conn67] end connection 10.139.123.131:39603 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.446+0000 m31100| 2015-07-19T23:39:17.446+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47770 #93 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.455+0000 m31200| 2015-07-19T23:39:17.454+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39607 #69 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.463+0000 m31100| 2015-07-19T23:39:17.459+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.464+0000 m31100| 2015-07-19T23:39:17.459+0000 I NETWORK [conn92] end connection 10.139.123.131:47768 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.464+0000 m31100| 2015-07-19T23:39:17.459+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:17.464+0000 m31100| 2015-07-19T23:39:17.459+0000 I COMMAND [conn52] command db5.coll5_out_agg_sort_2 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73943975305 }, { host: "ip-10-139-123-131:31200", id: 74396665698 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 39123, W: 1274 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 50033, W: 111488 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 404ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.464+0000 m31200| 2015-07-19T23:39:17.459+0000 I NETWORK [conn68] end connection 10.139.123.131:39605 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.511+0000 m31100| 2015-07-19T23:39:17.510+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47772 #94 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.536+0000 m31200| 2015-07-19T23:39:17.536+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39609 #70 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.585+0000 m31102| 2015-07-19T23:39:17.585+0000 I COMMAND [repl writer worker 13] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.604+0000 m31102| 2015-07-19T23:39:17.604+0000 I COMMAND [repl writer worker 7] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.612+0000 m31101| 2015-07-19T23:39:17.612+0000 I COMMAND [repl writer worker 11] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.621+0000 m31100| 2015-07-19T23:39:17.617+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47774 #95 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.625+0000 m31101| 2015-07-19T23:39:17.625+0000 I COMMAND [repl writer worker 7] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.632+0000 m31200| 2015-07-19T23:39:17.631+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39611 #71 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.644+0000 m31100| 2015-07-19T23:39:17.644+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.644+0000 m31100| 2015-07-19T23:39:17.644+0000 I NETWORK [conn93] end connection 10.139.123.131:47770 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.645+0000 m31100| 2015-07-19T23:39:17.644+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.645+0000 m31100| 2015-07-19T23:39:17.645+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_0 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73148181663 }, { host: "ip-10-139-123-131:31200", id: 73984358718 } ] }, { $sort: { rand: 1, 
$mergePresorted: true } }, { $out: "coll5_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 26008, W: 320 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { w: 1, R: 1, W: 2 }, timeAcquiringMicros: { w: 2381, R: 130150, W: 2518 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 379ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.645+0000 m31200| 2015-07-19T23:39:17.645+0000 I NETWORK [conn69] end connection 10.139.123.131:39607 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.744+0000 m31101| 2015-07-19T23:39:17.743+0000 I COMMAND [repl writer worker 4] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.744+0000 m31102| 2015-07-19T23:39:17.744+0000 I COMMAND [repl writer worker 5] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.829+0000 m31100| 2015-07-19T23:39:17.828+0000 I WRITE [conn57] insert db5.tmp.agg_out.36 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 2541 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 184ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.840+0000 m31100| 2015-07-19T23:39:17.840+0000 I COMMAND [conn53] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_3" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 194346 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 195ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.841+0000 m31100| 2015-07-19T23:39:17.841+0000 I COMMAND [conn49] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 169426 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 171ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.842+0000 m31100| 2015-07-19T23:39:17.842+0000 I WRITE [conn58] insert db5.tmp.agg_out.37 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 2508, W: 98091 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 121ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.885+0000 m31100| 2015-07-19T23:39:17.885+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.886+0000 m31100| 2015-07-19T23:39:17.885+0000 I NETWORK [conn57] scoped 
connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.886+0000 m31100| 2015-07-19T23:39:17.885+0000 I NETWORK [conn94] end connection 10.139.123.131:47772 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.886+0000 m31100| 2015-07-19T23:39:17.885+0000 I COMMAND [conn57] command db5.coll5_out_agg_sort_4 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73794745037 }, { host: "ip-10-139-123-131:31200", id: 73020128422 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 14134, W: 12867 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, W: 2 }, timeAcquiringMicros: { r: 8628, W: 2541 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 415ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.887+0000 m31200| 2015-07-19T23:39:17.885+0000 I NETWORK [conn70] end connection 10.139.123.131:39609 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.887+0000 m31100| 2015-07-19T23:39:17.886+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.887+0000 m31100| 2015-07-19T23:39:17.887+0000 I NETWORK [conn95] end connection 10.139.123.131:47774 (52 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.887+0000 m31100| 2015-07-19T23:39:17.887+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.887+0000 m31200| 2015-07-19T23:39:17.887+0000 I NETWORK [conn71] end connection 10.139.123.131:39611 (30 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.888+0000 m31100| 2015-07-19T23:39:17.887+0000 I COMMAND [conn58] command db5.coll5_out_agg_sort_1 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73455699060 }, { host: "ip-10-139-123-131:31200", id: 74422053087 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 21004 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 8857, w: 2508, W: 98091 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 372ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.889+0000 m31100| 2015-07-19T23:39:17.889+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47776 #96 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.890+0000 m31100| 2015-07-19T23:39:17.890+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47777 #97 (54 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:17.892+0000 m31100| 2015-07-19T23:39:17.892+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47778 #98 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.904+0000 m31200| 2015-07-19T23:39:17.903+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39615 #72 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.905+0000 m31200| 2015-07-19T23:39:17.904+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39616 #73 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.917+0000 m31102| 2015-07-19T23:39:17.916+0000 I COMMAND [repl writer worker 15] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.934+0000 m31102| 2015-07-19T23:39:17.933+0000 I COMMAND [repl writer worker 12] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.938+0000 m31200| 2015-07-19T23:39:17.938+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39617 #74 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.939+0000 m31101| 2015-07-19T23:39:17.939+0000 I COMMAND [repl writer worker 15] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:17.961+0000 m31101| 2015-07-19T23:39:17.960+0000 I COMMAND [repl writer worker 5] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.003+0000 m31200| 2015-07-19T23:39:18.003+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.004+0000 m31200| 2015-07-19T23:39:18.003+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.168+0000 m31100| 2015-07-19T23:39:18.168+0000 I WRITE [conn49] insert db5.tmp.agg_out.39 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 35337 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 169426, W: 6374 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 224ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.187+0000 m31100| 2015-07-19T23:39:18.187+0000 I WRITE [conn53] insert db5.tmp.agg_out.38 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 36366 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 3129, R: 194346, W: 3861 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 244ms [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:18.199+0000 m31100| 2015-07-19T23:39:18.198+0000 I WRITE [conn52] insert db5.tmp.agg_out.40 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 36009 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, w: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 5991, w: 6932, R: 97510, W: 931 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 210ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.199+0000 m31100| 2015-07-19T23:39:18.198+0000 I COMMAND [conn58] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 183674 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 186ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.200+0000 m31100| 2015-07-19T23:39:18.199+0000 I COMMAND [conn57] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 218026 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 220ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.237+0000 m31100| 2015-07-19T23:39:18.236+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.237+0000 m31100| 2015-07-19T23:39:18.236+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.237+0000 m31100| 2015-07-19T23:39:18.237+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.237+0000 m31100| 2015-07-19T23:39:18.237+0000 I NETWORK [conn97] end connection 10.139.123.131:47777 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.238+0000 m31200| 2015-07-19T23:39:18.237+0000 I NETWORK [conn72] end connection 10.139.123.131:39615 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.238+0000 m31200| 2015-07-19T23:39:18.237+0000 I NETWORK [conn73] end connection 10.139.123.131:39616 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.240+0000 m31200| 2015-07-19T23:39:18.237+0000 I NETWORK [conn74] end connection 10.139.123.131:39617 (30 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.241+0000 m31100| 2015-07-19T23:39:18.237+0000 I COMMAND [conn53] command db5.coll5_out_agg_sort_3 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74842231369 }, { host: "ip-10-139-123-131:31200", id: 73677391941 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, 
acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3058, w: 36366, W: 11532 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 6387, R: 194346, W: 3861 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 592ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.241+0000 m31100| 2015-07-19T23:39:18.237+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.241+0000 m31100| 2015-07-19T23:39:18.237+0000 I NETWORK [conn98] end connection 10.139.123.131:47778 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.241+0000 m31100| 2015-07-19T23:39:18.237+0000 I NETWORK [conn96] end connection 10.139.123.131:47776 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.242+0000 m31100| 2015-07-19T23:39:18.237+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.242+0000 m31100| 2015-07-19T23:39:18.237+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.242+0000 m31100| 2015-07-19T23:39:18.237+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_2 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73066677327 }, { host: "ip-10-139-123-131:31200", id: 74350408721 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 35337, W: 63533 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 3560, R: 169426, W: 6374 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 568ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.243+0000 m31100| 2015-07-19T23:39:18.237+0000 I COMMAND [conn52] command db5.coll5_out_agg_sort_0 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73588601536 }, { host: "ip-10-139-123-131:31200", id: 73809333674 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 1449, w: 36009, W: 31490 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, w: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 9176, w: 6932, R: 97510, W: 931 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 496ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.267+0000 m31100| 2015-07-19T23:39:18.265+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47782 #99 (53 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:18.271+0000 m31101| 2015-07-19T23:39:18.271+0000 I COMMAND [repl writer worker 6] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.285+0000 m31200| 2015-07-19T23:39:18.285+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39619 #75 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.287+0000 m31100| 2015-07-19T23:39:18.286+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47784 #100 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.300+0000 m31102| 2015-07-19T23:39:18.300+0000 I COMMAND [repl writer worker 6] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.308+0000 m31200| 2015-07-19T23:39:18.307+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39621 #76 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.358+0000 m31102| 2015-07-19T23:39:18.358+0000 I COMMAND [repl writer worker 4] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.359+0000 m31101| 2015-07-19T23:39:18.359+0000 I COMMAND [repl writer worker 15] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.361+0000 m31102| 2015-07-19T23:39:18.361+0000 I COMMAND [repl writer worker 1] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.364+0000 m31101| 2015-07-19T23:39:18.363+0000 I COMMAND [repl writer worker 0] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.422+0000 m31100| 2015-07-19T23:39:18.422+0000 I WRITE [conn57] insert db5.tmp.agg_out.41 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 34105 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 7831, R: 218026, W: 333 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 112ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.497+0000 m31200| 2015-07-19T23:39:18.496+0000 I NETWORK [conn76] end connection 10.139.123.131:39621 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.497+0000 m31200| 2015-07-19T23:39:18.497+0000 I NETWORK [conn75] end connection 10.139.123.131:39619 (30 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.499+0000 m31100| 2015-07-19T23:39:18.496+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.500+0000 m31100| 2015-07-19T23:39:18.496+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.500+0000 m31100| 2015-07-19T23:39:18.496+0000 I COMMAND [conn57] command db5.coll5_out_agg_sort_4 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 75116740532 }, { host: "ip-10-139-123-131:31200", id: 74844740362 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, 
acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 34105, W: 71920 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 7831, R: 218026, W: 333 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 517ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.500+0000 m31100| 2015-07-19T23:39:18.496+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.501+0000 m31100| 2015-07-19T23:39:18.496+0000 I NETWORK [conn100] end connection 10.139.123.131:47784 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.501+0000 m31100| 2015-07-19T23:39:18.496+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.501+0000 m31100| 2015-07-19T23:39:18.496+0000 I NETWORK [conn99] end connection 10.139.123.131:47782 (52 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.501+0000 m31100| 2015-07-19T23:39:18.497+0000 I COMMAND [conn58] command db5.coll5_out_agg_sort_1 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73891070183 }, { host: "ip-10-139-123-131:31200", id: 74259318622 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 1772, w: 34418, W: 9431 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { w: 1, R: 1, W: 2 }, timeAcquiringMicros: { w: 74086, R: 183674, W: 7465 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 484ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.502+0000 m31100| 2015-07-19T23:39:18.498+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47786 #101 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.502+0000 m31100| 2015-07-19T23:39:18.498+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47787 #102 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.508+0000 m31200| 2015-07-19T23:39:18.508+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39624 #77 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.510+0000 m31100| 2015-07-19T23:39:18.509+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47789 #103 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.572+0000 m31200| 2015-07-19T23:39:18.572+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39626 #78 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.577+0000 m31200| 2015-07-19T23:39:18.576+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39627 #79 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.629+0000 m31102| 2015-07-19T23:39:18.629+0000 I COMMAND [repl writer worker 6] CMD: drop db5.coll5_out_agg_sort_1 
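
The `$mergeCursors` pipelines logged above are not what the workload submits; they are the merge half of a sharded aggregation. Each shard runs the `$sort` on its own chunks, and mongos then sends the pipeline shown in the log to the database's primary shard (m31100), where `$mergeCursors` combines the per-shard cursors, `$sort` with `$mergePresorted: true` merge-sorts the already-sorted streams, and `$out` stages the result in a temporary collection (the `insert db5.tmp.agg_out.NN` writes) before renaming it over the target, which is why the secondaries log a `CMD: drop` of the previous output collection as each rename replaces it. A minimal sketch of the client-side aggregation that would produce these entries, reconstructed from the log (the `rand` field and output collection names are taken from the pipeline above; the exact code in agg_sort.js may differ):

    // Reconstructed user-level pipeline; $mergeCursors is added internally
    // by mongos and never appears in client code.
    var testDB = db.getSiblingDB('db5');
    testDB.coll5.aggregate([
        { $sort: { rand: 1 } },           // each shard sorts its portion
        { $out: 'coll5_out_agg_sort_0' }  // merged on the primary shard via db5.tmp.agg_out.NN
    ]);
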
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.644+0000 m31102| 2015-07-19T23:39:18.643+0000 I COMMAND [repl writer worker 2] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.710+0000 m31101| 2015-07-19T23:39:18.710+0000 I COMMAND [repl writer worker 12] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.723+0000 m31101| 2015-07-19T23:39:18.723+0000 I COMMAND [repl writer worker 2] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.743+0000 m31100| 2015-07-19T23:39:18.739+0000 I WRITE [conn49] insert db5.tmp.agg_out.44 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 23973 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 79999, W: 47905 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 164ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.765+0000 m31100| 2015-07-19T23:39:18.764+0000 I WRITE [conn52] insert db5.tmp.agg_out.43 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 19132 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 92396, W: 51172 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 143ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.780+0000 m30999| 2015-07-19T23:39:18.780+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:39:18.779+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:30999:1437349128:1804289383', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.792+0000 m31100| 2015-07-19T23:39:18.792+0000 I COMMAND [conn58] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 125199 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 125ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.793+0000 m31100| 2015-07-19T23:39:18.792+0000 I COMMAND [conn57] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 122889 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 123ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.794+0000 m31100| 2015-07-19T23:39:18.794+0000 I WRITE [conn53] insert db5.tmp.agg_out.45 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 17503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { 
R: 76299, W: 54338 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 166ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.826+0000 m31100| 2015-07-19T23:39:18.825+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.826+0000 m31100| 2015-07-19T23:39:18.826+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.827+0000 m31100| 2015-07-19T23:39:18.826+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_0 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73141911004 }, { host: "ip-10-139-123-131:31200", id: 75142173591 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 23973, W: 84032 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 79999, W: 47905 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 484ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.827+0000 m31200| 2015-07-19T23:39:18.826+0000 I NETWORK [conn77] end connection 10.139.123.131:39624 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.827+0000 m31200| 2015-07-19T23:39:18.827+0000 I NETWORK [conn78] end connection 10.139.123.131:39626 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.827+0000 m31100| 2015-07-19T23:39:18.826+0000 I NETWORK [conn102] end connection 10.139.123.131:47787 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.828+0000 m31100| 2015-07-19T23:39:18.826+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47792 #104 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.828+0000 m31100| 2015-07-19T23:39:18.827+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.828+0000 m31100| 2015-07-19T23:39:18.827+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47793 #105 (56 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.828+0000 m31100| 2015-07-19T23:39:18.827+0000 I NETWORK [conn103] end connection 10.139.123.131:47789 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.828+0000 m31100| 2015-07-19T23:39:18.827+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.829+0000 m31100| 2015-07-19T23:39:18.827+0000 I COMMAND [conn52] command db5.coll5_out_agg_sort_2 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74562640760 }, { host: "ip-10-139-123-131:31200", id: 74056276327 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 
locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 22278, W: 56194 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 92396, W: 51172 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 497ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.834+0000 m31100| 2015-07-19T23:39:18.834+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.835+0000 m31100| 2015-07-19T23:39:18.834+0000 I NETWORK [conn101] end connection 10.139.123.131:47786 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.840+0000 m31200| 2015-07-19T23:39:18.839+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39630 #80 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.840+0000 m31200| 2015-07-19T23:39:18.839+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39631 #81 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.847+0000 m31100| 2015-07-19T23:39:18.846+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.847+0000 m31200| 2015-07-19T23:39:18.847+0000 I NETWORK [conn79] end connection 10.139.123.131:39627 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.848+0000 m31100| 2015-07-19T23:39:18.848+0000 I COMMAND [conn53] command db5.coll5_out_agg_sort_3 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74660038337 }, { host: "ip-10-139-123-131:31200", id: 74430100473 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 29662, W: 4181 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 76299, W: 54338 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 502ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:18.993+0000 m31102| 2015-07-19T23:39:18.992+0000 I COMMAND [repl writer worker 14] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.006+0000 m31200| 2015-07-19T23:39:19.006+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.006+0000 m31200| 2015-07-19T23:39:19.006+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms 
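
The recurring `getmore local.oplog.rs ... 1000ms` entries on m31200 are not slow queries: conn9 and conn11 (most likely the secondaries m31201 and m31202) are tailing the primary's oplog with a tailable, awaitData cursor. When no new writes arrive, each getmore blocks for roughly a second and then returns empty (`nreturned:0 reslen:20`), so these lines are normal idle replication traffic. A sketch of the same kind of read from the shell, using the timestamp from the log (the replication fetcher issues this internally; the snippet is illustrative only):

    // Tailable + awaitData cursor over the oplog, as the secondaries use here.
    // Each server-side getmore blocks ~1s when the oplog has no new entries.
    var oplog = db.getSiblingDB('local').getCollection('oplog.rs');
    var cursor = oplog.find({ ts: { $gte: Timestamp(1437349120, 1) } })
                      .addOption(DBQuery.Option.tailable)
                      .addOption(DBQuery.Option.awaitData);
    while (cursor.hasNext()) {  // exits once the tail is caught up and idle
        printjson(cursor.next());
    }
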
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.011+0000 m31102| 2015-07-19T23:39:19.011+0000 I COMMAND [repl writer worker 6] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.017+0000 m31102| 2015-07-19T23:39:19.017+0000 I COMMAND [repl writer worker 8] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.026+0000 m31100| 2015-07-19T23:39:19.025+0000 I WRITE [conn58] insert db5.tmp.agg_out.46 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 27410, w: 139 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 125199 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 135ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.047+0000 m31100| 2015-07-19T23:39:19.047+0000 I COMMAND [conn52] command db5.$cmd command: listCollections { listCollections: 1, filter: { name: "coll5_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:225 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 105694 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 106ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.049+0000 m31100| 2015-07-19T23:39:19.048+0000 I WRITE [conn57] insert db5.tmp.agg_out.47 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 24122, w: 866 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 122889, W: 5214 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 159ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.094+0000 m31200| 2015-07-19T23:39:19.093+0000 I NETWORK [conn80] end connection 10.139.123.131:39630 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.095+0000 m31100| 2015-07-19T23:39:19.093+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.096+0000 m31100| 2015-07-19T23:39:19.093+0000 I NETWORK [conn58] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.096+0000 m31100| 2015-07-19T23:39:19.093+0000 I NETWORK [conn104] end connection 10.139.123.131:47792 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.096+0000 m31100| 2015-07-19T23:39:19.093+0000 I COMMAND [conn58] command db5.coll5_out_agg_sort_1 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73898965840 }, { host: "ip-10-139-123-131:31200", id: 74559542792 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 27410, w: 139, W: 58883 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, 
acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 125199 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 426ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.097+0000 m31100| 2015-07-19T23:39:19.096+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47796 #106 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.097+0000 m31100| 2015-07-19T23:39:19.097+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47797 #107 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.098+0000 m31200| 2015-07-19T23:39:19.097+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39634 #82 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.098+0000 m31200| 2015-07-19T23:39:19.098+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39635 #83 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.101+0000 m31101| 2015-07-19T23:39:19.101+0000 I COMMAND [repl writer worker 9] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.103+0000 m31100| 2015-07-19T23:39:19.103+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.104+0000 m31100| 2015-07-19T23:39:19.103+0000 I NETWORK [conn105] end connection 10.139.123.131:47793 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.104+0000 m31100| 2015-07-19T23:39:19.104+0000 I NETWORK [conn57] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.104+0000 m31200| 2015-07-19T23:39:19.104+0000 I NETWORK [conn81] end connection 10.139.123.131:39631 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.105+0000 m31100| 2015-07-19T23:39:19.105+0000 I COMMAND [conn57] command db5.coll5_out_agg_sort_4 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74217031058 }, { host: "ip-10-139-123-131:31200", id: 73043499145 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 42048, w: 866, W: 9361 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 122889, W: 5214 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 435ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.128+0000 m31100| 2015-07-19T23:39:19.128+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47800 #108 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.129+0000 m31101| 2015-07-19T23:39:19.129+0000 I COMMAND [repl writer worker 15] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.133+0000 m31101| 2015-07-19T23:39:19.132+0000 I COMMAND [repl writer worker 5] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.136+0000 
m31200| 2015-07-19T23:39:19.136+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39637 #84 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.201+0000 m31102| 2015-07-19T23:39:19.201+0000 I COMMAND [repl writer worker 7] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.228+0000 m31102| 2015-07-19T23:39:19.228+0000 I COMMAND [repl writer worker 11] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.307+0000 m30998| 2015-07-19T23:39:19.306+0000 I NETWORK [conn32] end connection 10.139.123.131:35771 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.308+0000 m31101| 2015-07-19T23:39:19.307+0000 I COMMAND [repl writer worker 1] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.312+0000 m30999| 2015-07-19T23:39:19.312+0000 I NETWORK [conn34] end connection 10.139.123.131:57134 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.327+0000 m31101| 2015-07-19T23:39:19.327+0000 I COMMAND [repl writer worker 13] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.372+0000 m31100| 2015-07-19T23:39:19.371+0000 I WRITE [conn49] insert db5.tmp.agg_out.49 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 42847, w: 262 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 92840 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 177ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.417+0000 m31100| 2015-07-19T23:39:19.417+0000 I WRITE [conn52] insert db5.tmp.agg_out.48 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 37725, w: 1131 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 105694, W: 3761 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 199ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.418+0000 m31100| 2015-07-19T23:39:19.417+0000 I WRITE [conn53] insert db5.tmp.agg_out.50 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 35001, w: 556 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 64245, W: 6688 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 256ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.469+0000 m31100| 2015-07-19T23:39:19.465+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.470+0000 m31100| 2015-07-19T23:39:19.465+0000 I NETWORK [conn49] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.470+0000 m31100| 2015-07-19T23:39:19.465+0000 I COMMAND [conn49] command db5.coll5_out_agg_sort_0 command: 
aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73720429594 }, { host: "ip-10-139-123-131:31200", id: 74349219389 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 42847, w: 262, W: 91165 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 92840 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 511ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.470+0000 m31100| 2015-07-19T23:39:19.468+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:347 reslen:4195944 locks:{ Global: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 78653 } }, Database: { acquireCount: { r: 4 } }, oplog: { acquireCount: { r: 4 } } } 125ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.471+0000 m31100| 2015-07-19T23:39:19.469+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.471+0000 m31100| 2015-07-19T23:39:19.469+0000 I NETWORK [conn107] end connection 10.139.123.131:47797 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.471+0000 m31100| 2015-07-19T23:39:19.469+0000 I NETWORK [conn108] end connection 10.139.123.131:47800 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.471+0000 m31100| 2015-07-19T23:39:19.469+0000 I NETWORK [conn53] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.471+0000 m31100| 2015-07-19T23:39:19.470+0000 I COMMAND [conn53] command db5.coll5_out_agg_sort_3 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 73272677353 }, { host: "ip-10-139-123-131:31200", id: 74075679731 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 39876, w: 556, W: 44172 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 64245, W: 6688 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 487ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.473+0000 m31200| 2015-07-19T23:39:19.469+0000 I NETWORK [conn83] end connection 10.139.123.131:39635 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.473+0000 m31200| 2015-07-19T23:39:19.469+0000 I NETWORK [conn84] end connection 10.139.123.131:39637 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.484+0000 m31100| 2015-07-19T23:39:19.484+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31100 not being returned to the pool 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.485+0000 m31100| 2015-07-19T23:39:19.485+0000 I NETWORK [conn106] end connection 10.139.123.131:47796 (52 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.485+0000 m31100| 2015-07-19T23:39:19.485+0000 I NETWORK [conn52] scoped connection to ip-10-139-123-131:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.486+0000 m31200| 2015-07-19T23:39:19.486+0000 I NETWORK [conn82] end connection 10.139.123.131:39634 (30 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.489+0000 m31100| 2015-07-19T23:39:19.489+0000 I COMMAND [conn52] command db5.coll5_out_agg_sort_2 command: aggregate { aggregate: "coll5", pipeline: [ { $mergeCursors: [ { host: "ip-10-139-123-131:31100", id: 74752008406 }, { host: "ip-10-139-123-131:31200", id: 74809157388 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll5_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:160 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 59998, w: 1131, W: 334 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 105694, W: 3761 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 548ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.517+0000 m31102| 2015-07-19T23:39:19.517+0000 I COMMAND [repl writer worker 7] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.535+0000 m31102| 2015-07-19T23:39:19.535+0000 I COMMAND [repl writer worker 4] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.538+0000 m31102| 2015-07-19T23:39:19.538+0000 I COMMAND [repl writer worker 14] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.598+0000 m30999| 2015-07-19T23:39:19.598+0000 I NETWORK [conn32] end connection 10.139.123.131:57132 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.604+0000 m30998| 2015-07-19T23:39:19.604+0000 I NETWORK [conn33] end connection 10.139.123.131:35772 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.611+0000 m31101| 2015-07-19T23:39:19.611+0000 I COMMAND [repl writer worker 3] CMD: drop db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.615+0000 m30999| 2015-07-19T23:39:19.615+0000 I NETWORK [conn33] end connection 10.139.123.131:57133 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.637+0000 m30999| 2015-07-19T23:39:19.637+0000 I COMMAND [conn1] DROP: db5.coll5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.637+0000 m30999| 2015-07-19T23:39:19.637+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:19.637+0000-55ac3527d2c1f750d1548387", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349159637), what: "dropCollection.start", ns: "db5.coll5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.645+0000 m31101| 2015-07-19T23:39:19.645+0000 I COMMAND [repl writer worker 14] CMD: drop db5.coll5_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.647+0000 m31101| 
2015-07-19T23:39:19.647+0000 I COMMAND [repl writer worker 8] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.689+0000 m30999| 2015-07-19T23:39:19.689+0000 I SHARDING [conn1] distributed lock 'db5.coll5/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3527d2c1f750d1548388 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.689+0000 m31100| 2015-07-19T23:39:19.689+0000 I COMMAND [conn12] CMD: drop db5.coll5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.690+0000 m31100| 2015-07-19T23:39:19.689+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 215ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.690+0000 m31100| 2015-07-19T23:39:19.689+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 210ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.690+0000 m31200| 2015-07-19T23:39:19.690+0000 I COMMAND [conn14] CMD: drop db5.coll5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.691+0000 m31200| 2015-07-19T23:39:19.691+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 682ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.691+0000 m31200| 2015-07-19T23:39:19.691+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 682ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.692+0000 m31102| 2015-07-19T23:39:19.692+0000 I COMMAND [repl writer worker 0] CMD: drop db5.coll5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.693+0000 m31101| 2015-07-19T23:39:19.692+0000 I COMMAND [repl writer worker 0] CMD: drop db5.coll5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.694+0000 m31202| 2015-07-19T23:39:19.693+0000 I COMMAND [repl writer worker 4] CMD: drop db5.coll5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.694+0000 m31201| 2015-07-19T23:39:19.694+0000 I COMMAND [repl writer worker 5] CMD: drop db5.coll5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.746+0000 m31100| 2015-07-19T23:39:19.746+0000 I SHARDING [conn12] remotely refreshing metadata for db5.coll5 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac351fd2c1f750d1548385, current metadata version is 2|3||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.747+0000 m31100| 2015-07-19T23:39:19.746+0000 W SHARDING [conn12] no chunks found when reloading db5.coll5, previous version was 0|0||55ac351fd2c1f750d1548385, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.747+0000 
m31100| 2015-07-19T23:39:19.746+0000 I SHARDING [conn12] dropping metadata for db5.coll5 at shard version 2|3||55ac351fd2c1f750d1548385, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.747+0000 m31200| 2015-07-19T23:39:19.747+0000 I SHARDING [conn14] remotely refreshing metadata for db5.coll5 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac351fd2c1f750d1548385, current metadata version is 2|5||55ac351fd2c1f750d1548385 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.747+0000 m31200| 2015-07-19T23:39:19.747+0000 W SHARDING [conn14] no chunks found when reloading db5.coll5, previous version was 0|0||55ac351fd2c1f750d1548385, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.748+0000 m31200| 2015-07-19T23:39:19.747+0000 I SHARDING [conn14] dropping metadata for db5.coll5 at shard version 2|5||55ac351fd2c1f750d1548385, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.748+0000 m30999| 2015-07-19T23:39:19.748+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:19.748+0000-55ac3527d2c1f750d1548389", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349159748), what: "dropCollection", ns: "db5.coll5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.799+0000 m30999| 2015-07-19T23:39:19.799+0000 I SHARDING [conn1] distributed lock 'db5.coll5/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.851+0000 m30999| 2015-07-19T23:39:19.851+0000 I COMMAND [conn1] DROP: db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.852+0000 m30999| 2015-07-19T23:39:19.851+0000 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.852+0000 m31100| 2015-07-19T23:39:19.851+0000 I COMMAND [conn52] CMD: drop db5.coll5_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.852+0000 m31100| 2015-07-19T23:39:19.852+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 157ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.853+0000 m31100| 2015-07-19T23:39:19.852+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 157ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.853+0000 m30999| 2015-07-19T23:39:19.852+0000 I COMMAND [conn1] DROP: db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.853+0000 m30999| 2015-07-19T23:39:19.852+0000 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.853+0000 m31100| 2015-07-19T23:39:19.852+0000 I COMMAND [conn52] CMD: drop db5.coll5_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.853+0000 m30999| 2015-07-19T23:39:19.853+0000 I COMMAND [conn1] DROP: db5.coll5_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.854+0000 m30999| 2015-07-19T23:39:19.853+0000 I COMMAND [conn1] 
drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.854+0000 m31100| 2015-07-19T23:39:19.853+0000 I COMMAND [conn52] CMD: drop db5.coll5_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.858+0000 m31102| 2015-07-19T23:39:19.854+0000 I COMMAND [repl writer worker 5] CMD: drop db5.coll5_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.858+0000 m31101| 2015-07-19T23:39:19.854+0000 I COMMAND [repl writer worker 10] CMD: drop db5.coll5_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.860+0000 m30999| 2015-07-19T23:39:19.855+0000 I COMMAND [conn1] DROP: db5.coll5_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.860+0000 m30999| 2015-07-19T23:39:19.855+0000 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.860+0000 m31100| 2015-07-19T23:39:19.855+0000 I COMMAND [conn52] CMD: drop db5.coll5_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.860+0000 m31102| 2015-07-19T23:39:19.855+0000 I COMMAND [repl writer worker 8] CMD: drop db5.coll5_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.860+0000 m31101| 2015-07-19T23:39:19.856+0000 I COMMAND [repl writer worker 15] CMD: drop db5.coll5_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.860+0000 m30999| 2015-07-19T23:39:19.856+0000 I COMMAND [conn1] DROP: db5.coll5_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.861+0000 m30999| 2015-07-19T23:39:19.856+0000 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.861+0000 m31102| 2015-07-19T23:39:19.856+0000 I COMMAND [repl writer worker 1] CMD: drop db5.coll5_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.861+0000 m31100| 2015-07-19T23:39:19.857+0000 I COMMAND [conn52] CMD: drop db5.coll5_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.861+0000 m31101| 2015-07-19T23:39:19.857+0000 I COMMAND [repl writer worker 12] CMD: drop db5.coll5_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.861+0000 m31102| 2015-07-19T23:39:19.857+0000 I COMMAND [repl writer worker 9] CMD: drop db5.coll5_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.861+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.861+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.862+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.862+0000 jstests/concurrency/fsm_workloads/agg_sort.js: Workload completed in 6507 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.862+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.862+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.862+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.862+0000 m30999| 2015-07-19T23:39:19.858+0000 I COMMAND [conn1] DROP: db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.862+0000 m30999| 2015-07-19T23:39:19.858+0000 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.862+0000 m31100| 2015-07-19T23:39:19.858+0000 I COMMAND [conn52] CMD: drop db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.863+0000 m31101| 2015-07-19T23:39:19.858+0000 I COMMAND [repl writer worker 6] CMD: drop db5.coll5_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.863+0000 m31102|
2015-07-19T23:39:19.859+0000 I COMMAND [repl writer worker 2] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.863+0000 m31101| 2015-07-19T23:39:19.859+0000 I COMMAND [repl writer worker 2] CMD: drop db5.coll5_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.863+0000 m30999| 2015-07-19T23:39:19.859+0000 I COMMAND [conn1] DROP DATABASE: db5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.863+0000 m30999| 2015-07-19T23:39:19.859+0000 I SHARDING [conn1] DBConfig::dropDatabase: db5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.863+0000 m30999| 2015-07-19T23:39:19.859+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:19.859+0000-55ac3527d2c1f750d154838a", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349159859), what: "dropDatabase.start", ns: "db5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.961+0000 m30999| 2015-07-19T23:39:19.961+0000 I SHARDING [conn1] DBConfig::dropDatabase: db5 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.962+0000 m31100| 2015-07-19T23:39:19.961+0000 I COMMAND [conn27] dropDatabase db5 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.962+0000 m31100| 2015-07-19T23:39:19.961+0000 I COMMAND [conn27] dropDatabase db5 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.963+0000 m30999| 2015-07-19T23:39:19.962+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:19.962+0000-55ac3527d2c1f750d154838b", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349159962), what: "dropDatabase", ns: "db5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.963+0000 m31102| 2015-07-19T23:39:19.962+0000 I COMMAND [repl writer worker 10] dropDatabase db5 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.963+0000 m31102| 2015-07-19T23:39:19.962+0000 I COMMAND [repl writer worker 10] dropDatabase db5 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.963+0000 m31101| 2015-07-19T23:39:19.962+0000 I COMMAND [repl writer worker 1] dropDatabase db5 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:19.963+0000 m31101| 2015-07-19T23:39:19.962+0000 I COMMAND [repl writer worker 1] dropDatabase db5 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.009+0000 m30998| 2015-07-19T23:39:20.009+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:39:20.005+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:30998:1437349129:1804289383', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.019+0000 m31100| 2015-07-19T23:39:20.018+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.022+0000 m31101| 2015-07-19T23:39:20.021+0000 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.022+0000 m31102| 2015-07-19T23:39:20.022+0000 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.023+0000 m31200| 2015-07-19T23:39:20.023+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.024+0000 m31200| 2015-07-19T23:39:20.023+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 327ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.029+0000 m31200| 2015-07-19T23:39:20.029+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.030+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.030+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.030+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.030+0000 jstests/concurrency/fsm_workloads/list_indexes.js
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.030+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.030+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.031+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.032+0000 m31201| 2015-07-19T23:39:20.032+0000 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.032+0000 m31202| 2015-07-19T23:39:20.032+0000 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.034+0000 m30999| 2015-07-19T23:39:20.033+0000 I SHARDING [conn1] distributed lock 'db6/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3528d2c1f750d154838c
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.035+0000 m30999| 2015-07-19T23:39:20.035+0000 I SHARDING [conn1] Placing [db6] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.035+0000 m30999| 2015-07-19T23:39:20.035+0000 I SHARDING [conn1] Enabling sharding for database [db6] in config db
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.086+0000 m30999| 2015-07-19T23:39:20.086+0000 I SHARDING [conn1] distributed lock 'db6/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.092+0000 m31200| 2015-07-19T23:39:20.092+0000 I INDEX [conn28] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.092+0000 m31200| 2015-07-19T23:39:20.092+0000 I INDEX [conn28] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.093+0000 m31200| 2015-07-19T23:39:20.093+0000 I INDEX [conn28] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.094+0000 m30999| 2015-07-19T23:39:20.094+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db6.coll6", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.095+0000 m30999| 2015-07-19T23:39:20.095+0000 I SHARDING [conn1] distributed lock 'db6.coll6/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3528d2c1f750d154838d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.095+0000 m30999| 2015-07-19T23:39:20.095+0000 I SHARDING [conn1] enable sharding on: db6.coll6 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.096+0000 m30999| 2015-07-19T23:39:20.095+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:20.095+0000-55ac3528d2c1f750d154838e", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349160095), what: "shardCollection.start", ns: "db6.coll6", details: { shardKey: { _id: "hashed" }, collection: "db6.coll6", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.102+0000 m31201| 2015-07-19T23:39:20.102+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.103+0000 m31201| 2015-07-19T23:39:20.102+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.103+0000 m31202| 2015-07-19T23:39:20.103+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.103+0000 m31202| 2015-07-19T23:39:20.103+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.104+0000 m31201| 2015-07-19T23:39:20.103+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.104+0000 m31202| 2015-07-19T23:39:20.104+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.146+0000 m30999| 2015-07-19T23:39:20.146+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db6.coll6 using new epoch 55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.248+0000 m30999| 2015-07-19T23:39:20.248+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db6.coll6: 0ms sequenceNumber: 30 version: 1|1||55ac3528d2c1f750d154838f based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.301+0000 m30999| 2015-07-19T23:39:20.300+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db6.coll6: 0ms sequenceNumber: 31 version: 1|1||55ac3528d2c1f750d154838f based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.301+0000 m31200| 2015-07-19T23:39:20.301+0000 I SHARDING [conn23] remotely refreshing metadata for db6.coll6 with requested shard version 1|1||55ac3528d2c1f750d154838f, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.301+0000 m31200| 2015-07-19T23:39:20.301+0000 I SHARDING [conn23] collection db6.coll6 was previously unsharded, new metadata loaded with shard version 1|1||55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.302+0000 m31200| 2015-07-19T23:39:20.301+0000 I SHARDING [conn23] collection version was loaded at version 1|1||55ac3528d2c1f750d154838f, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.302+0000 m30999| 2015-07-19T23:39:20.301+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:20.301+0000-55ac3528d2c1f750d1548390", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349160301), what: "shardCollection", ns: "db6.coll6", details: { version: "1|1||55ac3528d2c1f750d154838f" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.353+0000 m30999| 2015-07-19T23:39:20.352+0000 I SHARDING [conn1] distributed lock 'db6.coll6/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
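Between the banner and this point the harness has prepared db6 for the list_indexes workload: it placed the database on test-rs1, enabled sharding, and sharded db6.coll6 on a hashed _id, which pre-splits the collection into the two chunks logged above. A sketch of that setup using the standard shell helpers (the harness issues the raw shardcollection command shown in the log; the names come from it):

    // Shard db6.coll6 on a hashed _id key, as the logged shardcollection
    // command does. With a hashed key, two initial chunks are created,
    // split at hash value 0: (MinKey, 0) and [0, MaxKey).
    sh.enableSharding('db6');                            // "Enabling sharding for database [db6]"
    sh.shardCollection('db6.coll6', { _id: 'hashed' });  // "going to create 2 chunk(s)"
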
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.353+0000 m30999| 2015-07-19T23:39:20.353+0000 I SHARDING [conn1] moving chunk ns: db6.coll6 moving ( ns: db6.coll6, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.353+0000 m31200| 2015-07-19T23:39:20.353+0000 I SHARDING [conn18] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.354+0000 m31200| 2015-07-19T23:39:20.353+0000 I NETWORK [conn18] starting new replica set monitor for replica set test-rs1 with seeds
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.354+0000 m31200| 2015-07-19T23:39:20.353+0000 I NETWORK [conn18] ip-10-139-123-131:31200
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.354+0000 m31200| 2015-07-19T23:39:20.353+0000 I NETWORK [conn18] ,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.354+0000 m31200| 2015-07-19T23:39:20.353+0000 I NETWORK [conn18] ip-10-139-123-131:31201
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.354+0000 m31200| 2015-07-19T23:39:20.353+0000 I NETWORK [conn18] ,
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.354+0000 m31200| 2015-07-19T23:39:20.353+0000 I NETWORK [conn18] ip-10-139-123-131:31202
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.355+0000 m31200| 2015-07-19T23:39:20.353+0000 I SHARDING [conn18] received moveChunk request: { moveChunk: "db6.coll6", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3528d2c1f750d154838f') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.355+0000 m31200| 2015-07-19T23:39:20.355+0000 I SHARDING [conn18] distributed lock 'db6.coll6/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3528d9a63f6196b1725d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.355+0000 m31200| 2015-07-19T23:39:20.355+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:20.355+0000-55ac3528d9a63f6196b1725e", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349160355), what: "moveChunk.start", ns: "db6.coll6", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.406+0000 m31200| 2015-07-19T23:39:20.405+0000 I SHARDING [conn18] remotely refreshing metadata for db6.coll6 based on current shard version 1|1||55ac3528d2c1f750d154838f, current metadata version is 1|1||55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.406+0000 m31200| 2015-07-19T23:39:20.406+0000 I SHARDING [conn18] metadata of collection db6.coll6 already up to date (shard version : 1|1||55ac3528d2c1f750d154838f, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.406+0000 m31200| 2015-07-19T23:39:20.406+0000 I SHARDING [conn18] moveChunk request accepted at version 1|1||55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.407+0000 m31200| 2015-07-19T23:39:20.406+0000 I SHARDING [conn18] moveChunk number of documents: 0
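Both initial chunks start on the primary shard test-rs1, so the harness now spreads them across the cluster by moving the lower chunk (MinKey, 0) to test-rs0; the moveChunk request above carries waitForDelete: true, so the donor only returns once the migrated range has been cleaned up. Roughly the same migration could be requested by hand; a hedged sketch, not the harness's literal call:

    // Move the chunk containing { _id: MinKey } from test-rs1 to test-rs0,
    // mirroring the logged moveChunk request (the migration that follows).
    sh.moveChunk('db6.coll6', { _id: MinKey }, 'test-rs0');
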
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.407+0000 m31100| 2015-07-19T23:39:20.407+0000 I SHARDING [conn19] remotely refreshing metadata for db6.coll6, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.407+0000 m31100| 2015-07-19T23:39:20.407+0000 I SHARDING [conn19] collection db6.coll6 was previously unsharded, new metadata loaded with shard version 0|0||55ac3528d2c1f750d154838f [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.407+0000 m31100| 2015-07-19T23:39:20.407+0000 I SHARDING [conn19] collection version was loaded at version 1|1||55ac3528d2c1f750d154838f, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.408+0000 m31100| 2015-07-19T23:39:20.407+0000 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for collection db6.coll6 from test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 at epoch 55ac3528d2c1f750d154838f [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.409+0000 m31200| 2015-07-19T23:39:20.409+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.410+0000 m31100| 2015-07-19T23:39:20.409+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 385ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.410+0000 m31100| 2015-07-19T23:39:20.409+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 386ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.412+0000 m31200| 2015-07-19T23:39:20.411+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.412+0000 m31100| 2015-07-19T23:39:20.412+0000 I INDEX [migrateThread] build index on: db6.coll6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.413+0000 m31100| 2015-07-19T23:39:20.412+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.415+0000 m31200| 2015-07-19T23:39:20.415+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { 
_id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.416+0000 m31100| 2015-07-19T23:39:20.416+0000 I INDEX [migrateThread] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.416+0000 m31100| 2015-07-19T23:39:20.416+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.418+0000 m31100| 2015-07-19T23:39:20.418+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.419+0000 m31100| 2015-07-19T23:39:20.419+0000 I SHARDING [migrateThread] Deleter starting delete for: db6.coll6 from { _id: MinKey } -> { _id: 0 }, with opId: 27185 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.419+0000 m31100| 2015-07-19T23:39:20.419+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db6.coll6 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.423+0000 m31101| 2015-07-19T23:39:20.423+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.423+0000 m31101| 2015-07-19T23:39:20.423+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.423+0000 m31102| 2015-07-19T23:39:20.423+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.424+0000 m31102| 2015-07-19T23:39:20.423+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.424+0000 m31200| 2015-07-19T23:39:20.423+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.424+0000 m31101| 2015-07-19T23:39:20.424+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.425+0000 m31102| 2015-07-19T23:39:20.425+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.425+0000 m31100| 2015-07-19T23:39:20.425+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.425+0000 m31100| 2015-07-19T23:39:20.425+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db6.coll6' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.440+0000 m31200| 2015-07-19T23:39:20.440+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.440+0000 m31200| 2015-07-19T23:39:20.440+0000 I SHARDING [conn18] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.440+0000 m31200| 2015-07-19T23:39:20.440+0000 I SHARDING [conn18] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.440+0000 m31200| 2015-07-19T23:39:20.440+0000 I SHARDING [conn18] moveChunk setting version to: 2|0||55ac3528d2c1f750d154838f [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.441+0000 m31100| 2015-07-19T23:39:20.441+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47802 #109 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.445+0000 m31100| 2015-07-19T23:39:20.445+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db6.coll6' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.446+0000 m31100| 2015-07-19T23:39:20.445+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:20.445+0000-55ac352868c42881b59cba37", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349160445), what: "moveChunk.to", ns: "db6.coll6", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 11, step 2 of 5: 5, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 20, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.497+0000 m31200| 2015-07-19T23:39:20.496+0000 I SHARDING [conn18] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db6.coll6", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.497+0000 m31200| 2015-07-19T23:39:20.496+0000 I SHARDING [conn18] moveChunk updating self version to: 2|1||55ac3528d2c1f750d154838f through { _id: 0 } -> { _id: MaxKey } for collection 'db6.coll6' [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.497+0000 m31200| 2015-07-19T23:39:20.497+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:20.497+0000-55ac3528d9a63f6196b1725f", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349160497), what: "moveChunk.commit", ns: "db6.coll6", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, 
clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.548+0000 m31200| 2015-07-19T23:39:20.548+0000 I SHARDING [conn18] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.549+0000 m31200| 2015-07-19T23:39:20.548+0000 I SHARDING [conn18] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.549+0000 m31200| 2015-07-19T23:39:20.548+0000 I SHARDING [conn18] Deleter starting delete for: db6.coll6 from { _id: MinKey } -> { _id: 0 }, with opId: 23958 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.549+0000 m31200| 2015-07-19T23:39:20.548+0000 I SHARDING [conn18] rangeDeleter deleted 0 documents for db6.coll6 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.549+0000 m31200| 2015-07-19T23:39:20.548+0000 I SHARDING [conn18] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.549+0000 m31200| 2015-07-19T23:39:20.549+0000 I SHARDING [conn18] distributed lock 'db6.coll6/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.549+0000 m31200| 2015-07-19T23:39:20.549+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:20.549+0000-55ac3528d9a63f6196b17260", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349160549), what: "moveChunk.from", ns: "db6.coll6", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 32, step 5 of 6: 108, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.600+0000 m31200| 2015-07-19T23:39:20.600+0000 I COMMAND [conn18] command db6.coll6 command: moveChunk { moveChunk: "db6.coll6", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3528d2c1f750d154838f') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 246ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.601+0000 m30999| 2015-07-19T23:39:20.600+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db6.coll6: 0ms sequenceNumber: 32 version: 2|1||55ac3528d2c1f750d154838f based on: 1|1||55ac3528d2c1f750d154838f [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.601+0000 m31100| 2015-07-19T23:39:20.601+0000 I SHARDING [conn15] received splitChunk request: { splitChunk: "db6.coll6", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3528d2c1f750d154838f') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.602+0000 m31100| 2015-07-19T23:39:20.602+0000 I SHARDING [conn15] distributed lock 
'db6.coll6/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac352868c42881b59cba38
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.602+0000 m31100| 2015-07-19T23:39:20.602+0000 I SHARDING [conn15] remotely refreshing metadata for db6.coll6 based on current shard version 0|0||55ac3528d2c1f750d154838f, current metadata version is 1|1||55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.602+0000 m31100| 2015-07-19T23:39:20.602+0000 I SHARDING [conn15] updating metadata for db6.coll6 from shard version 0|0||55ac3528d2c1f750d154838f to shard version 2|0||55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.603+0000 m31100| 2015-07-19T23:39:20.602+0000 I SHARDING [conn15] collection version was loaded at version 2|1||55ac3528d2c1f750d154838f, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.603+0000 m31100| 2015-07-19T23:39:20.602+0000 I SHARDING [conn15] splitChunk accepted at version 2|0||55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.603+0000 m31100| 2015-07-19T23:39:20.603+0000 I SHARDING [conn15] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:20.603+0000-55ac352868c42881b59cba39", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47537", time: new Date(1437349160603), what: "split", ns: "db6.coll6", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac3528d2c1f750d154838f') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac3528d2c1f750d154838f') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.655+0000 m31100| 2015-07-19T23:39:20.654+0000 I SHARDING [conn15] distributed lock 'db6.coll6/ip-10-139-123-131:31100:1437349130:1993228155' unlocked.
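Note how every metadata operation in this log (shardCollection, moveChunk, splitChunk) is bracketed by "distributed lock ... acquired/unlocked" lines: the node performing the change takes a per-namespace lock on the config servers so that concurrent metadata changes cannot interleave. In a 3.x-era cluster like this one the lock documents should be visible in the config database; a small peek, assuming the config.locks schema of that era:

    // Inspect the distributed lock for db6.coll6 on the config servers;
    // the 'ts' field matches the ts values printed in the log lines above.
    db.getSiblingDB('config').locks.find({ _id: 'db6.coll6' }).pretty();
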
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.655+0000 m30999| 2015-07-19T23:39:20.655+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db6.coll6: 0ms sequenceNumber: 33 version: 2|3||55ac3528d2c1f750d154838f based on: 2|1||55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.655+0000 m31200| 2015-07-19T23:39:20.655+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db6.coll6", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3528d2c1f750d154838f') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.656+0000 m31200| 2015-07-19T23:39:20.656+0000 I SHARDING [conn18] distributed lock 'db6.coll6/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3528d9a63f6196b17261
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.657+0000 m31200| 2015-07-19T23:39:20.656+0000 I SHARDING [conn18] remotely refreshing metadata for db6.coll6 based on current shard version 2|0||55ac3528d2c1f750d154838f, current metadata version is 2|0||55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.657+0000 m31200| 2015-07-19T23:39:20.657+0000 I SHARDING [conn18] updating metadata for db6.coll6 from shard version 2|0||55ac3528d2c1f750d154838f to shard version 2|1||55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.657+0000 m31200| 2015-07-19T23:39:20.657+0000 I SHARDING [conn18] collection version was loaded at version 2|3||55ac3528d2c1f750d154838f, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.657+0000 m31200| 2015-07-19T23:39:20.657+0000 I SHARDING [conn18] splitChunk accepted at version 2|1||55ac3528d2c1f750d154838f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.658+0000 m31200| 2015-07-19T23:39:20.657+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:20.657+0000-55ac3528d9a63f6196b17262", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349160657), what: "split", ns: "db6.coll6", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac3528d2c1f750d154838f') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac3528d2c1f750d154838f') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.709+0000 m31200| 2015-07-19T23:39:20.709+0000 I SHARDING [conn18] distributed lock 'db6.coll6/ip-10-139-123-131:31200:1437349131:182555922' unlocked.
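Each shard then splits its chunk at the midpoint of its hashed-key range, giving four chunks in total; the two "split" metadata events above record split points of -4611686018427387902 and 4611686018427387902. Through mongos the same splits could be expressed with sh.splitAt, using the split keys from the log:

    // Split each shard's chunk at the logged hashed-key midpoints.
    sh.splitAt('db6.coll6', { _id: NumberLong('-4611686018427387902') });
    sh.splitAt('db6.coll6', { _id: NumberLong('4611686018427387902') });
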
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.709+0000 m30999| 2015-07-19T23:39:20.709+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db6.coll6: 0ms sequenceNumber: 34 version: 2|5||55ac3528d2c1f750d154838f based on: 2|3||55ac3528d2c1f750d154838f [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.712+0000 m31100| 2015-07-19T23:39:20.712+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.713+0000 m31100| 2015-07-19T23:39:20.712+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.713+0000 m31200| 2015-07-19T23:39:20.712+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.713+0000 m31200| 2015-07-19T23:39:20.712+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.713+0000 m31100| 2015-07-19T23:39:20.713+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.719+0000 m31200| 2015-07-19T23:39:20.713+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.719+0000 m31100| 2015-07-19T23:39:20.713+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 290ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.724+0000 m31100| 2015-07-19T23:39:20.714+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:159 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 290ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.724+0000 m31200| 2015-07-19T23:39:20.713+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:159 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 617ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.724+0000 m31200| 2015-07-19T23:39:20.714+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:159 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 617ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.724+0000 m31202| 2015-07-19T23:39:20.716+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.726+0000 m31202| 2015-07-19T23:39:20.716+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.726+0000 m31100| 2015-07-19T23:39:20.716+0000 I INDEX [conn52] 
build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.726+0000 m31100| 2015-07-19T23:39:20.716+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.727+0000 m31201| 2015-07-19T23:39:20.716+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.727+0000 m31201| 2015-07-19T23:39:20.716+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.727+0000 m31101| 2015-07-19T23:39:20.716+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.727+0000 m31101| 2015-07-19T23:39:20.716+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.727+0000 m31202| 2015-07-19T23:39:20.717+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.727+0000 m31200| 2015-07-19T23:39:20.717+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.728+0000 m31200| 2015-07-19T23:39:20.717+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.728+0000 m31101| 2015-07-19T23:39:20.719+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.728+0000 m31200| 2015-07-19T23:39:20.719+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.728+0000 m31102| 2015-07-19T23:39:20.720+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.728+0000 m31102| 2015-07-19T23:39:20.720+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.728+0000 m31100| 2015-07-19T23:39:20.720+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.728+0000 m31201| 2015-07-19T23:39:20.720+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.728+0000 m31202| 2015-07-19T23:39:20.721+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.728+0000 m31202| 2015-07-19T23:39:20.721+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.729+0000 m31102| 2015-07-19T23:39:20.722+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.729+0000 m31202| 2015-07-19T23:39:20.724+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.729+0000 m31201| 2015-07-19T23:39:20.725+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.729+0000 m31201| 2015-07-19T23:39:20.725+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.729+0000 m31101| 2015-07-19T23:39:20.725+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.729+0000 m31101| 2015-07-19T23:39:20.725+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.729+0000 m31200| 2015-07-19T23:39:20.725+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.730+0000 m31200| 2015-07-19T23:39:20.725+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.730+0000 m31100| 2015-07-19T23:39:20.725+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.730+0000 m31100| 2015-07-19T23:39:20.725+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.730+0000 m31102| 2015-07-19T23:39:20.726+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.730+0000 m31102| 2015-07-19T23:39:20.726+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.730+0000 m31200| 2015-07-19T23:39:20.729+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.731+0000 m31102| 2015-07-19T23:39:20.729+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.731+0000 m31101| 2015-07-19T23:39:20.730+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.731+0000 m31201| 2015-07-19T23:39:20.730+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.731+0000 m31100| 2015-07-19T23:39:20.730+0000 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.732+0000 m31202| 2015-07-19T23:39:20.732+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.733+0000 m31202| 2015-07-19T23:39:20.732+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.733+0000 m31200| 2015-07-19T23:39:20.733+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.733+0000 m31200| 2015-07-19T23:39:20.733+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.736+0000 m31201| 2015-07-19T23:39:20.734+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.736+0000 m31201| 2015-07-19T23:39:20.734+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.736+0000 m31200| 2015-07-19T23:39:20.734+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.736+0000 m31202| 2015-07-19T23:39:20.734+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.737+0000 m31100| 2015-07-19T23:39:20.734+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.739+0000 m31100| 2015-07-19T23:39:20.734+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.739+0000 m31102| 2015-07-19T23:39:20.735+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.744+0000 m31102| 2015-07-19T23:39:20.735+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.744+0000 m31101| 2015-07-19T23:39:20.736+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.745+0000 m31101| 2015-07-19T23:39:20.736+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.745+0000 m31101| 2015-07-19T23:39:20.737+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.745+0000 m31201| 2015-07-19T23:39:20.737+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.745+0000 m31100| 2015-07-19T23:39:20.738+0000 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.746+0000 m31202| 2015-07-19T23:39:20.738+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.746+0000 m31202| 2015-07-19T23:39:20.738+0000 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.746+0000 m31102| 2015-07-19T23:39:20.739+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.746+0000 m31201| 2015-07-19T23:39:20.740+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.746+0000 m31201| 2015-07-19T23:39:20.740+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.746+0000 m31101| 2015-07-19T23:39:20.740+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.747+0000 m31101| 2015-07-19T23:39:20.740+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.747+0000 m31100| 2015-07-19T23:39:20.740+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.751+0000 m31100| 2015-07-19T23:39:20.740+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.751+0000 m31202| 2015-07-19T23:39:20.741+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.753+0000 m31200| 2015-07-19T23:39:20.741+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.753+0000 m31200| 2015-07-19T23:39:20.741+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.754+0000 m31201| 2015-07-19T23:39:20.741+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.754+0000 m31101| 2015-07-19T23:39:20.742+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.754+0000 m31102| 2015-07-19T23:39:20.742+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.754+0000 m31102| 2015-07-19T23:39:20.742+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.754+0000 m31200| 2015-07-19T23:39:20.742+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.755+0000 m31100| 2015-07-19T23:39:20.742+0000 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.755+0000 m31102| 2015-07-19T23:39:20.743+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.755+0000 m31201| 2015-07-19T23:39:20.746+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.755+0000 m31201| 2015-07-19T23:39:20.746+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.756+0000 m31201| 2015-07-19T23:39:20.748+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.756+0000 m31200| 2015-07-19T23:39:20.747+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.756+0000 m31200| 2015-07-19T23:39:20.747+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.756+0000 m31202| 2015-07-19T23:39:20.748+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.756+0000 m31202| 2015-07-19T23:39:20.748+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.756+0000 m31102| 2015-07-19T23:39:20.748+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.757+0000 m31102| 2015-07-19T23:39:20.748+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.757+0000 m31101| 2015-07-19T23:39:20.748+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.757+0000 m31101| 2015-07-19T23:39:20.749+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.757+0000 m31100| 2015-07-19T23:39:20.749+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.757+0000 m31100| 2015-07-19T23:39:20.749+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.757+0000 m31200| 2015-07-19T23:39:20.750+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.758+0000 m31101| 2015-07-19T23:39:20.750+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.758+0000 m31202| 2015-07-19T23:39:20.751+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.758+0000 m31100| 2015-07-19T23:39:20.752+0000 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.758+0000 m31102| 2015-07-19T23:39:20.752+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.758+0000 m31201| 2015-07-19T23:39:20.752+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.758+0000 m31201| 2015-07-19T23:39:20.752+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.758+0000 m31202| 2015-07-19T23:39:20.753+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.759+0000 m31202| 2015-07-19T23:39:20.753+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.759+0000 m31201| 2015-07-19T23:39:20.753+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.760+0000 m31200| 2015-07-19T23:39:20.759+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.760+0000 m31200| 2015-07-19T23:39:20.759+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.761+0000 m31202| 2015-07-19T23:39:20.760+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.761+0000 m31102| 2015-07-19T23:39:20.760+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.764+0000 m31102| 2015-07-19T23:39:20.760+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.764+0000 m31101| 2015-07-19T23:39:20.760+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.764+0000 m31101| 2015-07-19T23:39:20.760+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.764+0000 m31100| 2015-07-19T23:39:20.760+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.764+0000 m31100| 2015-07-19T23:39:20.760+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.764+0000 m31200| 2015-07-19T23:39:20.761+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.765+0000 m31100| 2015-07-19T23:39:20.762+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.765+0000 m31102| 2015-07-19T23:39:20.762+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.765+0000 m31101| 2015-07-19T23:39:20.763+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.767+0000 m31201| 2015-07-19T23:39:20.765+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.767+0000 m31201| 2015-07-19T23:39:20.765+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.768+0000 m31200| 2015-07-19T23:39:20.766+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.768+0000 m31200| 2015-07-19T23:39:20.766+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.768+0000 m31102| 2015-07-19T23:39:20.766+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.768+0000 m31102| 2015-07-19T23:39:20.766+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.768+0000 m31100| 2015-07-19T23:39:20.766+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.768+0000 m31100| 2015-07-19T23:39:20.766+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.769+0000 m31101| 2015-07-19T23:39:20.766+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.769+0000 m31101| 2015-07-19T23:39:20.766+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.769+0000 m31202| 2015-07-19T23:39:20.766+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.769+0000 m31202| 2015-07-19T23:39:20.766+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.770+0000 m31201| 2015-07-19T23:39:20.767+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.770+0000 m31200| 2015-07-19T23:39:20.769+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.774+0000 m31202| 2015-07-19T23:39:20.772+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.776+0000 m31102| 2015-07-19T23:39:20.772+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.779+0000 m31101| 2015-07-19T23:39:20.772+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.779+0000 m31100| 2015-07-19T23:39:20.772+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.779+0000 m31201| 2015-07-19T23:39:20.773+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.780+0000 m31201| 2015-07-19T23:39:20.773+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.780+0000 m31100| 2015-07-19T23:39:20.774+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.780+0000 m31100| 2015-07-19T23:39:20.774+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.780+0000 m31200| 2015-07-19T23:39:20.774+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.781+0000 m31200| 2015-07-19T23:39:20.775+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.784+0000 m31201| 2015-07-19T23:39:20.775+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.784+0000 m31102| 2015-07-19T23:39:20.776+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.784+0000 m31102| 2015-07-19T23:39:20.776+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.785+0000 m31202| 2015-07-19T23:39:20.776+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.785+0000 m31202| 2015-07-19T23:39:20.776+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.785+0000 m31101| 2015-07-19T23:39:20.776+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.785+0000 m31101| 2015-07-19T23:39:20.776+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.786+0000 m31200| 2015-07-19T23:39:20.777+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.788+0000 m31100| 2015-07-19T23:39:20.777+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.788+0000 m31202| 2015-07-19T23:39:20.777+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.788+0000 m31101| 2015-07-19T23:39:20.778+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.788+0000 m31102| 2015-07-19T23:39:20.778+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.789+0000 m31100| 2015-07-19T23:39:20.781+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.789+0000 m31100| 2015-07-19T23:39:20.781+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.790+0000 m31201| 2015-07-19T23:39:20.781+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.790+0000 m31201| 2015-07-19T23:39:20.781+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.790+0000 m31202| 2015-07-19T23:39:20.782+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.791+0000 m31202| 2015-07-19T23:39:20.782+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.791+0000 m31101| 2015-07-19T23:39:20.782+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.792+0000 m31101| 2015-07-19T23:39:20.782+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.792+0000 m31102| 2015-07-19T23:39:20.782+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.792+0000 m31102| 2015-07-19T23:39:20.782+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.792+0000 m31100| 2015-07-19T23:39:20.783+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.792+0000 m31200| 2015-07-19T23:39:20.784+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.793+0000 m31200| 2015-07-19T23:39:20.784+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.793+0000 m31102| 2015-07-19T23:39:20.784+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.793+0000 m31201| 2015-07-19T23:39:20.784+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.793+0000 m31202| 2015-07-19T23:39:20.784+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.793+0000 m31200| 2015-07-19T23:39:20.786+0000 I INDEX [conn23] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.793+0000 m31101| 2015-07-19T23:39:20.786+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.794+0000 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.794+0000 m31202| 2015-07-19T23:39:20.788+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.794+0000 m31202| 2015-07-19T23:39:20.788+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.794+0000 m31201| 2015-07-19T23:39:20.788+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.794+0000 m31201| 2015-07-19T23:39:20.788+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.795+0000 m31102| 2015-07-19T23:39:20.789+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.795+0000 m31102| 2015-07-19T23:39:20.789+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.795+0000 m31101| 2015-07-19T23:39:20.789+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.795+0000 m31101| 2015-07-19T23:39:20.789+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.795+0000 m31201| 2015-07-19T23:39:20.790+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.795+0000 m31202| 2015-07-19T23:39:20.790+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.795+0000 m31102| 2015-07-19T23:39:20.791+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.795+0000 m31101| 2015-07-19T23:39:20.792+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.850+0000 m31100| 2015-07-19T23:39:20.850+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:39:20.848+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:31100:1437349130:1993228155', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.897+0000 m30999| 2015-07-19T23:39:20.897+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57237 #35 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.908+0000 m31201| 2015-07-19T23:39:20.908+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:36005 #10 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.910+0000 m31200| 2015-07-19T23:39:20.910+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39641 #85 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.911+0000 m31202| 2015-07-19T23:39:20.911+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:42041 #10 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.935+0000 m30998| 2015-07-19T23:39:20.935+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35882 #34 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.948+0000 m30998| 2015-07-19T23:39:20.948+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35883 #35 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.954+0000 m30998| 2015-07-19T23:39:20.954+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35884 #36 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.954+0000 m30999| 2015-07-19T23:39:20.954+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57244 #36 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.960+0000 m30999| 2015-07-19T23:39:20.960+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57245 #37 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.961+0000 m30999| 2015-07-19T23:39:20.961+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57246 #38 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.962+0000 m30999| 2015-07-19T23:39:20.962+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57247 #39 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.962+0000 m30998| 2015-07-19T23:39:20.962+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35889 #37 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.966+0000 m30998| 2015-07-19T23:39:20.966+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35890 #38 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.970+0000 setting random seed: 7063393197022 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.970+0000 setting random seed: 866164802573 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.970+0000 setting random seed: 2569506657309 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.971+0000 setting random seed: 4891905905678 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.971+0000 setting random seed: 1314165648072 [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:20.971+0000 setting random seed: 7388652041554 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.972+0000 setting random seed: 3499959306791 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.973+0000 m31100| 2015-07-19T23:39:20.972+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.973+0000 setting random seed: 3859138009138 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.973+0000 m31100| 2015-07-19T23:39:20.972+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 186ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.973+0000 m31200| 2015-07-19T23:39:20.972+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.973+0000 m31100| 2015-07-19T23:39:20.972+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 183ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.974+0000 m31200| 2015-07-19T23:39:20.972+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:141 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 184ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.974+0000 m31200| 2015-07-19T23:39:20.973+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:141 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 184ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.974+0000 m31201| 2015-07-19T23:39:20.973+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.974+0000 m31100| 2015-07-19T23:39:20.973+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.974+0000 m31200| 2015-07-19T23:39:20.974+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.974+0000 setting random seed: 8073308756574 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.975+0000 m31202| 2015-07-19T23:39:20.975+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.978+0000 m31201| 2015-07-19T23:39:20.975+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.978+0000 m31102| 2015-07-19T23:39:20.975+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.978+0000 m31101| 2015-07-19T23:39:20.976+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.979+0000 m31102| 
2015-07-19T23:39:20.976+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.979+0000 m31202| 2015-07-19T23:39:20.976+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.979+0000 m31101| 2015-07-19T23:39:20.977+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.979+0000 m31100| 2015-07-19T23:39:20.978+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.979+0000 setting random seed: 8104124134406 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.981+0000 m31200| 2015-07-19T23:39:20.979+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.982+0000 m31201| 2015-07-19T23:39:20.980+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.982+0000 m31202| 2015-07-19T23:39:20.980+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.982+0000 m31102| 2015-07-19T23:39:20.981+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.982+0000 m31101| 2015-07-19T23:39:20.981+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.982+0000 m31200| 2015-07-19T23:39:20.982+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.985+0000 m31202| 2015-07-19T23:39:20.982+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.986+0000 m31100| 2015-07-19T23:39:20.982+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.986+0000 m31201| 2015-07-19T23:39:20.983+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.986+0000 m31102| 2015-07-19T23:39:20.983+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.986+0000 m31101| 2015-07-19T23:39:20.983+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.986+0000 m30998| 2015-07-19T23:39:20.984+0000 I SHARDING [conn38] ChunkManager: time to load chunks for db6.coll6: 12ms sequenceNumber: 9 version: 2|5||55ac3528d2c1f750d154838f based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.986+0000 m31100| 2015-07-19T23:39:20.985+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47816 #110 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.987+0000 m31100| 2015-07-19T23:39:20.986+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47817 #111 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.987+0000 m31100| 2015-07-19T23:39:20.986+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.987+0000 m31200| 2015-07-19T23:39:20.986+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.988+0000 m31100| 2015-07-19T23:39:20.987+0000 I COMMAND [conn110] CMD: dropIndexes 
db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.988+0000 m31100| 2015-07-19T23:39:20.988+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47818 #112 (56 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.990+0000 m31100| 2015-07-19T23:39:20.988+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47819 #113 (57 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.990+0000 m31200| 2015-07-19T23:39:20.989+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39656 #86 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.990+0000 m31202| 2015-07-19T23:39:20.989+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.990+0000 m31201| 2015-07-19T23:39:20.989+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.991+0000 m31101| 2015-07-19T23:39:20.989+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.991+0000 m31102| 2015-07-19T23:39:20.989+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.992+0000 m31100| 2015-07-19T23:39:20.992+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47821 #114 (58 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.992+0000 m31200| 2015-07-19T23:39:20.992+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.996+0000 m31100| 2015-07-19T23:39:20.992+0000 I COMMAND [conn111] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.999+0000 m31102| 2015-07-19T23:39:20.993+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.999+0000 m31101| 2015-07-19T23:39:20.993+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.999+0000 m31100| 2015-07-19T23:39:20.994+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.999+0000 m31201| 2015-07-19T23:39:20.995+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:20.999+0000 m31202| 2015-07-19T23:39:20.995+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.000+0000 m31102| 2015-07-19T23:39:20.995+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.000+0000 m31101| 2015-07-19T23:39:20.996+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.000+0000 m31102| 2015-07-19T23:39:20.996+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.000+0000 m31101| 2015-07-19T23:39:20.996+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.000+0000 m31100| 2015-07-19T23:39:20.998+0000 I COMMAND [conn114] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.000+0000 m31200| 2015-07-19T23:39:20.999+0000 I NETWORK [initandlisten] connection 
accepted from 10.139.123.131:39658 #87 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.001+0000 m31200| 2015-07-19T23:39:20.999+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39659 #88 (34 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.001+0000 m31200| 2015-07-19T23:39:20.999+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.001+0000 m31100| 2015-07-19T23:39:21.000+0000 I COMMAND [conn112] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.001+0000 m31200| 2015-07-19T23:39:21.000+0000 I COMMAND [conn88] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.004+0000 m31200| 2015-07-19T23:39:21.000+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.005+0000 m31101| 2015-07-19T23:39:21.001+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.005+0000 m31102| 2015-07-19T23:39:21.001+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.005+0000 m31101| 2015-07-19T23:39:21.002+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.005+0000 m31102| 2015-07-19T23:39:21.002+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.005+0000 m31201| 2015-07-19T23:39:21.002+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.005+0000 m31202| 2015-07-19T23:39:21.003+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.006+0000 m31202| 2015-07-19T23:39:21.004+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.006+0000 m31201| 2015-07-19T23:39:21.004+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.006+0000 m31202| 2015-07-19T23:39:21.005+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.006+0000 m31201| 2015-07-19T23:39:21.005+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.006+0000 m31200| 2015-07-19T23:39:21.006+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39660 #89 (35 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.006+0000 m31200| 2015-07-19T23:39:21.006+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.008+0000 m31201| 2015-07-19T23:39:21.007+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.008+0000 m31202| 2015-07-19T23:39:21.007+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.077+0000 m31200| 2015-07-19T23:39:21.076+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.077+0000 m31200| 2015-07-19T23:39:21.076+0000 I INDEX [conn23] building 
index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.077+0000 m31100| 2015-07-19T23:39:21.077+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.077+0000 m31100| 2015-07-19T23:39:21.077+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.078+0000 m31200| 2015-07-19T23:39:21.078+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.091+0000 m31100| 2015-07-19T23:39:21.079+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.092+0000 m31201| 2015-07-19T23:39:21.080+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.092+0000 m31201| 2015-07-19T23:39:21.080+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.092+0000 m31202| 2015-07-19T23:39:21.080+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.092+0000 m31202| 2015-07-19T23:39:21.080+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.092+0000 m31200| 2015-07-19T23:39:21.080+0000 I INDEX [conn24] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.092+0000 m31200| 2015-07-19T23:39:21.080+0000 I INDEX [conn24] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.093+0000 m31100| 2015-07-19T23:39:21.081+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.093+0000 m31100| 2015-07-19T23:39:21.081+0000 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.093+0000 m31200| 2015-07-19T23:39:21.081+0000 I INDEX [conn24] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.093+0000 m31201| 2015-07-19T23:39:21.083+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.094+0000 m31202| 2015-07-19T23:39:21.083+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.094+0000 m31100| 2015-07-19T23:39:21.083+0000 I INDEX [conn49] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.094+0000 m31200| 2015-07-19T23:39:21.084+0000 I INDEX [conn20] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.094+0000 m31200| 2015-07-19T23:39:21.084+0000 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.094+0000 m31101| 2015-07-19T23:39:21.085+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.094+0000 m31101| 2015-07-19T23:39:21.085+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.095+0000 m31102| 2015-07-19T23:39:21.085+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.095+0000 m31102| 2015-07-19T23:39:21.085+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.095+0000 m31202| 2015-07-19T23:39:21.086+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.096+0000 m31202| 2015-07-19T23:39:21.086+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.096+0000 m31201| 2015-07-19T23:39:21.086+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.096+0000 m31201| 2015-07-19T23:39:21.086+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.096+0000 m31101| 2015-07-19T23:39:21.086+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.097+0000 m31200| 2015-07-19T23:39:21.087+0000 I INDEX [conn20] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.097+0000 m30999| 2015-07-19T23:39:21.088+0000 I SHARDING [conn38] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.097+0000 m30999| 2015-07-19T23:39:21.088+0000 I SHARDING [conn38] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.097+0000 m31202| 2015-07-19T23:39:21.088+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.097+0000 m31200| 2015-07-19T23:39:21.088+0000 I NETWORK [conn24] end connection 10.139.123.131:39408 (34 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.097+0000 m31102| 2015-07-19T23:39:21.088+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.097+0000 m31201| 2015-07-19T23:39:21.088+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.098+0000 m31101| 2015-07-19T23:39:21.089+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.098+0000 m31101| 2015-07-19T23:39:21.089+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.099+0000 m30999| 2015-07-19T23:39:21.089+0000 I SHARDING [conn35] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.099+0000 m30999| 2015-07-19T23:39:21.089+0000 I SHARDING [conn35] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.099+0000 m31200| 2015-07-19T23:39:21.089+0000 I NETWORK [conn23] end connection 10.139.123.131:39407 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.099+0000 m31200| 2015-07-19T23:39:21.091+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39661 #90 (34 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.099+0000 m31100| 2015-07-19T23:39:21.091+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.099+0000 m31100| 2015-07-19T23:39:21.091+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.099+0000 m31202| 2015-07-19T23:39:21.091+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.099+0000 m31202| 2015-07-19T23:39:21.091+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.100+0000 m31201| 2015-07-19T23:39:21.091+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.100+0000 m31201| 2015-07-19T23:39:21.091+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.100+0000 m31102| 2015-07-19T23:39:21.092+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.100+0000 m31102| 2015-07-19T23:39:21.092+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.100+0000 m31200| 2015-07-19T23:39:21.091+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39662 #91 (35 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.100+0000 m31101| 2015-07-19T23:39:21.092+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.100+0000 m31201| 2015-07-19T23:39:21.092+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.101+0000 m31100| 2015-07-19T23:39:21.093+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.101+0000 m31202| 2015-07-19T23:39:21.093+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.103+0000 m30999| 2015-07-19T23:39:21.094+0000 I SHARDING [conn39] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.103+0000 m30999| 2015-07-19T23:39:21.095+0000 I SHARDING [conn39] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.103+0000 m31102| 2015-07-19T23:39:21.095+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.103+0000 m31200| 2015-07-19T23:39:21.095+0000 I NETWORK [conn20] end connection 10.139.123.131:39402 (34 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.104+0000 m31100| 2015-07-19T23:39:21.097+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.104+0000 m31100| 2015-07-19T23:39:21.097+0000 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.104+0000 m31200| 2015-07-19T23:39:21.098+0000 I INDEX [conn34] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.104+0000 m31200| 2015-07-19T23:39:21.098+0000 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.105+0000 m31200| 2015-07-19T23:39:21.098+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39663 #92 (35 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.105+0000 m31102| 2015-07-19T23:39:21.099+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.105+0000 m31102| 2015-07-19T23:39:21.099+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.105+0000 m31101| 2015-07-19T23:39:21.100+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.105+0000 m31101| 2015-07-19T23:39:21.100+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.105+0000 m31200| 2015-07-19T23:39:21.100+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39664 #93 (36 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.106+0000 m31200| 2015-07-19T23:39:21.102+0000 I INDEX [conn34] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.106+0000 m31102| 2015-07-19T23:39:21.102+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.108+0000 m31200| 2015-07-19T23:39:21.102+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39665 #94 (37 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.109+0000 m31101| 2015-07-19T23:39:21.102+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.114+0000 m31100| 2015-07-19T23:39:21.104+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.115+0000 m31202| 2015-07-19T23:39:21.105+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.116+0000 m31202| 2015-07-19T23:39:21.105+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.117+0000 m31201| 2015-07-19T23:39:21.105+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.117+0000 m31201| 2015-07-19T23:39:21.105+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.117+0000 m31200| 2015-07-19T23:39:21.106+0000 I INDEX [conn91] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.117+0000 m31200| 2015-07-19T23:39:21.106+0000 I INDEX [conn91] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.118+0000 m31202| 2015-07-19T23:39:21.106+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.118+0000 m31201| 2015-07-19T23:39:21.107+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.121+0000 m31100| 2015-07-19T23:39:21.107+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.121+0000 m31100| 2015-07-19T23:39:21.107+0000 I INDEX [conn33] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.121+0000 m31200| 2015-07-19T23:39:21.108+0000 I INDEX [conn91] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.122+0000 m31102| 2015-07-19T23:39:21.109+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.122+0000 m31102| 2015-07-19T23:39:21.110+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.122+0000 m31101| 2015-07-19T23:39:21.110+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.122+0000 m31101| 2015-07-19T23:39:21.110+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.123+0000 m31100| 2015-07-19T23:39:21.112+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.123+0000 m31101| 2015-07-19T23:39:21.112+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.123+0000 m31102| 2015-07-19T23:39:21.113+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.123+0000 m31200| 2015-07-19T23:39:21.114+0000 I INDEX [conn26] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.123+0000 m31200| 2015-07-19T23:39:21.114+0000 I INDEX [conn26] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.123+0000 m31201| 2015-07-19T23:39:21.114+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.123+0000 m31201| 2015-07-19T23:39:21.114+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.124+0000 m31202| 2015-07-19T23:39:21.115+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.124+0000 m31202| 2015-07-19T23:39:21.115+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.124+0000 m31200| 2015-07-19T23:39:21.116+0000 I INDEX [conn26] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.125+0000 m31100| 2015-07-19T23:39:21.116+0000 I INDEX [conn53] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.125+0000 m31100| 2015-07-19T23:39:21.116+0000 I INDEX [conn53] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.125+0000 m31101| 2015-07-19T23:39:21.118+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.125+0000 m31101| 2015-07-19T23:39:21.118+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.125+0000 m31200| 2015-07-19T23:39:21.118+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39666 #95 (38 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.125+0000 m31200| 2015-07-19T23:39:21.118+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.125+0000 m31200| 2015-07-19T23:39:21.118+0000 I INDEX [conn22] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.126+0000 m31102| 2015-07-19T23:39:21.119+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.126+0000 m31102| 2015-07-19T23:39:21.119+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.126+0000 m31202| 2015-07-19T23:39:21.120+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.127+0000 m31200| 2015-07-19T23:39:21.120+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.128+0000 m31101| 2015-07-19T23:39:21.120+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.128+0000 m31200| 2015-07-19T23:39:21.120+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39667 #96 (39 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.128+0000 m31201| 2015-07-19T23:39:21.120+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.128+0000 m31100| 2015-07-19T23:39:21.121+0000 I INDEX [conn53] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.128+0000 m31102| 2015-07-19T23:39:21.121+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.128+0000 m31202| 2015-07-19T23:39:21.123+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.129+0000 m31202| 2015-07-19T23:39:21.123+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.131+0000 m31200| 2015-07-19T23:39:21.124+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39668 #97 (40 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.132+0000 m31100| 2015-07-19T23:39:21.125+0000 I INDEX [conn58] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.132+0000 m31100| 2015-07-19T23:39:21.125+0000 I INDEX [conn58] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.137+0000 m31201| 2015-07-19T23:39:21.126+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.138+0000 m31201| 2015-07-19T23:39:21.126+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.139+0000 m31202| 2015-07-19T23:39:21.126+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.140+0000 m31102| 2015-07-19T23:39:21.127+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.140+0000 m31102| 2015-07-19T23:39:21.127+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.144+0000 m31101| 2015-07-19T23:39:21.127+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.176+0000 m31101| 2015-07-19T23:39:21.127+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.176+0000 m31201| 2015-07-19T23:39:21.128+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.176+0000 m31100| 2015-07-19T23:39:21.129+0000 I INDEX [conn58] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.176+0000 m31102| 2015-07-19T23:39:21.129+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.176+0000 m31101| 2015-07-19T23:39:21.129+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.176+0000 m31202| 2015-07-19T23:39:21.130+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.176+0000 m31202| 2015-07-19T23:39:21.130+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.176+0000 m31201| 2015-07-19T23:39:21.131+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.176+0000 m31201| 2015-07-19T23:39:21.131+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.177+0000 m31100| 2015-07-19T23:39:21.131+0000 I INDEX [conn55] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.177+0000 m31100| 2015-07-19T23:39:21.131+0000 I INDEX [conn55] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.177+0000 m31200| 2015-07-19T23:39:21.131+0000 I INDEX [conn92] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.177+0000 m31200| 2015-07-19T23:39:21.131+0000 I INDEX [conn92] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.177+0000 m31102| 2015-07-19T23:39:21.132+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.177+0000 m31102| 2015-07-19T23:39:21.132+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.177+0000 m31200| 2015-07-19T23:39:21.133+0000 I INDEX [conn92] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.177+0000 m31202| 2015-07-19T23:39:21.133+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.177+0000 m31201| 2015-07-19T23:39:21.134+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.177+0000 m31200| 2015-07-19T23:39:21.133+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.178+0000 m31101| 2015-07-19T23:39:21.134+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.178+0000 m31101| 2015-07-19T23:39:21.134+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.178+0000 m31200| 2015-07-19T23:39:21.134+0000 I NETWORK [conn26] end connection 10.139.123.131:39413 (39 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.178+0000 m31102| 2015-07-19T23:39:21.134+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.178+0000 m30999| 2015-07-19T23:39:21.135+0000 I SHARDING [conn36] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.179+0000 m30998| 2015-07-19T23:39:21.134+0000 I SHARDING [conn36] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.179+0000 m30998| 2015-07-19T23:39:21.134+0000 I SHARDING [conn36] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.179+0000 m30999| 2015-07-19T23:39:21.135+0000 I SHARDING [conn36] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.179+0000 m31100| 2015-07-19T23:39:21.135+0000 I INDEX [conn55] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.179+0000 m31200| 2015-07-19T23:39:21.135+0000 I NETWORK [conn91] end connection 10.139.123.131:39662 (38 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.179+0000 m31200| 2015-07-19T23:39:21.136+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.180+0000 m31101| 2015-07-19T23:39:21.137+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.180+0000 m31100| 2015-07-19T23:39:21.137+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.180+0000 m31100| 2015-07-19T23:39:21.137+0000 I INDEX [conn54] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.180+0000 m31201| 2015-07-19T23:39:21.138+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.180+0000 m31201| 2015-07-19T23:39:21.138+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.180+0000 m31202| 2015-07-19T23:39:21.139+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.180+0000 m31202| 2015-07-19T23:39:21.139+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.180+0000 m31201| 2015-07-19T23:39:21.140+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.180+0000 m31201| 2015-07-19T23:39:21.140+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.180+0000 m31102| 2015-07-19T23:39:21.140+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.181+0000 m31102| 2015-07-19T23:39:21.140+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.181+0000 m31200| 2015-07-19T23:39:21.141+0000 I INDEX [conn96] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.181+0000 m31200| 2015-07-19T23:39:21.141+0000 I INDEX [conn96] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.182+0000 m31100| 2015-07-19T23:39:21.141+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.182+0000 m31202| 2015-07-19T23:39:21.141+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.182+0000 m31202| 2015-07-19T23:39:21.142+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.182+0000 m31102| 2015-07-19T23:39:21.142+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.182+0000 m31201| 2015-07-19T23:39:21.142+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.182+0000 m31202| 2015-07-19T23:39:21.143+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.183+0000 m31101| 2015-07-19T23:39:21.143+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.183+0000 m31101| 2015-07-19T23:39:21.143+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.183+0000 m31100| 2015-07-19T23:39:21.143+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.183+0000 m31100| 2015-07-19T23:39:21.143+0000 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.184+0000 m31101| 2015-07-19T23:39:21.145+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.184+0000 m31200| 2015-07-19T23:39:21.145+0000 I INDEX [conn96] build index done. scanned 0 total records. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.184+0000 m31102| 2015-07-19T23:39:21.146+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.184+0000 m31102| 2015-07-19T23:39:21.146+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.184+0000 m31100| 2015-07-19T23:39:21.147+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.184+0000 m31100| 2015-07-19T23:39:21.148+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.184+0000 m31101| 2015-07-19T23:39:21.148+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.185+0000 m31101| 2015-07-19T23:39:21.148+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.185+0000 m31201| 2015-07-19T23:39:21.148+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.185+0000 m31201| 2015-07-19T23:39:21.148+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.185+0000 m31100| 2015-07-19T23:39:21.148+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.185+0000 m31202| 2015-07-19T23:39:21.149+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.185+0000 m31202| 2015-07-19T23:39:21.149+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.185+0000 m31100| 2015-07-19T23:39:21.149+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.185+0000 m31200| 2015-07-19T23:39:21.150+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:39:21.149+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:31200:1437349131:182555922', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.185+0000 m31101| 2015-07-19T23:39:21.150+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.185+0000 m31102| 2015-07-19T23:39:21.151+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31201| 2015-07-19T23:39:21.151+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31202| 2015-07-19T23:39:21.151+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31200| 2015-07-19T23:39:21.151+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39669 #98 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31200| 2015-07-19T23:39:21.152+0000 I INDEX [conn95] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31200| 2015-07-19T23:39:21.152+0000 I INDEX [conn95] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31102| 2015-07-19T23:39:21.153+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31102| 2015-07-19T23:39:21.153+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31101| 2015-07-19T23:39:21.153+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31101| 2015-07-19T23:39:21.153+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31100| 2015-07-19T23:39:21.154+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.186+0000 m31101| 2015-07-19T23:39:21.155+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.187+0000 m31102| 2015-07-19T23:39:21.155+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.187+0000 m31101| 2015-07-19T23:39:21.155+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.187+0000 m31200| 2015-07-19T23:39:21.156+0000 I INDEX [conn95] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.187+0000 m31102| 2015-07-19T23:39:21.156+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.187+0000 m31101| 2015-07-19T23:39:21.156+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.187+0000 m31102| 2015-07-19T23:39:21.156+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.187+0000 m31101| 2015-07-19T23:39:21.157+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.187+0000 m31102| 2015-07-19T23:39:21.157+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.187+0000 m30998| 2015-07-19T23:39:21.157+0000 I SHARDING [conn37] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m30998| 2015-07-19T23:39:21.157+0000 I SHARDING [conn37] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m30999| 2015-07-19T23:39:21.158+0000 I SHARDING [conn37] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m31200| 2015-07-19T23:39:21.158+0000 I NETWORK [conn96] end connection 10.139.123.131:39667 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m31200| 2015-07-19T23:39:21.159+0000 I NETWORK [conn34] end connection 10.139.123.131:39489 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m30998| 2015-07-19T23:39:21.158+0000 I SHARDING [conn34] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m30998| 2015-07-19T23:39:21.158+0000 I SHARDING [conn34] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m30999| 2015-07-19T23:39:21.158+0000 I SHARDING [conn37] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m31100| 2015-07-19T23:39:21.159+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m31102| 2015-07-19T23:39:21.158+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m31200| 2015-07-19T23:39:21.160+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.188+0000 m31101| 2015-07-19T23:39:21.160+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31100| 2015-07-19T23:39:21.160+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31200| 2015-07-19T23:39:21.160+0000 I NETWORK [conn92] end connection 10.139.123.131:39663 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31101| 2015-07-19T23:39:21.161+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31102| 2015-07-19T23:39:21.161+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31200| 2015-07-19T23:39:21.161+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31200| 2015-07-19T23:39:21.162+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31201| 2015-07-19T23:39:21.163+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31201| 2015-07-19T23:39:21.163+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31102| 2015-07-19T23:39:21.163+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31101| 2015-07-19T23:39:21.163+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.189+0000 m31200| 2015-07-19T23:39:21.163+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31202| 2015-07-19T23:39:21.164+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31202| 2015-07-19T23:39:21.164+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31100| 2015-07-19T23:39:21.165+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31200| 2015-07-19T23:39:21.165+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31100| 2015-07-19T23:39:21.166+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31101| 2015-07-19T23:39:21.167+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31200| 2015-07-19T23:39:21.167+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31202| 2015-07-19T23:39:21.167+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31201| 2015-07-19T23:39:21.167+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31102| 2015-07-19T23:39:21.168+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.190+0000 m31202| 2015-07-19T23:39:21.168+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31100| 2015-07-19T23:39:21.169+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31201| 2015-07-19T23:39:21.169+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31101| 2015-07-19T23:39:21.169+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31200| 2015-07-19T23:39:21.169+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31202| 2015-07-19T23:39:21.170+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31201| 2015-07-19T23:39:21.170+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31102| 2015-07-19T23:39:21.170+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31202| 2015-07-19T23:39:21.171+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31102| 2015-07-19T23:39:21.171+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31201| 2015-07-19T23:39:21.171+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.191+0000 m31101| 2015-07-19T23:39:21.171+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31202| 2015-07-19T23:39:21.172+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31200| 2015-07-19T23:39:21.172+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39670 #99 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31201| 2015-07-19T23:39:21.173+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31202| 2015-07-19T23:39:21.173+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31201| 2015-07-19T23:39:21.174+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31202| 2015-07-19T23:39:21.174+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31201| 2015-07-19T23:39:21.174+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31202| 2015-07-19T23:39:21.174+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31201| 2015-07-19T23:39:21.175+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31100| 2015-07-19T23:39:21.178+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31200| 2015-07-19T23:39:21.178+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31102| 2015-07-19T23:39:21.181+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.192+0000 m31202| 2015-07-19T23:39:21.181+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.193+0000 m31201| 2015-07-19T23:39:21.181+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.193+0000 m31101| 2015-07-19T23:39:21.182+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.251+0000 m31100| 2015-07-19T23:39:21.251+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.252+0000 m31100| 2015-07-19T23:39:21.251+0000 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.252+0000 m31200| 2015-07-19T23:39:21.251+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.252+0000 m31200| 2015-07-19T23:39:21.251+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.253+0000 m31100| 2015-07-19T23:39:21.252+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.253+0000 m31200| 2015-07-19T23:39:21.252+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.255+0000 m31100| 2015-07-19T23:39:21.255+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.255+0000 m31100| 2015-07-19T23:39:21.255+0000 I INDEX [conn54] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.256+0000 m31202| 2015-07-19T23:39:21.255+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.256+0000 m31202| 2015-07-19T23:39:21.255+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.256+0000 m31201| 2015-07-19T23:39:21.256+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.258+0000 m31201| 2015-07-19T23:39:21.256+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.259+0000 m31100| 2015-07-19T23:39:21.256+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.260+0000 m31202| 2015-07-19T23:39:21.257+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.260+0000 m31101| 2015-07-19T23:39:21.257+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.260+0000 m31101| 2015-07-19T23:39:21.257+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.261+0000 m31102| 2015-07-19T23:39:21.258+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.261+0000 m31102| 2015-07-19T23:39:21.258+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.261+0000 m31201| 2015-07-19T23:39:21.259+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.261+0000 m31101| 2015-07-19T23:39:21.259+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.261+0000 m31102| 2015-07-19T23:39:21.260+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.261+0000 m31200| 2015-07-19T23:39:21.261+0000 I INDEX [conn99] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.262+0000 m31200| 2015-07-19T23:39:21.261+0000 I INDEX [conn99] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.266+0000 m31101| 2015-07-19T23:39:21.263+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.266+0000 m31101| 2015-07-19T23:39:21.263+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.266+0000 m31200| 2015-07-19T23:39:21.264+0000 I INDEX [conn99] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.266+0000 m31102| 2015-07-19T23:39:21.264+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.266+0000 m31102| 2015-07-19T23:39:21.264+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.267+0000 m31100| 2015-07-19T23:39:21.265+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.269+0000 m31100| 2015-07-19T23:39:21.265+0000 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.269+0000 m31101| 2015-07-19T23:39:21.265+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.269+0000 m31200| 2015-07-19T23:39:21.268+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39671 #100 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.269+0000 m31100| 2015-07-19T23:39:21.268+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.269+0000 m31102| 2015-07-19T23:39:21.268+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.270+0000 m31200| 2015-07-19T23:39:21.268+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.270+0000 m31200| 2015-07-19T23:39:21.268+0000 I INDEX [conn94] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.270+0000 m31100| 2015-07-19T23:39:21.268+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.271+0000 m31200| 2015-07-19T23:39:21.271+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39672 #101 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.272+0000 m31200| 2015-07-19T23:39:21.272+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.272+0000 m31200| 2015-07-19T23:39:21.272+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.274+0000 m31201| 2015-07-19T23:39:21.273+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.274+0000 m31201| 2015-07-19T23:39:21.273+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.276+0000 m31102| 2015-07-19T23:39:21.273+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.276+0000 m31102| 2015-07-19T23:39:21.273+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.276+0000 m31100| 2015-07-19T23:39:21.273+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.276+0000 m31100| 2015-07-19T23:39:21.273+0000 I INDEX [conn54] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.276+0000 m31201| 2015-07-19T23:39:21.274+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.276+0000 m31202| 2015-07-19T23:39:21.274+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.277+0000 m31202| 2015-07-19T23:39:21.274+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.277+0000 m31100| 2015-07-19T23:39:21.275+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.277+0000 m31101| 2015-07-19T23:39:21.275+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.277+0000 m31101| 2015-07-19T23:39:21.275+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.281+0000 m31202| 2015-07-19T23:39:21.280+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.282+0000 m31102| 2015-07-19T23:39:21.280+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.282+0000 m31100| 2015-07-19T23:39:21.281+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.282+0000 m31100| 2015-07-19T23:39:21.281+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.282+0000 m31200| 2015-07-19T23:39:21.281+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.282+0000 m31200| 2015-07-19T23:39:21.281+0000 I INDEX [conn22] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.284+0000 m31102| 2015-07-19T23:39:21.281+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.284+0000 m31201| 2015-07-19T23:39:21.282+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.284+0000 m31201| 2015-07-19T23:39:21.282+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.285+0000 m31200| 2015-07-19T23:39:21.283+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.285+0000 m31100| 2015-07-19T23:39:21.283+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.285+0000 m31101| 2015-07-19T23:39:21.284+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.285+0000 m31202| 2015-07-19T23:39:21.284+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.285+0000 m31202| 2015-07-19T23:39:21.284+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.288+0000 m31201| 2015-07-19T23:39:21.284+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.288+0000 m31101| 2015-07-19T23:39:21.285+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.288+0000 m31201| 2015-07-19T23:39:21.285+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.288+0000 m31102| 2015-07-19T23:39:21.286+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.291+0000 m31102| 2015-07-19T23:39:21.286+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.291+0000 m31202| 2015-07-19T23:39:21.286+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.291+0000 m31100| 2015-07-19T23:39:21.287+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.292+0000 m31100| 2015-07-19T23:39:21.287+0000 I INDEX [conn33] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.292+0000 m31202| 2015-07-19T23:39:21.287+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.292+0000 m31200| 2015-07-19T23:39:21.287+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.292+0000 m31200| 2015-07-19T23:39:21.287+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.292+0000 m31100| 2015-07-19T23:39:21.289+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.292+0000 m31102| 2015-07-19T23:39:21.289+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.293+0000 m31200| 2015-07-19T23:39:21.289+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.293+0000 m31101| 2015-07-19T23:39:21.289+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.293+0000 m31101| 2015-07-19T23:39:21.289+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.293+0000 m31201| 2015-07-19T23:39:21.290+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.293+0000 m31201| 2015-07-19T23:39:21.290+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.293+0000 m31202| 2015-07-19T23:39:21.291+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.293+0000 m31202| 2015-07-19T23:39:21.291+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.296+0000 m31101| 2015-07-19T23:39:21.295+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.297+0000 m31202| 2015-07-19T23:39:21.295+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.297+0000 m31102| 2015-07-19T23:39:21.295+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.297+0000 m31102| 2015-07-19T23:39:21.295+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.297+0000 m31100| 2015-07-19T23:39:21.296+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.297+0000 m31100| 2015-07-19T23:39:21.296+0000 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.297+0000 m31200| 2015-07-19T23:39:21.295+0000 I INDEX [conn90] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.297+0000 m31200| 2015-07-19T23:39:21.295+0000 I INDEX [conn90] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.298+0000 m31201| 2015-07-19T23:39:21.296+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.300+0000 m31200| 2015-07-19T23:39:21.300+0000 I INDEX [conn90] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.302+0000 m31100| 2015-07-19T23:39:21.300+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.302+0000 m31101| 2015-07-19T23:39:21.300+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.304+0000 m31101| 2015-07-19T23:39:21.300+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.304+0000 m31201| 2015-07-19T23:39:21.301+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.304+0000 m31201| 2015-07-19T23:39:21.301+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.304+0000 m31202| 2015-07-19T23:39:21.301+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.304+0000 m31202| 2015-07-19T23:39:21.301+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.304+0000 m31102| 2015-07-19T23:39:21.302+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.305+0000 m31100| 2015-07-19T23:39:21.303+0000 I INDEX [conn58] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.305+0000 m31100| 2015-07-19T23:39:21.303+0000 I INDEX [conn58] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.308+0000 m31201| 2015-07-19T23:39:21.306+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.309+0000 m31102| 2015-07-19T23:39:21.307+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.309+0000 m31102| 2015-07-19T23:39:21.307+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.309+0000 m31100| 2015-07-19T23:39:21.307+0000 I INDEX [conn58] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.309+0000 m31101| 2015-07-19T23:39:21.308+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.309+0000 m31200| 2015-07-19T23:39:21.308+0000 I INDEX [conn100] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.309+0000 m31200| 2015-07-19T23:39:21.308+0000 I INDEX [conn100] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.309+0000 m31202| 2015-07-19T23:39:21.308+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.310+0000 m31201| 2015-07-19T23:39:21.309+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.310+0000 m31201| 2015-07-19T23:39:21.309+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.312+0000 m31102| 2015-07-19T23:39:21.311+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.313+0000 m31100| 2015-07-19T23:39:21.312+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.313+0000 m31100| 2015-07-19T23:39:21.312+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.313+0000 m31101| 2015-07-19T23:39:21.312+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.313+0000 m31101| 2015-07-19T23:39:21.312+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.313+0000 m31200| 2015-07-19T23:39:21.313+0000 I INDEX [conn100] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.314+0000 m31202| 2015-07-19T23:39:21.313+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.315+0000 m31202| 2015-07-19T23:39:21.313+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.315+0000 m31201| 2015-07-19T23:39:21.313+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.315+0000 m31100| 2015-07-19T23:39:21.314+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.318+0000 m31100| 2015-07-19T23:39:21.314+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.318+0000 m31102| 2015-07-19T23:39:21.314+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.318+0000 m31102| 2015-07-19T23:39:21.314+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.319+0000 m31200| 2015-07-19T23:39:21.315+0000 I INDEX [conn95] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.319+0000 m31200| 2015-07-19T23:39:21.315+0000 I INDEX [conn95] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.319+0000 m31102| 2015-07-19T23:39:21.316+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.319+0000 m31200| 2015-07-19T23:39:21.317+0000 I INDEX [conn95] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.321+0000 m31100| 2015-07-19T23:39:21.317+0000 I INDEX [conn55] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.321+0000 m31100| 2015-07-19T23:39:21.317+0000 I INDEX [conn55] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.321+0000 m31202| 2015-07-19T23:39:21.317+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.321+0000 m31101| 2015-07-19T23:39:21.317+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.322+0000 m31201| 2015-07-19T23:39:21.320+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.322+0000 m31201| 2015-07-19T23:39:21.320+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.322+0000 m31101| 2015-07-19T23:39:21.320+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.323+0000 m31101| 2015-07-19T23:39:21.320+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.323+0000 m31200| 2015-07-19T23:39:21.320+0000 I INDEX [conn101] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.323+0000 m31200| 2015-07-19T23:39:21.320+0000 I INDEX [conn101] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.323+0000 m31102| 2015-07-19T23:39:21.321+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.323+0000 m31102| 2015-07-19T23:39:21.321+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.324+0000 m31201| 2015-07-19T23:39:21.322+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.324+0000 m31202| 2015-07-19T23:39:21.322+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.324+0000 m31202| 2015-07-19T23:39:21.322+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.324+0000 m31100| 2015-07-19T23:39:21.322+0000 I INDEX [conn55] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.325+0000 m31200| 2015-07-19T23:39:21.323+0000 I INDEX [conn101] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.326+0000 m31201| 2015-07-19T23:39:21.324+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.328+0000 m31201| 2015-07-19T23:39:21.324+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.328+0000 m30998| 2015-07-19T23:39:21.325+0000 I SHARDING [conn38] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.329+0000 m30998| 2015-07-19T23:39:21.325+0000 I SHARDING [conn38] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.330+0000 m31100| 2015-07-19T23:39:21.325+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.331+0000 m31200| 2015-07-19T23:39:21.326+0000 I NETWORK [conn95] end connection 10.139.123.131:39666 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.331+0000 m31201| 2015-07-19T23:39:21.326+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.331+0000 m31101| 2015-07-19T23:39:21.326+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.331+0000 m31202| 2015-07-19T23:39:21.327+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.331+0000 m31100| 2015-07-19T23:39:21.327+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.337+0000 m31102| 2015-07-19T23:39:21.327+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.338+0000 m31200| 2015-07-19T23:39:21.327+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.338+0000 m31100| 2015-07-19T23:39:21.327+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.339+0000 m30999| 2015-07-19T23:39:21.328+0000 I SHARDING [conn39] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.339+0000 m30999| 2015-07-19T23:39:21.328+0000 I SHARDING [conn39] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.339+0000 m31200| 2015-07-19T23:39:21.328+0000 I NETWORK [conn100] end connection 10.139.123.131:39671 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.340+0000 m31201| 2015-07-19T23:39:21.329+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.340+0000 m31201| 2015-07-19T23:39:21.329+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.340+0000 m31101| 2015-07-19T23:39:21.330+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.340+0000 m31101| 2015-07-19T23:39:21.330+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.340+0000 m31200| 2015-07-19T23:39:21.330+0000 I INDEX [conn99] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.342+0000 m31200| 2015-07-19T23:39:21.330+0000 I INDEX [conn99] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.342+0000 m31202| 2015-07-19T23:39:21.331+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.342+0000 m31202| 2015-07-19T23:39:21.331+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.342+0000 m31201| 2015-07-19T23:39:21.332+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.343+0000 m31200| 2015-07-19T23:39:21.332+0000 I INDEX [conn99] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.343+0000 m31102| 2015-07-19T23:39:21.333+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.349+0000 m31102| 2015-07-19T23:39:21.333+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.349+0000 m30998| 2015-07-19T23:39:21.333+0000 I SHARDING [conn36] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.349+0000 m30998| 2015-07-19T23:39:21.333+0000 I SHARDING [conn36] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.350+0000 m31200| 2015-07-19T23:39:21.333+0000 I NETWORK [conn101] end connection 10.139.123.131:39672 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.351+0000 m31200| 2015-07-19T23:39:21.333+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.351+0000 m31200| 2015-07-19T23:39:21.334+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.351+0000 m31200| 2015-07-19T23:39:21.334+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.352+0000 m31201| 2015-07-19T23:39:21.335+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.352+0000 m31202| 2015-07-19T23:39:21.335+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.352+0000 m31101| 2015-07-19T23:39:21.335+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.352+0000 m31102| 2015-07-19T23:39:21.336+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.353+0000 m31100| 2015-07-19T23:39:21.336+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.354+0000 m31102| 2015-07-19T23:39:21.336+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.354+0000 m31100| 2015-07-19T23:39:21.336+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.354+0000 m31200| 2015-07-19T23:39:21.336+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.355+0000 m31200| 2015-07-19T23:39:21.337+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.355+0000 m31201| 2015-07-19T23:39:21.338+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.355+0000 m31201| 2015-07-19T23:39:21.338+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.355+0000 m31100| 2015-07-19T23:39:21.338+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.355+0000 m31202| 2015-07-19T23:39:21.339+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.355+0000 m31202| 2015-07-19T23:39:21.339+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.356+0000 m31101| 2015-07-19T23:39:21.339+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.356+0000 m31101| 2015-07-19T23:39:21.339+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.356+0000 m31200| 2015-07-19T23:39:21.338+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.356+0000 m31200| 2015-07-19T23:39:21.340+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39673 #102 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.356+0000 m31101| 2015-07-19T23:39:21.340+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.356+0000 m31200| 2015-07-19T23:39:21.340+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39674 #103 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.356+0000 m31101| 2015-07-19T23:39:21.341+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.357+0000 m31201| 2015-07-19T23:39:21.342+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.357+0000 m31102| 2015-07-19T23:39:21.342+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.357+0000 m31102| 2015-07-19T23:39:21.342+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.357+0000 m31202| 2015-07-19T23:39:21.343+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.357+0000 m31202| 2015-07-19T23:39:21.343+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.359+0000 m31201| 2015-07-19T23:39:21.344+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.359+0000 m31201| 2015-07-19T23:39:21.344+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.359+0000 m31100| 2015-07-19T23:39:21.345+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.359+0000 m31102| 2015-07-19T23:39:21.345+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.359+0000 m31200| 2015-07-19T23:39:21.345+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.359+0000 m31101| 2015-07-19T23:39:21.346+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.360+0000 m31101| 2015-07-19T23:39:21.346+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.360+0000 m31201| 2015-07-19T23:39:21.346+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.360+0000 m31201| 2015-07-19T23:39:21.347+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.360+0000 m31201| 2015-07-19T23:39:21.347+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.360+0000 m31202| 2015-07-19T23:39:21.347+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.361+0000 m31202| 2015-07-19T23:39:21.347+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.361+0000 m31102| 2015-07-19T23:39:21.347+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.361+0000 m31201| 2015-07-19T23:39:21.348+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.361+0000 m31102| 2015-07-19T23:39:21.348+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31202| 2015-07-19T23:39:21.349+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31102| 2015-07-19T23:39:21.349+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31101| 2015-07-19T23:39:21.349+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31201| 2015-07-19T23:39:21.350+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31102| 2015-07-19T23:39:21.350+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31202| 2015-07-19T23:39:21.350+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31102| 2015-07-19T23:39:21.351+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31101| 2015-07-19T23:39:21.351+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31102| 2015-07-19T23:39:21.352+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31202| 2015-07-19T23:39:21.352+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31102| 2015-07-19T23:39:21.353+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.362+0000 m31101| 2015-07-19T23:39:21.353+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.363+0000 m31202| 2015-07-19T23:39:21.354+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.363+0000 m31101| 2015-07-19T23:39:21.354+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.363+0000 m31202| 2015-07-19T23:39:21.355+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.363+0000 m31101| 2015-07-19T23:39:21.356+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.363+0000 m31202| 2015-07-19T23:39:21.356+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.364+0000 m31101| 2015-07-19T23:39:21.357+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.364+0000 m31202| 2015-07-19T23:39:21.357+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.364+0000 m31101| 2015-07-19T23:39:21.358+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.365+0000 m31202| 2015-07-19T23:39:21.358+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.365+0000 m31101| 2015-07-19T23:39:21.359+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.365+0000 m31100| 2015-07-19T23:39:21.360+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
m31100| 2015-07-19T23:39:21.360+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.365+0000 m31200| 2015-07-19T23:39:21.360+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.365+0000 m31202| 2015-07-19T23:39:21.361+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.366+0000 m31101| 2015-07-19T23:39:21.363+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.366+0000 m31102| 2015-07-19T23:39:21.363+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.366+0000 m31201| 2015-07-19T23:39:21.363+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.377+0000 m31100| 2015-07-19T23:39:21.377+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.377+0000 m31100| 2015-07-19T23:39:21.377+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.377+0000 m31200| 2015-07-19T23:39:21.377+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.377+0000 m31200| 2015-07-19T23:39:21.377+0000 I INDEX [conn94] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.381+0000 m31100| 2015-07-19T23:39:21.378+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.382+0000 m31200| 2015-07-19T23:39:21.378+0000 I INDEX [conn94] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.382+0000 m31100| 2015-07-19T23:39:21.380+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.382+0000 m31101| 2015-07-19T23:39:21.380+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.382+0000 m31101| 2015-07-19T23:39:21.380+0000 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.386+0000 m31102| 2015-07-19T23:39:21.380+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.387+0000 m31102| 2015-07-19T23:39:21.380+0000 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.387+0000 m31200| 2015-07-19T23:39:21.380+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.388+0000 m31202| 2015-07-19T23:39:21.381+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.388+0000 m31202| 2015-07-19T23:39:21.381+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.388+0000 m31201| 2015-07-19T23:39:21.382+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.388+0000 m31201| 2015-07-19T23:39:21.382+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.388+0000 m31101| 2015-07-19T23:39:21.382+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.388+0000 m31102| 2015-07-19T23:39:21.385+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.388+0000 m31201| 2015-07-19T23:39:21.385+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.388+0000 m31202| 2015-07-19T23:39:21.385+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.388+0000 m31201| 2015-07-19T23:39:21.386+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.388+0000 m31202| 2015-07-19T23:39:21.386+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.430+0000 m31200| 2015-07-19T23:39:21.430+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.430+0000 m31100| 2015-07-19T23:39:21.430+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.431+0000 m31100| 2015-07-19T23:39:21.430+0000 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.431+0000 m31200| 2015-07-19T23:39:21.430+0000 I INDEX [conn103] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.431+0000 m31100| 2015-07-19T23:39:21.431+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.432+0000 m31200| 2015-07-19T23:39:21.432+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.433+0000 m31102| 2015-07-19T23:39:21.432+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.433+0000 m31101| 2015-07-19T23:39:21.432+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.435+0000 m31102| 2015-07-19T23:39:21.434+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.435+0000 m31102| 2015-07-19T23:39:21.434+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.436+0000 m31101| 2015-07-19T23:39:21.435+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.436+0000 m31101| 2015-07-19T23:39:21.435+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.438+0000 m31102| 2015-07-19T23:39:21.437+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.438+0000 m31101| 2015-07-19T23:39:21.437+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.441+0000 m31200| 2015-07-19T23:39:21.438+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39675 #104 (39 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.441+0000 m31201| 2015-07-19T23:39:21.439+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.441+0000 m31201| 2015-07-19T23:39:21.439+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.442+0000 m31100| 2015-07-19T23:39:21.439+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.443+0000 m31100| 2015-07-19T23:39:21.439+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.443+0000 m31202| 2015-07-19T23:39:21.440+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.443+0000 m31202| 2015-07-19T23:39:21.440+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.443+0000 m31200| 2015-07-19T23:39:21.440+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.443+0000 m31200| 2015-07-19T23:39:21.440+0000 I INDEX [conn94] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.445+0000 m31201| 2015-07-19T23:39:21.441+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.445+0000 m31200| 2015-07-19T23:39:21.441+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.445+0000 m31200| 2015-07-19T23:39:21.442+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.445+0000 m31202| 2015-07-19T23:39:21.442+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.445+0000 m31100| 2015-07-19T23:39:21.444+0000 I INDEX [conn23] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.446+0000 m31200| 2015-07-19T23:39:21.444+0000 I INDEX [conn90] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.446+0000 m31200| 2015-07-19T23:39:21.444+0000 I INDEX [conn90] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.446+0000 m31100| 2015-07-19T23:39:21.444+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.446+0000 m31201| 2015-07-19T23:39:21.445+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.446+0000 m31201| 2015-07-19T23:39:21.445+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.448+0000 m31100| 2015-07-19T23:39:21.447+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.448+0000 m31100| 2015-07-19T23:39:21.447+0000 I INDEX [conn33] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.448+0000 m31202| 2015-07-19T23:39:21.447+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.448+0000 m31202| 2015-07-19T23:39:21.447+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.448+0000 m31200| 2015-07-19T23:39:21.447+0000 I INDEX [conn90] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.449+0000 m31201| 2015-07-19T23:39:21.448+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.449+0000 m31201| 2015-07-19T23:39:21.449+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.451+0000 m31202| 2015-07-19T23:39:21.450+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.452+0000 m31202| 2015-07-19T23:39:21.450+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.452+0000 m31101| 2015-07-19T23:39:21.451+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.452+0000 m31101| 2015-07-19T23:39:21.451+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.453+0000 m31102| 2015-07-19T23:39:21.451+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.454+0000 m31102| 2015-07-19T23:39:21.451+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.454+0000 m31201| 2015-07-19T23:39:21.451+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.454+0000 m31201| 2015-07-19T23:39:21.451+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.454+0000 m31200| 2015-07-19T23:39:21.452+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.454+0000 m31200| 2015-07-19T23:39:21.452+0000 I INDEX [conn93] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.455+0000 m31100| 2015-07-19T23:39:21.452+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.455+0000 m31102| 2015-07-19T23:39:21.452+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.456+0000 m31102| 2015-07-19T23:39:21.453+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.456+0000 m31202| 2015-07-19T23:39:21.453+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.456+0000 m31202| 2015-07-19T23:39:21.453+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.457+0000 m31201| 2015-07-19T23:39:21.454+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.457+0000 m31200| 2015-07-19T23:39:21.454+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.458+0000 m31101| 2015-07-19T23:39:21.455+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.459+0000 m31100| 2015-07-19T23:39:21.455+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.459+0000 m31100| 2015-07-19T23:39:21.455+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.459+0000 m31101| 2015-07-19T23:39:21.455+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.459+0000 m31202| 2015-07-19T23:39:21.455+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.460+0000 m31100| 2015-07-19T23:39:21.456+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.460+0000 m31102| 2015-07-19T23:39:21.457+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.460+0000 m31102| 2015-07-19T23:39:21.457+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.460+0000 m31101| 2015-07-19T23:39:21.458+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.461+0000 m31101| 2015-07-19T23:39:21.458+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.465+0000 m31201| 2015-07-19T23:39:21.458+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.466+0000 m31201| 2015-07-19T23:39:21.458+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.466+0000 m31202| 2015-07-19T23:39:21.458+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.466+0000 m31202| 2015-07-19T23:39:21.458+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.467+0000 m31102| 2015-07-19T23:39:21.458+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.467+0000 m31100| 2015-07-19T23:39:21.458+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.468+0000 m31100| 2015-07-19T23:39:21.458+0000 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.468+0000 m31201| 2015-07-19T23:39:21.459+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.468+0000 m31202| 2015-07-19T23:39:21.459+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.469+0000 m31101| 2015-07-19T23:39:21.460+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.469+0000 m31100| 2015-07-19T23:39:21.461+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.470+0000 m31102| 2015-07-19T23:39:21.461+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.470+0000 m31102| 2015-07-19T23:39:21.461+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.470+0000 m31102| 2015-07-19T23:39:21.462+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.471+0000 m31100| 2015-07-19T23:39:21.463+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.471+0000 m31100| 2015-07-19T23:39:21.463+0000 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.472+0000 m31101| 2015-07-19T23:39:21.463+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.473+0000 m31101| 2015-07-19T23:39:21.463+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.475+0000 m31101| 2015-07-19T23:39:21.464+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.475+0000 m31100| 2015-07-19T23:39:21.464+0000 I INDEX [conn49] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.475+0000 m31200| 2015-07-19T23:39:21.465+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.475+0000 m31200| 2015-07-19T23:39:21.465+0000 I INDEX [conn103] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.476+0000 m31102| 2015-07-19T23:39:21.466+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.479+0000 m31102| 2015-07-19T23:39:21.466+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.480+0000 m31101| 2015-07-19T23:39:21.466+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.480+0000 m31101| 2015-07-19T23:39:21.466+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.481+0000 m31100| 2015-07-19T23:39:21.467+0000 I INDEX [conn58] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.481+0000 m31100| 2015-07-19T23:39:21.467+0000 I INDEX [conn58] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.481+0000 m31101| 2015-07-19T23:39:21.467+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.487+0000 m31102| 2015-07-19T23:39:21.468+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.487+0000 m31100| 2015-07-19T23:39:21.469+0000 I INDEX [conn58] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.491+0000 m31100| 2015-07-19T23:39:21.469+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.491+0000 m31101| 2015-07-19T23:39:21.470+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.491+0000 m31101| 2015-07-19T23:39:21.470+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.494+0000 m31102| 2015-07-19T23:39:21.471+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.501+0000 m31102| 2015-07-19T23:39:21.471+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.501+0000 m31101| 2015-07-19T23:39:21.471+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.503+0000 m31100| 2015-07-19T23:39:21.472+0000 I INDEX [conn55] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.503+0000 m31100| 2015-07-19T23:39:21.472+0000 I INDEX [conn55] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.503+0000 m31102| 2015-07-19T23:39:21.472+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.505+0000 m31200| 2015-07-19T23:39:21.472+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.505+0000 m31100| 2015-07-19T23:39:21.473+0000 I INDEX [conn55] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.505+0000 m31200| 2015-07-19T23:39:21.474+0000 I INDEX [conn102] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.507+0000 m31200| 2015-07-19T23:39:21.474+0000 I INDEX [conn102] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.509+0000 m31101| 2015-07-19T23:39:21.474+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.509+0000 m31101| 2015-07-19T23:39:21.474+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.509+0000 m31202| 2015-07-19T23:39:21.476+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.509+0000 m31202| 2015-07-19T23:39:21.476+0000 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.510+0000 m31100| 2015-07-19T23:39:21.477+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.510+0000 m31102| 2015-07-19T23:39:21.477+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.511+0000 m31100| 2015-07-19T23:39:21.477+0000 I INDEX [conn54] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.511+0000 m31102| 2015-07-19T23:39:21.477+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.512+0000 m31202| 2015-07-19T23:39:21.478+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.512+0000 m31200| 2015-07-19T23:39:21.478+0000 I INDEX [conn102] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.512+0000 m31200| 2015-07-19T23:39:21.478+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.513+0000 m31201| 2015-07-19T23:39:21.479+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.513+0000 m31201| 2015-07-19T23:39:21.479+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.513+0000 m31102| 2015-07-19T23:39:21.479+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.513+0000 m31102| 2015-07-19T23:39:21.479+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.513+0000 m31101| 2015-07-19T23:39:21.480+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.513+0000 m31200| 2015-07-19T23:39:21.481+0000 I INDEX [conn104] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.514+0000 m31200| 2015-07-19T23:39:21.481+0000 I INDEX [conn104] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.514+0000 m31101| 2015-07-19T23:39:21.481+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.514+0000 m31202| 2015-07-19T23:39:21.481+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.514+0000 m31202| 2015-07-19T23:39:21.481+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.514+0000 m31100| 2015-07-19T23:39:21.481+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.514+0000 m31201| 2015-07-19T23:39:21.481+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.515+0000 m31200| 2015-07-19T23:39:21.482+0000 I INDEX [conn104] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.515+0000 m31202| 2015-07-19T23:39:21.482+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.515+0000 m31202| 2015-07-19T23:39:21.483+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.515+0000 m31102| 2015-07-19T23:39:21.483+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.515+0000 m31102| 2015-07-19T23:39:21.483+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.515+0000 m31201| 2015-07-19T23:39:21.484+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.516+0000 m31201| 2015-07-19T23:39:21.484+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.516+0000 m31100| 2015-07-19T23:39:21.484+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.516+0000 m31100| 2015-07-19T23:39:21.484+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.516+0000 m31200| 2015-07-19T23:39:21.485+0000 I INDEX [conn99] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.516+0000 m31200| 2015-07-19T23:39:21.485+0000 I INDEX [conn99] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.516+0000 m31202| 2015-07-19T23:39:21.486+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.517+0000 m31202| 2015-07-19T23:39:21.486+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.517+0000 m31102| 2015-07-19T23:39:21.487+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.517+0000 m31101| 2015-07-19T23:39:21.487+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.517+0000 m31101| 2015-07-19T23:39:21.487+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.517+0000 m31200| 2015-07-19T23:39:21.488+0000 I INDEX [conn99] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.518+0000 m31201| 2015-07-19T23:39:21.488+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.518+0000 m31202| 2015-07-19T23:39:21.489+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.518+0000 m31201| 2015-07-19T23:39:21.489+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.518+0000 m31102| 2015-07-19T23:39:21.489+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.518+0000 m31102| 2015-07-19T23:39:21.489+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.518+0000 m31100| 2015-07-19T23:39:21.490+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.518+0000 m31100| 2015-07-19T23:39:21.490+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.519+0000 m31101| 2015-07-19T23:39:21.491+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.519+0000 m31102| 2015-07-19T23:39:21.491+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.519+0000 m31200| 2015-07-19T23:39:21.491+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.519+0000 m31200| 2015-07-19T23:39:21.491+0000 I INDEX [conn22] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.519+0000 m31202| 2015-07-19T23:39:21.491+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.519+0000 m31202| 2015-07-19T23:39:21.492+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.520+0000 m31200| 2015-07-19T23:39:21.492+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.520+0000 m31102| 2015-07-19T23:39:21.493+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.520+0000 m31102| 2015-07-19T23:39:21.493+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.520+0000 m31202| 2015-07-19T23:39:21.494+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.520+0000 m31201| 2015-07-19T23:39:21.495+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.521+0000 m31201| 2015-07-19T23:39:21.495+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.521+0000 m31200| 2015-07-19T23:39:21.495+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.521+0000 m31200| 2015-07-19T23:39:21.495+0000 I INDEX [conn94] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.521+0000 m31101| 2015-07-19T23:39:21.495+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.521+0000 m31101| 2015-07-19T23:39:21.495+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.521+0000 m31102| 2015-07-19T23:39:21.496+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.522+0000 m31200| 2015-07-19T23:39:21.496+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.522+0000 m31201| 2015-07-19T23:39:21.496+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.522+0000 m31200| 2015-07-19T23:39:21.497+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.522+0000 m31101| 2015-07-19T23:39:21.497+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.522+0000 m31202| 2015-07-19T23:39:21.498+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.522+0000 m31202| 2015-07-19T23:39:21.498+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.522+0000 m31100| 2015-07-19T23:39:21.498+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.523+0000 m31200| 2015-07-19T23:39:21.498+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.523+0000 m31200| 2015-07-19T23:39:21.499+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.523+0000 m31102| 2015-07-19T23:39:21.499+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.523+0000 m31200| 2015-07-19T23:39:21.499+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.523+0000 m31100| 2015-07-19T23:39:21.500+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.523+0000 m31200| 2015-07-19T23:39:21.500+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.523+0000 m31200| 2015-07-19T23:39:21.500+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.523+0000 m31100| 2015-07-19T23:39:21.501+0000 I COMMAND [conn114] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31102| 2015-07-19T23:39:21.501+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31100| 2015-07-19T23:39:21.501+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31201| 2015-07-19T23:39:21.502+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31201| 2015-07-19T23:39:21.502+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31100| 2015-07-19T23:39:21.502+0000 I COMMAND [conn112] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31101| 2015-07-19T23:39:21.502+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31101| 2015-07-19T23:39:21.502+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31100| 2015-07-19T23:39:21.502+0000 I COMMAND [conn110] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31202| 2015-07-19T23:39:21.502+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31200| 2015-07-19T23:39:21.503+0000 I COMMAND [conn88] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.524+0000 m31102| 2015-07-19T23:39:21.504+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31200| 2015-07-19T23:39:21.504+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31100| 2015-07-19T23:39:21.504+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31101| 2015-07-19T23:39:21.504+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31102| 2015-07-19T23:39:21.505+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31101| 2015-07-19T23:39:21.505+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31201| 2015-07-19T23:39:21.505+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31102| 2015-07-19T23:39:21.506+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31102| 2015-07-19T23:39:21.506+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31101| 2015-07-19T23:39:21.507+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31202| 2015-07-19T23:39:21.507+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31202| 2015-07-19T23:39:21.507+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.525+0000 m31102| 2015-07-19T23:39:21.507+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.526+0000 m31101| 2015-07-19T23:39:21.508+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.526+0000 m31102| 2015-07-19T23:39:21.508+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.526+0000 m31201| 2015-07-19T23:39:21.509+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.526+0000 m31201| 2015-07-19T23:39:21.509+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.526+0000 m31202| 2015-07-19T23:39:21.509+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.526+0000 m31101| 2015-07-19T23:39:21.509+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.526+0000 m31202| 2015-07-19T23:39:21.510+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.526+0000 m31101| 2015-07-19T23:39:21.510+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.526+0000 m31201| 2015-07-19T23:39:21.510+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.526+0000 m31101| 2015-07-19T23:39:21.511+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.527+0000 m31202| 2015-07-19T23:39:21.511+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.527+0000 m31202| 2015-07-19T23:39:21.513+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.527+0000 m31101| 2015-07-19T23:39:21.513+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.527+0000 m31101| 2015-07-19T23:39:21.513+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.527+0000 m31201| 2015-07-19T23:39:21.513+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.527+0000 m31201| 2015-07-19T23:39:21.513+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.527+0000 m31202| 2015-07-19T23:39:21.514+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.527+0000 m31202| 2015-07-19T23:39:21.515+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.527+0000 m31201| 2015-07-19T23:39:21.515+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.527+0000 m31202| 2015-07-19T23:39:21.516+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.528+0000 m31201| 2015-07-19T23:39:21.516+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.528+0000 m31201| 2015-07-19T23:39:21.517+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.528+0000 m31202| 2015-07-19T23:39:21.517+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.528+0000 m31202| 2015-07-19T23:39:21.517+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.528+0000 m31201| 2015-07-19T23:39:21.517+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.528+0000 m31201| 2015-07-19T23:39:21.518+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.528+0000 m31201| 2015-07-19T23:39:21.518+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.528+0000 m31201| 2015-07-19T23:39:21.519+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.528+0000 m31201| 2015-07-19T23:39:21.519+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.528+0000 m31201| 2015-07-19T23:39:21.520+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.547+0000 m31100| 2015-07-19T23:39:21.547+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.548+0000 m31100| 2015-07-19T23:39:21.547+0000 I INDEX [conn54] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.548+0000 m31200| 2015-07-19T23:39:21.547+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.548+0000 m31200| 2015-07-19T23:39:21.547+0000 I INDEX [conn22] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.550+0000 m31100| 2015-07-19T23:39:21.548+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.550+0000 m31200| 2015-07-19T23:39:21.548+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.551+0000 m31200| 2015-07-19T23:39:21.550+0000 I COMMAND [conn88] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.551+0000 m31100| 2015-07-19T23:39:21.550+0000 I COMMAND [conn110] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.551+0000 m31101| 2015-07-19T23:39:21.551+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.551+0000 m31101| 2015-07-19T23:39:21.551+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.551+0000 m31202| 2015-07-19T23:39:21.551+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.552+0000 m31202| 2015-07-19T23:39:21.551+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.552+0000 m31201| 2015-07-19T23:39:21.552+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.552+0000 m31201| 2015-07-19T23:39:21.552+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.553+0000 m31102| 2015-07-19T23:39:21.552+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.553+0000 m31102| 2015-07-19T23:39:21.552+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.553+0000 m31101| 2015-07-19T23:39:21.553+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.554+0000 m31101| 2015-07-19T23:39:21.553+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.554+0000 m31102| 2015-07-19T23:39:21.553+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.554+0000 m31102| 2015-07-19T23:39:21.554+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.555+0000 m31201| 2015-07-19T23:39:21.554+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.555+0000 m31202| 2015-07-19T23:39:21.555+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.555+0000 m31201| 2015-07-19T23:39:21.555+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.556+0000 m31202| 2015-07-19T23:39:21.556+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.581+0000 m31200| 2015-07-19T23:39:21.581+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.581+0000 m31100| 2015-07-19T23:39:21.581+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.581+0000 m31200| 2015-07-19T23:39:21.581+0000 I INDEX [conn94] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.582+0000 m31100| 2015-07-19T23:39:21.581+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.582+0000 m31200| 2015-07-19T23:39:21.582+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.582+0000 m31100| 2015-07-19T23:39:21.582+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.585+0000 m31100| 2015-07-19T23:39:21.583+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.585+0000 m31200| 2015-07-19T23:39:21.584+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.586+0000 m31202| 2015-07-19T23:39:21.584+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.586+0000 m31202| 2015-07-19T23:39:21.584+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.586+0000 m31101| 2015-07-19T23:39:21.585+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.586+0000 m31101| 2015-07-19T23:39:21.586+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.590+0000 m31201| 2015-07-19T23:39:21.589+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.590+0000 m31201| 2015-07-19T23:39:21.589+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.590+0000 m31101| 2015-07-19T23:39:21.589+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.590+0000 m31102| 2015-07-19T23:39:21.590+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.590+0000 m31102| 2015-07-19T23:39:21.590+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.591+0000 m31201| 2015-07-19T23:39:21.590+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.592+0000 m31202| 2015-07-19T23:39:21.590+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.592+0000 m31101| 2015-07-19T23:39:21.590+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.592+0000 m31201| 2015-07-19T23:39:21.591+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.592+0000 m31202| 2015-07-19T23:39:21.591+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.593+0000 m31102| 2015-07-19T23:39:21.592+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.593+0000 m31102| 2015-07-19T23:39:21.592+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.600+0000 m31100| 2015-07-19T23:39:21.599+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.600+0000 m31100| 2015-07-19T23:39:21.599+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.601+0000 m31200| 2015-07-19T23:39:21.599+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.601+0000 m31200| 2015-07-19T23:39:21.599+0000 I INDEX [conn94] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.603+0000 m31200| 2015-07-19T23:39:21.602+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.603+0000 m31100| 2015-07-19T23:39:21.603+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.610+0000 m31200| 2015-07-19T23:39:21.608+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.610+0000 m31200| 2015-07-19T23:39:21.608+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.610+0000 m31101| 2015-07-19T23:39:21.609+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.610+0000 m31101| 2015-07-19T23:39:21.609+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.611+0000 m31102| 2015-07-19T23:39:21.609+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.611+0000 m31102| 2015-07-19T23:39:21.609+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.611+0000 m31202| 2015-07-19T23:39:21.609+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.613+0000 m31202| 2015-07-19T23:39:21.609+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.613+0000 m31100| 2015-07-19T23:39:21.609+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.613+0000 m31100| 2015-07-19T23:39:21.609+0000 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.614+0000 m31201| 2015-07-19T23:39:21.609+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.614+0000 m31201| 2015-07-19T23:39:21.609+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.614+0000 m31200| 2015-07-19T23:39:21.611+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.614+0000 m31102| 2015-07-19T23:39:21.612+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.614+0000 m31101| 2015-07-19T23:39:21.612+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.615+0000 m31100| 2015-07-19T23:39:21.612+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.615+0000 m31202| 2015-07-19T23:39:21.612+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.615+0000 m31201| 2015-07-19T23:39:21.613+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.615+0000 m31200| 2015-07-19T23:39:21.614+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.615+0000 m31200| 2015-07-19T23:39:21.614+0000 I INDEX [conn22] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.616+0000 m31100| 2015-07-19T23:39:21.615+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.616+0000 m31100| 2015-07-19T23:39:21.615+0000 I INDEX [conn54] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.618+0000 m31200| 2015-07-19T23:39:21.617+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.618+0000 m31201| 2015-07-19T23:39:21.618+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.619+0000 m31201| 2015-07-19T23:39:21.618+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.619+0000 m31202| 2015-07-19T23:39:21.618+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.619+0000 m31202| 2015-07-19T23:39:21.618+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.619+0000 m31102| 2015-07-19T23:39:21.618+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.619+0000 m31102| 2015-07-19T23:39:21.618+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.620+0000 m31101| 2015-07-19T23:39:21.618+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.620+0000 m31101| 2015-07-19T23:39:21.618+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.620+0000 m31100| 2015-07-19T23:39:21.619+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.625+0000 m31201| 2015-07-19T23:39:21.622+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.625+0000 m31200| 2015-07-19T23:39:21.623+0000 I INDEX [conn99] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.625+0000 m31200| 2015-07-19T23:39:21.623+0000 I INDEX [conn99] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.625+0000 m31102| 2015-07-19T23:39:21.623+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.626+0000 m31100| 2015-07-19T23:39:21.623+0000 I INDEX [conn55] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.626+0000 m31100| 2015-07-19T23:39:21.623+0000 I INDEX [conn55] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.628+0000 m31101| 2015-07-19T23:39:21.623+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.628+0000 m31202| 2015-07-19T23:39:21.624+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.629+0000 m31201| 2015-07-19T23:39:21.625+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.629+0000 m31201| 2015-07-19T23:39:21.625+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.629+0000 m31100| 2015-07-19T23:39:21.626+0000 I INDEX [conn55] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.632+0000 m31102| 2015-07-19T23:39:21.626+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.632+0000 m31102| 2015-07-19T23:39:21.626+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.632+0000 m31200| 2015-07-19T23:39:21.627+0000 I INDEX [conn99] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.632+0000 m31202| 2015-07-19T23:39:21.627+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.632+0000 m31202| 2015-07-19T23:39:21.627+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.633+0000 m31101| 2015-07-19T23:39:21.628+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.633+0000 m31101| 2015-07-19T23:39:21.628+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.635+0000 m31201| 2015-07-19T23:39:21.628+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.635+0000 m31100| 2015-07-19T23:39:21.629+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.637+0000 m31100| 2015-07-19T23:39:21.629+0000 I INDEX [conn33] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.637+0000 m31200| 2015-07-19T23:39:21.630+0000 I INDEX [conn90] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.637+0000 m31200| 2015-07-19T23:39:21.630+0000 I INDEX [conn90] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.637+0000 m31101| 2015-07-19T23:39:21.630+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.638+0000 m31102| 2015-07-19T23:39:21.630+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.639+0000 m31202| 2015-07-19T23:39:21.631+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.639+0000 m31100| 2015-07-19T23:39:21.632+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.639+0000 m31201| 2015-07-19T23:39:21.633+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.639+0000 m31201| 2015-07-19T23:39:21.633+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.640+0000 m31200| 2015-07-19T23:39:21.633+0000 I INDEX [conn90] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.640+0000 m31102| 2015-07-19T23:39:21.634+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.640+0000 m31102| 2015-07-19T23:39:21.634+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.640+0000 m31101| 2015-07-19T23:39:21.633+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.641+0000 m31101| 2015-07-19T23:39:21.633+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.641+0000 m31202| 2015-07-19T23:39:21.634+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.643+0000 m31202| 2015-07-19T23:39:21.634+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.643+0000 m31201| 2015-07-19T23:39:21.635+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.643+0000 m31101| 2015-07-19T23:39:21.636+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.644+0000 m31100| 2015-07-19T23:39:21.636+0000 I INDEX [conn58] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.644+0000 m31100| 2015-07-19T23:39:21.636+0000 I INDEX [conn58] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.644+0000 m31202| 2015-07-19T23:39:21.636+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.644+0000 m31200| 2015-07-19T23:39:21.639+0000 I INDEX [conn102] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.648+0000 m31200| 2015-07-19T23:39:21.640+0000 I INDEX [conn102] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.648+0000 m31201| 2015-07-19T23:39:21.640+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.648+0000 m31201| 2015-07-19T23:39:21.640+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.648+0000 m31202| 2015-07-19T23:39:21.641+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.650+0000 m31202| 2015-07-19T23:39:21.641+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.651+0000 m31101| 2015-07-19T23:39:21.641+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.651+0000 m31101| 2015-07-19T23:39:21.641+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.651+0000 m31200| 2015-07-19T23:39:21.642+0000 I INDEX [conn102] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.651+0000 m31102| 2015-07-19T23:39:21.642+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.653+0000 m31201| 2015-07-19T23:39:21.642+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.653+0000 m31101| 2015-07-19T23:39:21.642+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.654+0000 m31100| 2015-07-19T23:39:21.644+0000 I INDEX [conn58] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.654+0000 m31202| 2015-07-19T23:39:21.644+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.654+0000 m31102| 2015-07-19T23:39:21.645+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.654+0000 m31102| 2015-07-19T23:39:21.645+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.655+0000 m31200| 2015-07-19T23:39:21.645+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.655+0000 m31200| 2015-07-19T23:39:21.645+0000 I INDEX [conn103] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.658+0000 m31102| 2015-07-19T23:39:21.646+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.659+0000 m31201| 2015-07-19T23:39:21.646+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.660+0000 m31201| 2015-07-19T23:39:21.646+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.661+0000 m31200| 2015-07-19T23:39:21.647+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.664+0000 m31202| 2015-07-19T23:39:21.647+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.664+0000 m31202| 2015-07-19T23:39:21.647+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.666+0000 m31100| 2015-07-19T23:39:21.647+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.669+0000 m31100| 2015-07-19T23:39:21.647+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.669+0000 m31201| 2015-07-19T23:39:21.649+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.670+0000 m31100| 2015-07-19T23:39:21.649+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.670+0000 m31101| 2015-07-19T23:39:21.650+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.670+0000 m31101| 2015-07-19T23:39:21.650+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.670+0000 m31202| 2015-07-19T23:39:21.650+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.670+0000 m31200| 2015-07-19T23:39:21.650+0000 I INDEX [conn104] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.670+0000 m31200| 2015-07-19T23:39:21.650+0000 I INDEX [conn104] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.671+0000 m31201| 2015-07-19T23:39:21.651+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.671+0000 m31201| 2015-07-19T23:39:21.651+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.673+0000 m31102| 2015-07-19T23:39:21.651+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.674+0000 m31102| 2015-07-19T23:39:21.651+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.674+0000 m31201| 2015-07-19T23:39:21.653+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.674+0000 m31102| 2015-07-19T23:39:21.653+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.674+0000 m31200| 2015-07-19T23:39:21.652+0000 I INDEX [conn104] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.674+0000 m31200| 2015-07-19T23:39:21.652+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.676+0000 m31200| 2015-07-19T23:39:21.653+0000 I COMMAND [conn88] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.676+0000 m31200| 2015-07-19T23:39:21.653+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.676+0000 m31200| 2015-07-19T23:39:21.653+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.676+0000 m31202| 2015-07-19T23:39:21.654+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.677+0000 m31202| 2015-07-19T23:39:21.654+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.677+0000 m31100| 2015-07-19T23:39:21.654+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.677+0000 m31100| 2015-07-19T23:39:21.654+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.677+0000 m31101| 2015-07-19T23:39:21.656+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.677+0000 m31201| 2015-07-19T23:39:21.656+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.677+0000 m31201| 2015-07-19T23:39:21.656+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.678+0000 m31100| 2015-07-19T23:39:21.657+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.678+0000 m31202| 2015-07-19T23:39:21.657+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.678+0000 m31102| 2015-07-19T23:39:21.657+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.678+0000 m31102| 2015-07-19T23:39:21.657+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.678+0000 m31100| 2015-07-19T23:39:21.657+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.678+0000 m31200| 2015-07-19T23:39:21.657+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.679+0000 m31200| 2015-07-19T23:39:21.657+0000 I INDEX [conn103] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.679+0000 m31100| 2015-07-19T23:39:21.658+0000 I COMMAND [conn110] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.679+0000 m31100| 2015-07-19T23:39:21.658+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.679+0000 m31201| 2015-07-19T23:39:21.658+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.679+0000 m31100| 2015-07-19T23:39:21.659+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.679+0000 m31200| 2015-07-19T23:39:21.659+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.679+0000 m31200| 2015-07-19T23:39:21.660+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.680+0000 m31200| 2015-07-19T23:39:21.660+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.680+0000 m31201| 2015-07-19T23:39:21.660+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.680+0000 m31200| 2015-07-19T23:39:21.660+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.680+0000 m31201| 2015-07-19T23:39:21.661+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.680+0000 m30999| 2015-07-19T23:39:21.661+0000 I SHARDING [conn36] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.680+0000 m30999| 2015-07-19T23:39:21.661+0000 I SHARDING [conn36] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.681+0000 m31200| 2015-07-19T23:39:21.661+0000 I NETWORK [conn104] end connection 10.139.123.131:39675 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.681+0000 m31101| 2015-07-19T23:39:21.661+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.681+0000 m31101| 2015-07-19T23:39:21.661+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.681+0000 m31202| 2015-07-19T23:39:21.662+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.683+0000 m31202| 2015-07-19T23:39:21.662+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.683+0000 m31102| 2015-07-19T23:39:21.662+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.684+0000 m31201| 2015-07-19T23:39:21.663+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.684+0000 m31200| 2015-07-19T23:39:21.663+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.684+0000 m31201| 2015-07-19T23:39:21.663+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.686+0000 m31100| 2015-07-19T23:39:21.664+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.686+0000 m31100| 2015-07-19T23:39:21.664+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.686+0000 m31101| 2015-07-19T23:39:21.665+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.687+0000 m31202| 2015-07-19T23:39:21.665+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.687+0000 m31202| 2015-07-19T23:39:21.666+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.690+0000 m31100| 2015-07-19T23:39:21.666+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.690+0000 m31102| 2015-07-19T23:39:21.666+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.690+0000 m31102| 2015-07-19T23:39:21.666+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.690+0000 m31202| 2015-07-19T23:39:21.666+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.691+0000 m31100| 2015-07-19T23:39:21.666+0000 I COMMAND [conn112] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.691+0000 m31201| 2015-07-19T23:39:21.667+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.691+0000 m31201| 2015-07-19T23:39:21.667+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.691+0000 m31100| 2015-07-19T23:39:21.667+0000 I COMMAND [conn114] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.691+0000 m31202| 2015-07-19T23:39:21.667+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.691+0000 m31100| 2015-07-19T23:39:21.668+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.692+0000 m31200| 2015-07-19T23:39:21.668+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.692+0000 m31100| 2015-07-19T23:39:21.668+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.692+0000 m31202| 2015-07-19T23:39:21.668+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.692+0000 m31100| 2015-07-19T23:39:21.669+0000 I COMMAND [conn112] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.692+0000 m31102| 2015-07-19T23:39:21.670+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.692+0000 m31201| 2015-07-19T23:39:21.670+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.693+0000 m31202| 2015-07-19T23:39:21.671+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.693+0000 m31202| 2015-07-19T23:39:21.671+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.693+0000 m31101| 2015-07-19T23:39:21.671+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.693+0000 m31101| 2015-07-19T23:39:21.671+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.693+0000 m31102| 2015-07-19T23:39:21.671+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.693+0000 m31201| 2015-07-19T23:39:21.671+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.693+0000 m31102| 2015-07-19T23:39:21.672+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.694+0000 m31202| 2015-07-19T23:39:21.672+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.694+0000 m31101| 2015-07-19T23:39:21.672+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.694+0000 m31101| 2015-07-19T23:39:21.673+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.694+0000 m31102| 2015-07-19T23:39:21.673+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.694+0000 m31201| 2015-07-19T23:39:21.673+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.694+0000 m31202| 2015-07-19T23:39:21.673+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.695+0000 m31101| 2015-07-19T23:39:21.674+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.695+0000 m31102| 2015-07-19T23:39:21.674+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.695+0000 m31202| 2015-07-19T23:39:21.674+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.696+0000 m31101| 2015-07-19T23:39:21.675+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.696+0000 m31101| 2015-07-19T23:39:21.675+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.696+0000 m31201| 2015-07-19T23:39:21.676+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.696+0000 m31201| 2015-07-19T23:39:21.676+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.696+0000 m31202| 2015-07-19T23:39:21.677+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.696+0000 m31102| 2015-07-19T23:39:21.678+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.696+0000 m31102| 2015-07-19T23:39:21.678+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.697+0000 m31101| 2015-07-19T23:39:21.678+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.697+0000 m31101| 2015-07-19T23:39:21.678+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.697+0000 m31201| 2015-07-19T23:39:21.679+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.697+0000 m31202| 2015-07-19T23:39:21.679+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.697+0000 m31202| 2015-07-19T23:39:21.680+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.697+0000 m31101| 2015-07-19T23:39:21.680+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.697+0000 m31102| 2015-07-19T23:39:21.680+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.697+0000 m31101| 2015-07-19T23:39:21.681+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.697+0000 m31102| 2015-07-19T23:39:21.681+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.697+0000 m31101| 2015-07-19T23:39:21.681+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31102| 2015-07-19T23:39:21.682+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31101| 2015-07-19T23:39:21.682+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31102| 2015-07-19T23:39:21.683+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31101| 2015-07-19T23:39:21.683+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31102| 2015-07-19T23:39:21.683+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31101| 2015-07-19T23:39:21.684+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31102| 2015-07-19T23:39:21.684+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31100| 2015-07-19T23:39:21.687+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31100| 2015-07-19T23:39:21.687+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31200| 2015-07-19T23:39:21.687+0000 I INDEX [conn90] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.698+0000 m31200| 2015-07-19T23:39:21.687+0000 I INDEX [conn90] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.699+0000 m31100| 2015-07-19T23:39:21.688+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.699+0000 m31200| 2015-07-19T23:39:21.688+0000 I INDEX [conn90] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.699+0000 m31100| 2015-07-19T23:39:21.689+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.699+0000 m31200| 2015-07-19T23:39:21.689+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.699+0000 m31201| 2015-07-19T23:39:21.691+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.699+0000 m31201| 2015-07-19T23:39:21.691+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.699+0000 m31202| 2015-07-19T23:39:21.691+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.699+0000 m31202| 2015-07-19T23:39:21.691+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.699+0000 m31102| 2015-07-19T23:39:21.692+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.699+0000 m31102| 2015-07-19T23:39:21.692+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.700+0000 m31101| 2015-07-19T23:39:21.691+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.700+0000 m31101| 2015-07-19T23:39:21.691+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.700+0000 m31201| 2015-07-19T23:39:21.692+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.700+0000 m31202| 2015-07-19T23:39:21.692+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.700+0000 m31201| 2015-07-19T23:39:21.693+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.700+0000 m31202| 2015-07-19T23:39:21.693+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.700+0000 m31102| 2015-07-19T23:39:21.694+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.700+0000 m31101| 2015-07-19T23:39:21.694+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.700+0000 m31102| 2015-07-19T23:39:21.695+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.700+0000 m31101| 2015-07-19T23:39:21.695+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.761+0000 m31100| 2015-07-19T23:39:21.760+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.761+0000 m31100| 2015-07-19T23:39:21.760+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.761+0000 m31200| 2015-07-19T23:39:21.760+0000 I INDEX [conn90] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.761+0000 m31200| 2015-07-19T23:39:21.760+0000 I INDEX [conn90] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.762+0000 m31200| 2015-07-19T23:39:21.761+0000 I INDEX [conn90] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.764+0000 m31100| 2015-07-19T23:39:21.763+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.764+0000 m31200| 2015-07-19T23:39:21.763+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.765+0000 m31200| 2015-07-19T23:39:21.763+0000 I INDEX [conn103] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.765+0000 m31202| 2015-07-19T23:39:21.764+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.765+0000 m31202| 2015-07-19T23:39:21.764+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.765+0000 m31200| 2015-07-19T23:39:21.764+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.766+0000 m31201| 2015-07-19T23:39:21.765+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.766+0000 m31201| 2015-07-19T23:39:21.765+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.767+0000 m31202| 2015-07-19T23:39:21.765+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.767+0000 m31100| 2015-07-19T23:39:21.766+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.769+0000 m31100| 2015-07-19T23:39:21.766+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.771+0000 m31200| 2015-07-19T23:39:21.766+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.771+0000 m31200| 2015-07-19T23:39:21.766+0000 I INDEX [conn94] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.771+0000 m31100| 2015-07-19T23:39:21.767+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.772+0000 m31202| 2015-07-19T23:39:21.767+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.772+0000 m31202| 2015-07-19T23:39:21.767+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.774+0000 m31200| 2015-07-19T23:39:21.768+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.776+0000 m31101| 2015-07-19T23:39:21.768+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.776+0000 m31101| 2015-07-19T23:39:21.768+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.776+0000 m31201| 2015-07-19T23:39:21.769+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.777+0000 m31202| 2015-07-19T23:39:21.769+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.777+0000 m31200| 2015-07-19T23:39:21.770+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39676 #105 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.777+0000 m31102| 2015-07-19T23:39:21.770+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.778+0000 m31102| 2015-07-19T23:39:21.770+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.779+0000 m31101| 2015-07-19T23:39:21.770+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.779+0000 m31200| 2015-07-19T23:39:21.771+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.781+0000 m31200| 2015-07-19T23:39:21.771+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.789+0000 m31200| 2015-07-19T23:39:21.771+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39677 #106 (40 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.791+0000 m31201| 2015-07-19T23:39:21.772+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.791+0000 m31201| 2015-07-19T23:39:21.772+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.791+0000 m31100| 2015-07-19T23:39:21.772+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.791+0000 m31100| 2015-07-19T23:39:21.772+0000 I INDEX [conn33] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.792+0000 m31202| 2015-07-19T23:39:21.772+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.794+0000 m31202| 2015-07-19T23:39:21.772+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.794+0000 m31200| 2015-07-19T23:39:21.772+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.794+0000 m31201| 2015-07-19T23:39:21.774+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.794+0000 m31102| 2015-07-19T23:39:21.774+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.798+0000 m31100| 2015-07-19T23:39:21.774+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.799+0000 m31202| 2015-07-19T23:39:21.775+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.799+0000 m31200| 2015-07-19T23:39:21.775+0000 I INDEX [conn99] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.799+0000 m31200| 2015-07-19T23:39:21.775+0000 I INDEX [conn99] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.800+0000 m31200| 2015-07-19T23:39:21.777+0000 I INDEX [conn99] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.800+0000 m31201| 2015-07-19T23:39:21.778+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.801+0000 m31201| 2015-07-19T23:39:21.778+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.801+0000 m31100| 2015-07-19T23:39:21.778+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.801+0000 m31100| 2015-07-19T23:39:21.778+0000 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.801+0000 m31202| 2015-07-19T23:39:21.778+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.807+0000 m31202| 2015-07-19T23:39:21.778+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.808+0000 m31102| 2015-07-19T23:39:21.778+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.810+0000 m31102| 2015-07-19T23:39:21.778+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.810+0000 m31101| 2015-07-19T23:39:21.778+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.810+0000 m31101| 2015-07-19T23:39:21.778+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.810+0000 m31201| 2015-07-19T23:39:21.779+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.811+0000 m31100| 2015-07-19T23:39:21.780+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.811+0000 m31200| 2015-07-19T23:39:21.779+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.812+0000 m31200| 2015-07-19T23:39:21.779+0000 I INDEX [conn22] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.812+0000 m31202| 2015-07-19T23:39:21.780+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.812+0000 m31102| 2015-07-19T23:39:21.781+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.812+0000 m31101| 2015-07-19T23:39:21.781+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.812+0000 m31200| 2015-07-19T23:39:21.782+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.813+0000 m31100| 2015-07-19T23:39:21.782+0000 I INDEX [conn58] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.813+0000 m31100| 2015-07-19T23:39:21.782+0000 I INDEX [conn58] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.814+0000 m31201| 2015-07-19T23:39:21.782+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.814+0000 m31201| 2015-07-19T23:39:21.782+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.814+0000 m31202| 2015-07-19T23:39:21.783+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.814+0000 m31202| 2015-07-19T23:39:21.783+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.815+0000 m31200| 2015-07-19T23:39:21.783+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.815+0000 m31200| 2015-07-19T23:39:21.783+0000 I INDEX [conn103] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.816+0000 m31201| 2015-07-19T23:39:21.784+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.816+0000 m31102| 2015-07-19T23:39:21.784+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.816+0000 m31102| 2015-07-19T23:39:21.784+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.816+0000 m31100| 2015-07-19T23:39:21.784+0000 I INDEX [conn58] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.817+0000 m31202| 2015-07-19T23:39:21.785+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.817+0000 m31101| 2015-07-19T23:39:21.785+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.817+0000 m31101| 2015-07-19T23:39:21.785+0000 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.817+0000 m31200| 2015-07-19T23:39:21.785+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.817+0000 m31200| 2015-07-19T23:39:21.785+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.817+0000 m31200| 2015-07-19T23:39:21.786+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.818+0000 m31100| 2015-07-19T23:39:21.786+0000 I INDEX [conn55] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.818+0000 m31100| 2015-07-19T23:39:21.786+0000 I INDEX [conn55] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.818+0000 m31201| 2015-07-19T23:39:21.787+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.818+0000 m31201| 2015-07-19T23:39:21.787+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.818+0000 m31100| 2015-07-19T23:39:21.788+0000 I INDEX [conn55] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.818+0000 m31202| 2015-07-19T23:39:21.788+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.819+0000 m31202| 2015-07-19T23:39:21.788+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.819+0000 m31201| 2015-07-19T23:39:21.789+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.820+0000 m31100| 2015-07-19T23:39:21.789+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.829+0000 m31100| 2015-07-19T23:39:21.789+0000 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.831+0000 m31101| 2015-07-19T23:39:21.789+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.832+0000 m31102| 2015-07-19T23:39:21.790+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.832+0000 m31202| 2015-07-19T23:39:21.790+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.832+0000 m31201| 2015-07-19T23:39:21.791+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.832+0000 m31201| 2015-07-19T23:39:21.791+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.832+0000 m31100| 2015-07-19T23:39:21.791+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.832+0000 m31200| 2015-07-19T23:39:21.791+0000 I INDEX [conn105] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.833+0000 m31200| 2015-07-19T23:39:21.791+0000 I INDEX [conn105] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.833+0000 m31202| 2015-07-19T23:39:21.793+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.836+0000 m31202| 2015-07-19T23:39:21.793+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.836+0000 m31101| 2015-07-19T23:39:21.793+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.837+0000 m31101| 2015-07-19T23:39:21.793+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.837+0000 m31201| 2015-07-19T23:39:21.793+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.837+0000 m31102| 2015-07-19T23:39:21.793+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.837+0000 m31102| 2015-07-19T23:39:21.793+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.837+0000 m31100| 2015-07-19T23:39:21.794+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.837+0000 m31100| 2015-07-19T23:39:21.794+0000 I INDEX [conn54] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.838+0000 m31202| 2015-07-19T23:39:21.795+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.838+0000 m31101| 2015-07-19T23:39:21.795+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.838+0000 m31102| 2015-07-19T23:39:21.795+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.838+0000 m31202| 2015-07-19T23:39:21.795+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.838+0000 m31100| 2015-07-19T23:39:21.795+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.838+0000 m31201| 2015-07-19T23:39:21.796+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.839+0000 m31201| 2015-07-19T23:39:21.796+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.839+0000 m31200| 2015-07-19T23:39:21.796+0000 I INDEX [conn105] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.839+0000 m31102| 2015-07-19T23:39:21.797+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.839+0000 m31102| 2015-07-19T23:39:21.797+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.839+0000 m31101| 2015-07-19T23:39:21.797+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.839+0000 m31101| 2015-07-19T23:39:21.797+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.839+0000 m31202| 2015-07-19T23:39:21.797+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.839+0000 m31201| 2015-07-19T23:39:21.797+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.839+0000 m31100| 2015-07-19T23:39:21.798+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.839+0000 m31100| 2015-07-19T23:39:21.798+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31201| 2015-07-19T23:39:21.798+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31102| 2015-07-19T23:39:21.798+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31201| 2015-07-19T23:39:21.798+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31101| 2015-07-19T23:39:21.800+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31100| 2015-07-19T23:39:21.800+0000 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31100| 2015-07-19T23:39:21.800+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31102| 2015-07-19T23:39:21.800+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31102| 2015-07-19T23:39:21.800+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31102| 2015-07-19T23:39:21.802+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31100| 2015-07-19T23:39:21.802+0000 I COMMAND [conn112] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.840+0000 m31100| 2015-07-19T23:39:21.803+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.841+0000 m31202| 2015-07-19T23:39:21.803+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.841+0000 m31202| 2015-07-19T23:39:21.803+0000 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.841+0000 m31101| 2015-07-19T23:39:21.803+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.841+0000 m31101| 2015-07-19T23:39:21.803+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.841+0000 m31201| 2015-07-19T23:39:21.804+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.841+0000 m31201| 2015-07-19T23:39:21.804+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.841+0000 m31102| 2015-07-19T23:39:21.804+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.841+0000 m31102| 2015-07-19T23:39:21.804+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.841+0000 m31100| 2015-07-19T23:39:21.804+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.841+0000 m31101| 2015-07-19T23:39:21.805+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.842+0000 m31100| 2015-07-19T23:39:21.805+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.842+0000 m31102| 2015-07-19T23:39:21.805+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.918+0000 m31202| 2015-07-19T23:39:21.805+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.918+0000 m31201| 2015-07-19T23:39:21.806+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.918+0000 m31100| 2015-07-19T23:39:21.806+0000 I COMMAND [conn114] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.918+0000 m31102| 2015-07-19T23:39:21.808+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.919+0000 m31102| 2015-07-19T23:39:21.808+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.919+0000 m31101| 2015-07-19T23:39:21.808+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.919+0000 m31101| 2015-07-19T23:39:21.808+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.919+0000 m31100| 2015-07-19T23:39:21.808+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.919+0000 m31100| 2015-07-19T23:39:21.808+0000 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.919+0000 m31101| 2015-07-19T23:39:21.809+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.919+0000 m31102| 2015-07-19T23:39:21.809+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.919+0000 m31100| 2015-07-19T23:39:21.809+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.919+0000 m31102| 2015-07-19T23:39:21.812+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.919+0000 m31102| 2015-07-19T23:39:21.812+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.920+0000 m31101| 2015-07-19T23:39:21.811+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.920+0000 m31101| 2015-07-19T23:39:21.811+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.920+0000 m31102| 2015-07-19T23:39:21.814+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.920+0000 m31101| 2015-07-19T23:39:21.814+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.920+0000 m31102| 2015-07-19T23:39:21.814+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.921+0000 m31200| 2015-07-19T23:39:21.814+0000 I INDEX [conn106] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.921+0000 m31200| 2015-07-19T23:39:21.815+0000 I INDEX [conn106] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.921+0000 m31102| 2015-07-19T23:39:21.815+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.921+0000 m31102| 2015-07-19T23:39:21.816+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.921+0000 m31101| 2015-07-19T23:39:21.816+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.921+0000 m31101| 2015-07-19T23:39:21.816+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.921+0000 m31102| 2015-07-19T23:39:21.816+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.921+0000 m31200| 2015-07-19T23:39:21.817+0000 I INDEX [conn106] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.921+0000 m31102| 2015-07-19T23:39:21.817+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.921+0000 m31101| 2015-07-19T23:39:21.817+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.922+0000 m31101| 2015-07-19T23:39:21.817+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.922+0000 m31102| 2015-07-19T23:39:21.818+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.922+0000 m31101| 2015-07-19T23:39:21.818+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.922+0000 m31101| 2015-07-19T23:39:21.819+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.922+0000 m31200| 2015-07-19T23:39:21.819+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.922+0000 m31200| 2015-07-19T23:39:21.819+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.923+0000 m31200| 2015-07-19T23:39:21.820+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.923+0000 m31102| 2015-07-19T23:39:21.820+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.931+0000 m31102| 2015-07-19T23:39:21.820+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.931+0000 m31200| 2015-07-19T23:39:21.820+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.931+0000 m31101| 2015-07-19T23:39:21.821+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.932+0000 m31202| 2015-07-19T23:39:21.822+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.932+0000 m31202| 2015-07-19T23:39:21.822+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.932+0000 m31101| 2015-07-19T23:39:21.822+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.932+0000 m31200| 2015-07-19T23:39:21.823+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.932+0000 m31200| 2015-07-19T23:39:21.823+0000 I INDEX [conn94] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.933+0000 m31102| 2015-07-19T23:39:21.823+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.933+0000 m31201| 2015-07-19T23:39:21.823+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.933+0000 m31201| 2015-07-19T23:39:21.823+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.933+0000 m31101| 2015-07-19T23:39:21.823+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.933+0000 m31200| 2015-07-19T23:39:21.824+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.933+0000 m31202| 2015-07-19T23:39:21.824+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.934+0000 m31201| 2015-07-19T23:39:21.824+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.934+0000 m30998| 2015-07-19T23:39:21.825+0000 I SHARDING [conn35] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.934+0000 m30998| 2015-07-19T23:39:21.825+0000 I SHARDING [conn35] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.934+0000 m30999| 2015-07-19T23:39:21.825+0000 I SHARDING [conn36] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.934+0000 m30999| 2015-07-19T23:39:21.825+0000 I SHARDING [conn36] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.935+0000 m31201| 2015-07-19T23:39:21.825+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.935+0000 m31202| 2015-07-19T23:39:21.825+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.935+0000 m31200| 2015-07-19T23:39:21.825+0000 I NETWORK [conn105] end connection 10.139.123.131:39676 (39 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.938+0000 m31201| 2015-07-19T23:39:21.826+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.938+0000 m31100| 2015-07-19T23:39:21.826+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.938+0000 m31202| 2015-07-19T23:39:21.826+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.939+0000 m31200| 2015-07-19T23:39:21.827+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.939+0000 m31200| 2015-07-19T23:39:21.827+0000 I NETWORK [conn106] end connection 10.139.123.131:39677 (38 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.939+0000 m31200| 2015-07-19T23:39:21.827+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:21.939+0000 m31201| 2015-07-19T23:39:21.827+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.940+0000 m31101| 2015-07-19T23:39:21.827+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.940+0000 m31101| 2015-07-19T23:39:21.827+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.941+0000 m31200| 2015-07-19T23:39:21.827+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.942+0000 m31100| 2015-07-19T23:39:21.828+0000 I COMMAND [conn114] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.942+0000 m31202| 2015-07-19T23:39:21.828+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.942+0000 m31200| 2015-07-19T23:39:21.828+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.942+0000 m31201| 2015-07-19T23:39:21.829+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.942+0000 m31100| 2015-07-19T23:39:21.829+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.942+0000 m31102| 2015-07-19T23:39:21.829+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.942+0000 m31202| 2015-07-19T23:39:21.829+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.943+0000 m31101| 2015-07-19T23:39:21.830+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.943+0000 m31100| 2015-07-19T23:39:21.830+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.943+0000 m31101| 2015-07-19T23:39:21.830+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.943+0000 m31202| 2015-07-19T23:39:21.831+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.943+0000 m31202| 2015-07-19T23:39:21.831+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.944+0000 m31102| 2015-07-19T23:39:21.831+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.944+0000 m31102| 2015-07-19T23:39:21.832+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.944+0000 m31101| 2015-07-19T23:39:21.832+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.944+0000 m31101| 2015-07-19T23:39:21.833+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.944+0000 m31201| 2015-07-19T23:39:21.833+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.944+0000 m31201| 2015-07-19T23:39:21.833+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.944+0000 m31102| 2015-07-19T23:39:21.833+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.944+0000 m31101| 2015-07-19T23:39:21.834+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.944+0000 m31202| 2015-07-19T23:39:21.834+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31202| 2015-07-19T23:39:21.835+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31201| 2015-07-19T23:39:21.835+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31201| 2015-07-19T23:39:21.835+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31202| 2015-07-19T23:39:21.836+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31201| 2015-07-19T23:39:21.836+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31202| 2015-07-19T23:39:21.836+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31201| 2015-07-19T23:39:21.836+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31202| 2015-07-19T23:39:21.837+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31201| 2015-07-19T23:39:21.837+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31200| 2015-07-19T23:39:21.904+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.945+0000 m31200| 2015-07-19T23:39:21.904+0000 I INDEX [conn94] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.946+0000 m31100| 2015-07-19T23:39:21.905+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.946+0000 m31100| 2015-07-19T23:39:21.905+0000 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.946+0000 m31200| 2015-07-19T23:39:21.906+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.946+0000 m31100| 2015-07-19T23:39:21.907+0000 I INDEX [conn49] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.946+0000 m31202| 2015-07-19T23:39:21.909+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.946+0000 m31202| 2015-07-19T23:39:21.909+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.946+0000 m31200| 2015-07-19T23:39:21.909+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.946+0000 m31200| 2015-07-19T23:39:21.909+0000 I INDEX [conn103] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.946+0000 m31100| 2015-07-19T23:39:21.909+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.946+0000 m31100| 2015-07-19T23:39:21.909+0000 I INDEX [conn54] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.947+0000 m31201| 2015-07-19T23:39:21.910+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.947+0000 m31201| 2015-07-19T23:39:21.910+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.947+0000 m31102| 2015-07-19T23:39:21.910+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.947+0000 m31102| 2015-07-19T23:39:21.910+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.947+0000 m31200| 2015-07-19T23:39:21.911+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.947+0000 m31200| 2015-07-19T23:39:21.911+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.947+0000 m31100| 2015-07-19T23:39:21.912+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.947+0000 m31202| 2015-07-19T23:39:21.912+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.947+0000 m31100| 2015-07-19T23:39:21.912+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.948+0000 m31201| 2015-07-19T23:39:21.912+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.948+0000 m31102| 2015-07-19T23:39:21.913+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.948+0000 m31101| 2015-07-19T23:39:21.913+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.948+0000 m31101| 2015-07-19T23:39:21.913+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.948+0000 m31100| 2015-07-19T23:39:21.913+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.948+0000 m31200| 2015-07-19T23:39:21.913+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.949+0000 m31101| 2015-07-19T23:39:21.915+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.949+0000 m31201| 2015-07-19T23:39:21.916+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.949+0000 m31201| 2015-07-19T23:39:21.916+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.949+0000 m31202| 2015-07-19T23:39:21.915+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.949+0000 m31202| 2015-07-19T23:39:21.915+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.949+0000 m31102| 2015-07-19T23:39:21.918+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.950+0000 m31102| 2015-07-19T23:39:21.918+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.950+0000 m31101| 2015-07-19T23:39:21.923+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.951+0000 m31101| 2015-07-19T23:39:21.923+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.951+0000 m31202| 2015-07-19T23:39:21.923+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.951+0000 m31201| 2015-07-19T23:39:21.923+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.952+0000 m31102| 2015-07-19T23:39:21.923+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.952+0000 m31100| 2015-07-19T23:39:21.924+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.952+0000 m31102| 2015-07-19T23:39:21.924+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.952+0000 m31100| 2015-07-19T23:39:21.924+0000 I INDEX [conn54] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.953+0000 m31200| 2015-07-19T23:39:21.923+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.953+0000 m31200| 2015-07-19T23:39:21.924+0000 I INDEX [conn103] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.953+0000 m31202| 2015-07-19T23:39:21.924+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.955+0000 m31201| 2015-07-19T23:39:21.925+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.955+0000 m31102| 2015-07-19T23:39:21.925+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.955+0000 m31200| 2015-07-19T23:39:21.925+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.955+0000 m31101| 2015-07-19T23:39:21.925+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.955+0000 m31202| 2015-07-19T23:39:21.926+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.955+0000 m31201| 2015-07-19T23:39:21.926+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.956+0000 m31101| 2015-07-19T23:39:21.926+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.956+0000 m31100| 2015-07-19T23:39:21.927+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.956+0000 m31200| 2015-07-19T23:39:21.927+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.956+0000 m31200| 2015-07-19T23:39:21.927+0000 I INDEX [conn93] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.956+0000 m31200| 2015-07-19T23:39:21.928+0000 I INDEX [conn93] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.956+0000 m31101| 2015-07-19T23:39:21.929+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.956+0000 m31201| 2015-07-19T23:39:21.929+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.957+0000 m31201| 2015-07-19T23:39:21.929+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.957+0000 m31100| 2015-07-19T23:39:21.930+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.957+0000 m31100| 2015-07-19T23:39:21.930+0000 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.957+0000 m31202| 2015-07-19T23:39:21.930+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.957+0000 m31202| 2015-07-19T23:39:21.930+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.957+0000 m31200| 2015-07-19T23:39:21.931+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.958+0000 m31200| 2015-07-19T23:39:21.931+0000 I INDEX [conn94] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.958+0000 m31201| 2015-07-19T23:39:21.935+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.958+0000 m31102| 2015-07-19T23:39:21.935+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.961+0000 m31200| 2015-07-19T23:39:21.935+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.962+0000 m31102| 2015-07-19T23:39:21.935+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.962+0000 m31100| 2015-07-19T23:39:21.936+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.962+0000 m31101| 2015-07-19T23:39:21.936+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.962+0000 m31101| 2015-07-19T23:39:21.936+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.962+0000 m31202| 2015-07-19T23:39:21.936+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.963+0000 m31201| 2015-07-19T23:39:21.937+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.963+0000 m31201| 2015-07-19T23:39:21.937+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.963+0000 m31200| 2015-07-19T23:39:21.938+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.963+0000 m31200| 2015-07-19T23:39:21.938+0000 I INDEX [conn22] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.963+0000 m31102| 2015-07-19T23:39:21.939+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.963+0000 m31201| 2015-07-19T23:39:21.939+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.964+0000 m31200| 2015-07-19T23:39:21.940+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39679 #107 (39 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.964+0000 m31101| 2015-07-19T23:39:21.941+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.964+0000 m31200| 2015-07-19T23:39:21.941+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.964+0000 m31100| 2015-07-19T23:39:21.946+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.964+0000 m31100| 2015-07-19T23:39:21.946+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.964+0000 m31102| 2015-07-19T23:39:21.947+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.965+0000 m31102| 2015-07-19T23:39:21.947+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.965+0000 m31202| 2015-07-19T23:39:21.946+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.965+0000 m31202| 2015-07-19T23:39:21.946+0000 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.965+0000 m31201| 2015-07-19T23:39:21.947+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.966+0000 m31201| 2015-07-19T23:39:21.947+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.966+0000 m31101| 2015-07-19T23:39:21.948+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" } 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.967+0000 m31101| 2015-07-19T23:39:21.948+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.967+0000 m31200| 2015-07-19T23:39:21.948+0000 I INDEX [conn90] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.967+0000 m31200| 2015-07-19T23:39:21.948+0000 I INDEX [conn90] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.967+0000 m31201| 2015-07-19T23:39:21.949+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.967+0000 m31102| 2015-07-19T23:39:21.949+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.967+0000 m31100| 2015-07-19T23:39:21.950+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.968+0000 m31200| 2015-07-19T23:39:21.949+0000 I INDEX [conn90] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.969+0000 m31202| 2015-07-19T23:39:21.950+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.970+0000 m31101| 2015-07-19T23:39:21.951+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.970+0000 m31200| 2015-07-19T23:39:21.954+0000 I INDEX [conn99] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.970+0000 m31200| 2015-07-19T23:39:21.954+0000 I INDEX [conn99] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.972+0000 m31201| 2015-07-19T23:39:21.954+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.972+0000 m31201| 2015-07-19T23:39:21.954+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.973+0000 m31202| 2015-07-19T23:39:21.954+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.973+0000 m31202| 2015-07-19T23:39:21.954+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.973+0000 m31100| 2015-07-19T23:39:21.954+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.973+0000 m31100| 2015-07-19T23:39:21.954+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.973+0000 m31101| 2015-07-19T23:39:21.959+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.974+0000 m31101| 2015-07-19T23:39:21.959+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.974+0000 m31202| 2015-07-19T23:39:21.959+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.974+0000 m31200| 2015-07-19T23:39:21.959+0000 I INDEX [conn99] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.974+0000 m31102| 2015-07-19T23:39:21.959+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.974+0000 m31102| 2015-07-19T23:39:21.959+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.975+0000 m31100| 2015-07-19T23:39:21.960+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.975+0000 m31201| 2015-07-19T23:39:21.960+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.975+0000 m31101| 2015-07-19T23:39:21.961+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.975+0000 m31200| 2015-07-19T23:39:21.961+0000 I INDEX [conn102] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.975+0000 m31200| 2015-07-19T23:39:21.961+0000 I INDEX [conn102] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.975+0000 m31202| 2015-07-19T23:39:21.961+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.976+0000 m31202| 2015-07-19T23:39:21.961+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.976+0000 m31102| 2015-07-19T23:39:21.963+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.976+0000 m31100| 2015-07-19T23:39:21.966+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.977+0000 m31100| 2015-07-19T23:39:21.966+0000 I INDEX [conn33] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.978+0000 m31202| 2015-07-19T23:39:21.966+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.978+0000 m31101| 2015-07-19T23:39:21.966+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.978+0000 m31101| 2015-07-19T23:39:21.966+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.978+0000 m31102| 2015-07-19T23:39:21.967+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.978+0000 m31102| 2015-07-19T23:39:21.967+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.978+0000 m31201| 2015-07-19T23:39:21.967+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.979+0000 m31201| 2015-07-19T23:39:21.968+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.979+0000 m31200| 2015-07-19T23:39:21.968+0000 I INDEX [conn102] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.979+0000 m31100| 2015-07-19T23:39:21.971+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.979+0000 m31102| 2015-07-19T23:39:21.972+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.979+0000 m31101| 2015-07-19T23:39:21.972+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.982+0000 m31202| 2015-07-19T23:39:21.972+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.986+0000 m31202| 2015-07-19T23:39:21.972+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.986+0000 m31201| 2015-07-19T23:39:21.972+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.986+0000 m31200| 2015-07-19T23:39:21.973+0000 I INDEX [conn107] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.986+0000 m31200| 2015-07-19T23:39:21.973+0000 I INDEX [conn107] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.987+0000 m31202| 2015-07-19T23:39:21.977+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.987+0000 m31100| 2015-07-19T23:39:21.977+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.987+0000 m31100| 2015-07-19T23:39:21.977+0000 I INDEX [conn54] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.987+0000 m31102| 2015-07-19T23:39:21.977+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.987+0000 m31102| 2015-07-19T23:39:21.977+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.995+0000 m31101| 2015-07-19T23:39:21.977+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.997+0000 m31101| 2015-07-19T23:39:21.977+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:21.997+0000 m31201| 2015-07-19T23:39:21.978+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.002+0000 m31201| 2015-07-19T23:39:21.978+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.002+0000 m31200| 2015-07-19T23:39:21.979+0000 I INDEX [conn107] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.003+0000 m31200| 2015-07-19T23:39:21.979+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.003+0000 m31100| 2015-07-19T23:39:21.980+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.005+0000 m31102| 2015-07-19T23:39:21.980+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.005+0000 m31202| 2015-07-19T23:39:21.980+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.005+0000 m31202| 2015-07-19T23:39:21.980+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.005+0000 m31200| 2015-07-19T23:39:21.981+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.006+0000 m31200| 2015-07-19T23:39:21.981+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.006+0000 m31101| 2015-07-19T23:39:21.982+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.006+0000 m31200| 2015-07-19T23:39:21.982+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.006+0000 m31200| 2015-07-19T23:39:21.982+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.006+0000 m31201| 2015-07-19T23:39:21.982+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.006+0000 m31100| 2015-07-19T23:39:21.982+0000 I INDEX [conn55] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.007+0000 m31100| 2015-07-19T23:39:21.982+0000 I INDEX [conn55] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.007+0000 m31202| 2015-07-19T23:39:21.982+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.007+0000 m31100| 2015-07-19T23:39:21.984+0000 I INDEX [conn55] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.007+0000 m31200| 2015-07-19T23:39:21.984+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.007+0000 m31201| 2015-07-19T23:39:21.986+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.007+0000 m31201| 2015-07-19T23:39:21.986+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.008+0000 m31202| 2015-07-19T23:39:21.986+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.008+0000 m31202| 2015-07-19T23:39:21.986+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.008+0000 m31100| 2015-07-19T23:39:21.986+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.008+0000 m31100| 2015-07-19T23:39:21.986+0000 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.008+0000 m31200| 2015-07-19T23:39:21.987+0000 I COMMAND [conn88] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.008+0000 m31102| 2015-07-19T23:39:21.987+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.008+0000 m31102| 2015-07-19T23:39:21.987+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.009+0000 m31201| 2015-07-19T23:39:21.988+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.009+0000 m31101| 2015-07-19T23:39:21.988+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.009+0000 m31101| 2015-07-19T23:39:21.988+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.009+0000 m31100| 2015-07-19T23:39:21.988+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.009+0000 m31100| 2015-07-19T23:39:21.989+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.009+0000 m31102| 2015-07-19T23:39:21.989+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.010+0000 m31202| 2015-07-19T23:39:21.989+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.010+0000 m31100| 2015-07-19T23:39:21.989+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.010+0000 m30999| 2015-07-19T23:39:21.989+0000 I SHARDING [conn36] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.010+0000 m30999| 2015-07-19T23:39:21.990+0000 I SHARDING [conn36] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.010+0000 m31100| 2015-07-19T23:39:21.990+0000 I COMMAND [conn113] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.010+0000 m31200| 2015-07-19T23:39:21.990+0000 I NETWORK [conn107] end connection 10.139.123.131:39679 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.010+0000 m31201| 2015-07-19T23:39:21.990+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.010+0000 m31201| 2015-07-19T23:39:21.990+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.010+0000 m31100| 2015-07-19T23:39:21.990+0000 I COMMAND [conn114] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.010+0000 m31200| 2015-07-19T23:39:21.991+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.011+0000 m31100| 2015-07-19T23:39:21.991+0000 I COMMAND [conn112] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.011+0000 m31201| 2015-07-19T23:39:21.991+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.011+0000 m31102| 2015-07-19T23:39:21.992+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.011+0000 m31102| 2015-07-19T23:39:21.992+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.011+0000 m31100| 2015-07-19T23:39:21.992+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.011+0000 m31100| 2015-07-19T23:39:21.992+0000 I COMMAND [conn110] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.011+0000 m31201| 2015-07-19T23:39:21.993+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.011+0000 m31202| 2015-07-19T23:39:21.993+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.011+0000 m31202| 2015-07-19T23:39:21.993+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.011+0000 m31100| 2015-07-19T23:39:21.993+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31101| 2015-07-19T23:39:21.993+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31102| 2015-07-19T23:39:21.994+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31201| 2015-07-19T23:39:21.994+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31202| 2015-07-19T23:39:21.994+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31201| 2015-07-19T23:39:21.994+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31202| 2015-07-19T23:39:21.995+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31201| 2015-07-19T23:39:21.995+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31202| 2015-07-19T23:39:21.995+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31102| 2015-07-19T23:39:21.996+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31102| 2015-07-19T23:39:21.996+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.012+0000 m31101| 2015-07-19T23:39:21.996+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31101| 2015-07-19T23:39:21.996+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31202| 2015-07-19T23:39:21.996+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31202| 2015-07-19T23:39:21.997+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31201| 2015-07-19T23:39:21.997+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31102| 2015-07-19T23:39:21.997+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31202| 2015-07-19T23:39:21.998+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31102| 2015-07-19T23:39:21.998+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31101| 2015-07-19T23:39:21.998+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31201| 2015-07-19T23:39:21.998+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31202| 2015-07-19T23:39:21.999+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31102| 2015-07-19T23:39:21.999+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.013+0000 m31201| 2015-07-19T23:39:22.000+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.014+0000 m31202| 2015-07-19T23:39:22.000+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.014+0000 m31101| 2015-07-19T23:39:22.001+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.014+0000 m31101| 2015-07-19T23:39:22.001+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.014+0000 m31102| 2015-07-19T23:39:22.001+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.014+0000 m31202| 2015-07-19T23:39:22.001+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.014+0000 m31102| 2015-07-19T23:39:22.002+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.014+0000 m31101| 2015-07-19T23:39:22.003+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.014+0000 m31101| 2015-07-19T23:39:22.003+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.014+0000 m31102| 2015-07-19T23:39:22.003+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.015+0000 m31101| 2015-07-19T23:39:22.004+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.015+0000 m31102| 2015-07-19T23:39:22.004+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.015+0000 m31101| 2015-07-19T23:39:22.004+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.015+0000 m31102| 2015-07-19T23:39:22.005+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.015+0000 m31101| 2015-07-19T23:39:22.005+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.015+0000 m31102| 2015-07-19T23:39:22.006+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.015+0000 m31101| 2015-07-19T23:39:22.006+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.015+0000 m31101| 2015-07-19T23:39:22.007+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.015+0000 m31101| 2015-07-19T23:39:22.007+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.016+0000 m31101| 2015-07-19T23:39:22.008+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.016+0000 m31100| 2015-07-19T23:39:22.015+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.016+0000 m31100| 2015-07-19T23:39:22.015+0000 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.016+0000 m31200| 2015-07-19T23:39:22.015+0000 I INDEX [conn90] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.016+0000 m31200| 2015-07-19T23:39:22.015+0000 I INDEX [conn90] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.017+0000 m31100| 2015-07-19T23:39:22.016+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.018+0000 m31200| 2015-07-19T23:39:22.016+0000 I INDEX [conn90] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.018+0000 m31201| 2015-07-19T23:39:22.017+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.019+0000 m31100| 2015-07-19T23:39:22.019+0000 I INDEX [conn55] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.019+0000 m31100| 2015-07-19T23:39:22.019+0000 I INDEX [conn55] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.019+0000 m31200| 2015-07-19T23:39:22.019+0000 I INDEX [conn102] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.019+0000 m31200| 2015-07-19T23:39:22.019+0000 I INDEX [conn102] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.027+0000 m31202| 2015-07-19T23:39:22.021+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.027+0000 m31202| 2015-07-19T23:39:22.021+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.028+0000 m31100| 2015-07-19T23:39:22.021+0000 I INDEX [conn55] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.029+0000 m31201| 2015-07-19T23:39:22.021+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.029+0000 m31201| 2015-07-19T23:39:22.021+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.029+0000 m31101| 2015-07-19T23:39:22.021+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.029+0000 m31101| 2015-07-19T23:39:22.022+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.030+0000 m31102| 2015-07-19T23:39:22.022+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.033+0000 m31102| 2015-07-19T23:39:22.022+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.033+0000 m31201| 2015-07-19T23:39:22.023+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.035+0000 m31202| 2015-07-19T23:39:22.023+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.036+0000 m31200| 2015-07-19T23:39:22.023+0000 I INDEX [conn102] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.036+0000 m31100| 2015-07-19T23:39:22.024+0000 I COMMAND [conn110] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.036+0000 m31200| 2015-07-19T23:39:22.024+0000 I COMMAND [conn88] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.036+0000 m31100| 2015-07-19T23:39:22.025+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.036+0000 m31200| 2015-07-19T23:39:22.025+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.036+0000 m31101| 2015-07-19T23:39:22.026+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.036+0000 m31102| 2015-07-19T23:39:22.028+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31201| 2015-07-19T23:39:22.028+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31201| 2015-07-19T23:39:22.028+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31101| 2015-07-19T23:39:22.030+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31101| 2015-07-19T23:39:22.030+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31202| 2015-07-19T23:39:22.030+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31202| 2015-07-19T23:39:22.030+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31102| 2015-07-19T23:39:22.031+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31102| 2015-07-19T23:39:22.031+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31101| 2015-07-19T23:39:22.032+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31101| 2015-07-19T23:39:22.032+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.037+0000 m31201| 2015-07-19T23:39:22.032+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.038+0000 m31101| 2015-07-19T23:39:22.033+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.038+0000 m31201| 2015-07-19T23:39:22.033+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.038+0000 m31202| 2015-07-19T23:39:22.033+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.038+0000 m31102| 2015-07-19T23:39:22.034+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.038+0000 m31201| 2015-07-19T23:39:22.034+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.038+0000 m31102| 2015-07-19T23:39:22.034+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.038+0000 m31202| 2015-07-19T23:39:22.034+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.038+0000 m31102| 2015-07-19T23:39:22.035+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.038+0000 m31202| 2015-07-19T23:39:22.035+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.092+0000 m31200| 2015-07-19T23:39:22.092+0000 I INDEX [conn90] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.092+0000 m31200| 2015-07-19T23:39:22.092+0000 I INDEX [conn90] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.093+0000 m31100| 2015-07-19T23:39:22.092+0000 I INDEX [conn23] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.093+0000 m31100| 2015-07-19T23:39:22.092+0000 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.095+0000 m31200| 2015-07-19T23:39:22.093+0000 I INDEX [conn90] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.095+0000 m31100| 2015-07-19T23:39:22.094+0000 I INDEX [conn23] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.096+0000 m31200| 2015-07-19T23:39:22.095+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39680 #108 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.098+0000 m31200| 2015-07-19T23:39:22.098+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.099+0000 m31200| 2015-07-19T23:39:22.098+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.099+0000 m31202| 2015-07-19T23:39:22.098+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.099+0000 m31202| 2015-07-19T23:39:22.098+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.099+0000 m31100| 2015-07-19T23:39:22.098+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.102+0000 m31100| 2015-07-19T23:39:22.098+0000 I INDEX [conn33] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.102+0000 m31201| 2015-07-19T23:39:22.099+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.104+0000 m31201| 2015-07-19T23:39:22.099+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.104+0000 m31200| 2015-07-19T23:39:22.099+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.104+0000 m31101| 2015-07-19T23:39:22.100+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.105+0000 m31101| 2015-07-19T23:39:22.100+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.106+0000 m31102| 2015-07-19T23:39:22.100+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.106+0000 m31102| 2015-07-19T23:39:22.100+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.106+0000 m31100| 2015-07-19T23:39:22.100+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.106+0000 m31200| 2015-07-19T23:39:22.101+0000 I INDEX [conn102] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.106+0000 m31200| 2015-07-19T23:39:22.101+0000 I INDEX [conn102] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.106+0000 m31202| 2015-07-19T23:39:22.102+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.107+0000 m31102| 2015-07-19T23:39:22.102+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.107+0000 m31100| 2015-07-19T23:39:22.103+0000 I INDEX [conn55] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.107+0000 m31100| 2015-07-19T23:39:22.103+0000 I INDEX [conn55] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.107+0000 m31201| 2015-07-19T23:39:22.103+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.107+0000 m31200| 2015-07-19T23:39:22.103+0000 I INDEX [conn102] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.107+0000 m31101| 2015-07-19T23:39:22.104+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.108+0000 m31100| 2015-07-19T23:39:22.105+0000 I INDEX [conn55] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.108+0000 m31102| 2015-07-19T23:39:22.105+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.108+0000 m31102| 2015-07-19T23:39:22.105+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.109+0000 m31201| 2015-07-19T23:39:22.106+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.109+0000 m31201| 2015-07-19T23:39:22.106+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.109+0000 m31200| 2015-07-19T23:39:22.106+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.109+0000 m31200| 2015-07-19T23:39:22.106+0000 I INDEX [conn22] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.111+0000 m31202| 2015-07-19T23:39:22.107+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.111+0000 m31202| 2015-07-19T23:39:22.107+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.111+0000 m31100| 2015-07-19T23:39:22.108+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.111+0000 m31100| 2015-07-19T23:39:22.108+0000 I INDEX [conn54] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.112+0000 m31101| 2015-07-19T23:39:22.108+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.114+0000 m31101| 2015-07-19T23:39:22.108+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.114+0000 m31200| 2015-07-19T23:39:22.108+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.114+0000 m31100| 2015-07-19T23:39:22.109+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.114+0000 m31200| 2015-07-19T23:39:22.110+0000 I INDEX [conn99] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.117+0000 m31200| 2015-07-19T23:39:22.110+0000 I INDEX [conn99] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.117+0000 m31101| 2015-07-19T23:39:22.110+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.118+0000 m31202| 2015-07-19T23:39:22.110+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.120+0000 m31201| 2015-07-19T23:39:22.111+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.120+0000 m31102| 2015-07-19T23:39:22.111+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.120+0000 m31200| 2015-07-19T23:39:22.113+0000 I INDEX [conn99] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.123+0000 m31100| 2015-07-19T23:39:22.113+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.123+0000 m31100| 2015-07-19T23:39:22.114+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.123+0000 m31101| 2015-07-19T23:39:22.114+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.123+0000 m31101| 2015-07-19T23:39:22.114+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.124+0000 m31201| 2015-07-19T23:39:22.114+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.124+0000 m31201| 2015-07-19T23:39:22.114+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.124+0000 m31202| 2015-07-19T23:39:22.114+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.124+0000 m31202| 2015-07-19T23:39:22.114+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.124+0000 m31102| 2015-07-19T23:39:22.115+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.124+0000 m31102| 2015-07-19T23:39:22.115+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.125+0000 m31200| 2015-07-19T23:39:22.115+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.125+0000 m31200| 2015-07-19T23:39:22.115+0000 I INDEX [conn94] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.125+0000 m31101| 2015-07-19T23:39:22.116+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.125+0000 m31201| 2015-07-19T23:39:22.116+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.128+0000 m31200| 2015-07-19T23:39:22.116+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.129+0000 m31100| 2015-07-19T23:39:22.117+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.129+0000 m31102| 2015-07-19T23:39:22.117+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.129+0000 m31202| 2015-07-19T23:39:22.118+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.129+0000 m31101| 2015-07-19T23:39:22.118+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.129+0000 m31101| 2015-07-19T23:39:22.118+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.129+0000 m31201| 2015-07-19T23:39:22.119+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.130+0000 m31201| 2015-07-19T23:39:22.119+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.130+0000 m31200| 2015-07-19T23:39:22.119+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.132+0000 m31200| 2015-07-19T23:39:22.119+0000 I INDEX [conn103] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.132+0000 m31100| 2015-07-19T23:39:22.121+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.132+0000 m31100| 2015-07-19T23:39:22.121+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.132+0000 m31200| 2015-07-19T23:39:22.121+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.132+0000 m31101| 2015-07-19T23:39:22.121+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.133+0000 m31202| 2015-07-19T23:39:22.122+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.133+0000 m31202| 2015-07-19T23:39:22.122+0000 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.133+0000 m31201| 2015-07-19T23:39:22.121+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.133+0000 m31102| 2015-07-19T23:39:22.122+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.133+0000 m31102| 2015-07-19T23:39:22.122+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.133+0000 m31101| 2015-07-19T23:39:22.124+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.134+0000 m31101| 2015-07-19T23:39:22.124+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.134+0000 m31201| 2015-07-19T23:39:22.125+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.134+0000 m31201| 2015-07-19T23:39:22.125+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.134+0000 m31100| 2015-07-19T23:39:22.126+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.134+0000 m31202| 2015-07-19T23:39:22.126+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.134+0000 m31102| 2015-07-19T23:39:22.126+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.135+0000 m31201| 2015-07-19T23:39:22.127+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.135+0000 m31101| 2015-07-19T23:39:22.128+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.141+0000 m31100| 2015-07-19T23:39:22.128+0000 I INDEX [conn58] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.141+0000 m31100| 2015-07-19T23:39:22.128+0000 I INDEX [conn58] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.142+0000 m31102| 2015-07-19T23:39:22.130+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.142+0000 m31102| 2015-07-19T23:39:22.130+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.142+0000 m31101| 2015-07-19T23:39:22.130+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.142+0000 m31101| 2015-07-19T23:39:22.130+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.142+0000 m31100| 2015-07-19T23:39:22.130+0000 I INDEX [conn58] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.142+0000 m31202| 2015-07-19T23:39:22.132+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.143+0000 m31202| 2015-07-19T23:39:22.132+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.143+0000 m31102| 2015-07-19T23:39:22.132+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.143+0000 m31201| 2015-07-19T23:39:22.131+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.143+0000 m31201| 2015-07-19T23:39:22.131+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.143+0000 m31100| 2015-07-19T23:39:22.133+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.143+0000 m31100| 2015-07-19T23:39:22.133+0000 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.144+0000 m31100| 2015-07-19T23:39:22.134+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.144+0000 m31201| 2015-07-19T23:39:22.134+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.144+0000 m31101| 2015-07-19T23:39:22.134+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.144+0000 m31100| 2015-07-19T23:39:22.134+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.144+0000 m31202| 2015-07-19T23:39:22.135+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.144+0000 m31100| 2015-07-19T23:39:22.135+0000 I COMMAND [conn110] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.145+0000 m31102| 2015-07-19T23:39:22.135+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.145+0000 m31102| 2015-07-19T23:39:22.135+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.145+0000 m31100| 2015-07-19T23:39:22.135+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.158+0000 m31200| 2015-07-19T23:39:22.136+0000 I INDEX [conn108] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.158+0000 m31200| 2015-07-19T23:39:22.136+0000 I INDEX [conn108] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.158+0000 m31100| 2015-07-19T23:39:22.136+0000 I COMMAND [conn112] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.158+0000 m31201| 2015-07-19T23:39:22.137+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.159+0000 m31201| 2015-07-19T23:39:22.137+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.159+0000 m31102| 2015-07-19T23:39:22.137+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.159+0000 m31200| 2015-07-19T23:39:22.138+0000 I INDEX [conn108] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.161+0000 m31202| 2015-07-19T23:39:22.138+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.161+0000 m31202| 2015-07-19T23:39:22.138+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.161+0000 m31201| 2015-07-19T23:39:22.138+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.161+0000 m31101| 2015-07-19T23:39:22.139+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.161+0000 m31101| 2015-07-19T23:39:22.139+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.161+0000 m31200| 2015-07-19T23:39:22.139+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.162+0000 m31200| 2015-07-19T23:39:22.139+0000 I COMMAND [conn88] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.162+0000 m31200| 2015-07-19T23:39:22.140+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.162+0000 m31100| 2015-07-19T23:39:22.140+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.162+0000 m31100| 2015-07-19T23:39:22.140+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.162+0000 m31200| 2015-07-19T23:39:22.140+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.162+0000 m31202| 2015-07-19T23:39:22.141+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.163+0000 m31102| 2015-07-19T23:39:22.142+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.163+0000 m31102| 2015-07-19T23:39:22.142+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.163+0000 m31101| 2015-07-19T23:39:22.142+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.163+0000 m31200| 2015-07-19T23:39:22.142+0000 I INDEX [conn99] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.163+0000 m31200| 2015-07-19T23:39:22.142+0000 I INDEX [conn99] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.163+0000 m31100| 2015-07-19T23:39:22.144+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.164+0000 m31201| 2015-07-19T23:39:22.144+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.164+0000 m31201| 2015-07-19T23:39:22.144+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.164+0000 m31202| 2015-07-19T23:39:22.145+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.164+0000 m31202| 2015-07-19T23:39:22.145+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.164+0000 m31200| 2015-07-19T23:39:22.145+0000 I INDEX [conn99] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.167+0000 m31101| 2015-07-19T23:39:22.146+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.168+0000 m31101| 2015-07-19T23:39:22.146+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.168+0000 m31100| 2015-07-19T23:39:22.147+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.168+0000 m31100| 2015-07-19T23:39:22.147+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.168+0000 m31102| 2015-07-19T23:39:22.147+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.168+0000 m31201| 2015-07-19T23:39:22.148+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.168+0000 m31202| 2015-07-19T23:39:22.148+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.169+0000 m31200| 2015-07-19T23:39:22.148+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.171+0000 m31200| 2015-07-19T23:39:22.148+0000 I INDEX [conn94] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.171+0000 m31201| 2015-07-19T23:39:22.149+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.172+0000 m31100| 2015-07-19T23:39:22.149+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.172+0000 m31101| 2015-07-19T23:39:22.149+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.173+0000 m31102| 2015-07-19T23:39:22.150+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.173+0000 m31102| 2015-07-19T23:39:22.150+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.173+0000 m31200| 2015-07-19T23:39:22.150+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.173+0000 m31101| 2015-07-19T23:39:22.150+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.173+0000 m31100| 2015-07-19T23:39:22.150+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.173+0000 m31201| 2015-07-19T23:39:22.151+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.173+0000 m31200| 2015-07-19T23:39:22.150+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.174+0000 m31200| 2015-07-19T23:39:22.151+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.174+0000 m31200| 2015-07-19T23:39:22.151+0000 I COMMAND [conn88] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.174+0000 m31101| 2015-07-19T23:39:22.151+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.174+0000 m31100| 2015-07-19T23:39:22.152+0000 I COMMAND [conn112] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.174+0000 m31202| 2015-07-19T23:39:22.152+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.174+0000 m31202| 2015-07-19T23:39:22.152+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.174+0000 m31101| 2015-07-19T23:39:22.152+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.175+0000 m31200| 2015-07-19T23:39:22.152+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.175+0000 m31201| 2015-07-19T23:39:22.153+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.175+0000 m31100| 2015-07-19T23:39:22.153+0000 I COMMAND [conn110] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.175+0000 m31200| 2015-07-19T23:39:22.153+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.175+0000 m31101| 2015-07-19T23:39:22.153+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.175+0000 m31100| 2015-07-19T23:39:22.154+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.175+0000 m31200| 2015-07-19T23:39:22.154+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.176+0000 m31202| 2015-07-19T23:39:22.154+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.176+0000 m31201| 2015-07-19T23:39:22.154+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.176+0000 m31100| 2015-07-19T23:39:22.154+0000 I COMMAND [conn114] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.176+0000 m31202| 2015-07-19T23:39:22.155+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.176+0000 m31202| 2015-07-19T23:39:22.156+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.176+0000 m31101| 2015-07-19T23:39:22.156+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.177+0000 m31101| 2015-07-19T23:39:22.156+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.177+0000 m31100| 2015-07-19T23:39:22.156+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.177+0000 m31102| 2015-07-19T23:39:22.156+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.177+0000 m31202| 2015-07-19T23:39:22.157+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.177+0000 m31201| 2015-07-19T23:39:22.157+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.177+0000 m31201| 2015-07-19T23:39:22.157+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.177+0000 m31101| 2015-07-19T23:39:22.157+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.178+0000 m31202| 2015-07-19T23:39:22.158+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.178+0000 m31102| 2015-07-19T23:39:22.159+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.178+0000 m31201| 2015-07-19T23:39:22.159+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.178+0000 m31102| 2015-07-19T23:39:22.160+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.178+0000 m31101| 2015-07-19T23:39:22.160+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.178+0000 m31101| 2015-07-19T23:39:22.160+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.179+0000 m31102| 2015-07-19T23:39:22.160+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.179+0000 m31102| 2015-07-19T23:39:22.161+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.179+0000 m31202| 2015-07-19T23:39:22.162+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.179+0000 m31202| 2015-07-19T23:39:22.162+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.179+0000 m31201| 2015-07-19T23:39:22.162+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.179+0000 m31201| 2015-07-19T23:39:22.162+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.180+0000 m31102| 2015-07-19T23:39:22.164+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.180+0000 m31102| 2015-07-19T23:39:22.164+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.180+0000 m31201| 2015-07-19T23:39:22.164+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.180+0000 m31202| 2015-07-19T23:39:22.164+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.180+0000 m31101| 2015-07-19T23:39:22.164+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.180+0000 m31201| 2015-07-19T23:39:22.165+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.180+0000 m31102| 2015-07-19T23:39:22.165+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.180+0000 m31101| 2015-07-19T23:39:22.166+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31201| 2015-07-19T23:39:22.166+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31201| 2015-07-19T23:39:22.167+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31202| 2015-07-19T23:39:22.167+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31202| 2015-07-19T23:39:22.167+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31201| 2015-07-19T23:39:22.168+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31102| 2015-07-19T23:39:22.168+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31102| 2015-07-19T23:39:22.168+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31201| 2015-07-19T23:39:22.168+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31201| 2015-07-19T23:39:22.169+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31101| 2015-07-19T23:39:22.169+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.181+0000 m31202| 2015-07-19T23:39:22.169+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31202| 2015-07-19T23:39:22.170+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31202| 2015-07-19T23:39:22.170+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31102| 2015-07-19T23:39:22.170+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31101| 2015-07-19T23:39:22.171+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31101| 2015-07-19T23:39:22.171+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31202| 2015-07-19T23:39:22.171+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31102| 2015-07-19T23:39:22.171+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31101| 2015-07-19T23:39:22.172+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31102| 2015-07-19T23:39:22.172+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31101| 2015-07-19T23:39:22.173+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.182+0000 m31202| 2015-07-19T23:39:22.173+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.183+0000 m31102| 2015-07-19T23:39:22.173+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.183+0000 m31202| 2015-07-19T23:39:22.174+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.183+0000 m31102| 2015-07-19T23:39:22.174+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.183+0000 m31202| 2015-07-19T23:39:22.175+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.183+0000 m31102| 2015-07-19T23:39:22.175+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.183+0000 m31102| 2015-07-19T23:39:22.176+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.242+0000 m31200| 2015-07-19T23:39:22.242+0000 I INDEX [conn90] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.243+0000 m31200| 2015-07-19T23:39:22.242+0000 I INDEX [conn90] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.243+0000 m31100| 2015-07-19T23:39:22.243+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.244+0000 m31100| 2015-07-19T23:39:22.243+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.244+0000 m31200| 2015-07-19T23:39:22.244+0000 I INDEX [conn90] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.245+0000 m31100| 2015-07-19T23:39:22.244+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.249+0000 m31200| 2015-07-19T23:39:22.246+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.251+0000 m31200| 2015-07-19T23:39:22.246+0000 I INDEX [conn103] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.251+0000 m31102| 2015-07-19T23:39:22.247+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.251+0000 m31102| 2015-07-19T23:39:22.247+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.252+0000 m31201| 2015-07-19T23:39:22.247+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.252+0000 m31201| 2015-07-19T23:39:22.247+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.254+0000 m31100| 2015-07-19T23:39:22.248+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.254+0000 m31100| 2015-07-19T23:39:22.248+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.254+0000 m31202| 2015-07-19T23:39:22.248+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.255+0000 m31202| 2015-07-19T23:39:22.248+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.255+0000 m31200| 2015-07-19T23:39:22.248+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.257+0000 m31102| 2015-07-19T23:39:22.250+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.258+0000 m31100| 2015-07-19T23:39:22.250+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.259+0000 m31101| 2015-07-19T23:39:22.250+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.259+0000 m31101| 2015-07-19T23:39:22.250+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.259+0000 m31200| 2015-07-19T23:39:22.251+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.260+0000 m31200| 2015-07-19T23:39:22.251+0000 I INDEX [conn22] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.260+0000 m31200| 2015-07-19T23:39:22.252+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.260+0000 m31100| 2015-07-19T23:39:22.253+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.260+0000 m31100| 2015-07-19T23:39:22.253+0000 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.260+0000 m31201| 2015-07-19T23:39:22.253+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.260+0000 m31202| 2015-07-19T23:39:22.253+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.261+0000 m31200| 2015-07-19T23:39:22.254+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.261+0000 m31200| 2015-07-19T23:39:22.254+0000 I INDEX [conn94] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.262+0000 m31102| 2015-07-19T23:39:22.254+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.262+0000 m31102| 2015-07-19T23:39:22.254+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.262+0000 m31101| 2015-07-19T23:39:22.255+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.262+0000 m31100| 2015-07-19T23:39:22.256+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.263+0000 m31200| 2015-07-19T23:39:22.256+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.265+0000 m31102| 2015-07-19T23:39:22.257+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.265+0000 m31100| 2015-07-19T23:39:22.258+0000 I INDEX [conn58] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.265+0000 m31100| 2015-07-19T23:39:22.258+0000 I INDEX [conn58] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.265+0000 m31200| 2015-07-19T23:39:22.259+0000 I INDEX [conn99] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.266+0000 m31200| 2015-07-19T23:39:22.259+0000 I INDEX [conn99] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.266+0000 m31201| 2015-07-19T23:39:22.259+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.266+0000 m31201| 2015-07-19T23:39:22.259+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.266+0000 m31101| 2015-07-19T23:39:22.260+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.266+0000 m31101| 2015-07-19T23:39:22.260+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.266+0000 m31200| 2015-07-19T23:39:22.261+0000 I INDEX [conn99] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.267+0000 m31102| 2015-07-19T23:39:22.261+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.267+0000 m31102| 2015-07-19T23:39:22.261+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.268+0000 m31202| 2015-07-19T23:39:22.262+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.268+0000 m31202| 2015-07-19T23:39:22.262+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.268+0000 m31200| 2015-07-19T23:39:22.262+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39681 #109 (40 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.271+0000 m31102| 2015-07-19T23:39:22.263+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.271+0000 m31200| 2015-07-19T23:39:22.264+0000 I INDEX [conn108] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.271+0000 m31200| 2015-07-19T23:39:22.264+0000 I INDEX [conn108] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.271+0000 m31100| 2015-07-19T23:39:22.264+0000 I INDEX [conn58] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.271+0000 m31101| 2015-07-19T23:39:22.264+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.271+0000 m31200| 2015-07-19T23:39:22.265+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39682 #110 (41 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.273+0000 m31200| 2015-07-19T23:39:22.267+0000 I INDEX [conn108] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.273+0000 m31101| 2015-07-19T23:39:22.267+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.273+0000 m31101| 2015-07-19T23:39:22.267+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.273+0000 m31100| 2015-07-19T23:39:22.268+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.273+0000 m31100| 2015-07-19T23:39:22.268+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.274+0000 m31202| 2015-07-19T23:39:22.268+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.274+0000 m31201| 2015-07-19T23:39:22.269+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.274+0000 m31200| 2015-07-19T23:39:22.269+0000 I INDEX [conn102] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.274+0000 m31200| 2015-07-19T23:39:22.269+0000 I INDEX [conn102] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.274+0000 m31101| 2015-07-19T23:39:22.272+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.274+0000 m31200| 2015-07-19T23:39:22.272+0000 I INDEX [conn102] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.275+0000 m31202| 2015-07-19T23:39:22.273+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.275+0000 m31202| 2015-07-19T23:39:22.273+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.276+0000 m31102| 2015-07-19T23:39:22.273+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.276+0000 m31102| 2015-07-19T23:39:22.273+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.276+0000 m31201| 2015-07-19T23:39:22.272+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.277+0000 m31201| 2015-07-19T23:39:22.272+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.278+0000 m31100| 2015-07-19T23:39:22.275+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.278+0000 m31202| 2015-07-19T23:39:22.276+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.278+0000 m31201| 2015-07-19T23:39:22.276+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.278+0000 m31200| 2015-07-19T23:39:22.277+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.280+0000 m31200| 2015-07-19T23:39:22.277+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.281+0000 m31100| 2015-07-19T23:39:22.277+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.281+0000 m31100| 2015-07-19T23:39:22.277+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.283+0000 m31101| 2015-07-19T23:39:22.278+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.283+0000 m31101| 2015-07-19T23:39:22.278+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.283+0000 m31100| 2015-07-19T23:39:22.279+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.283+0000 m31200| 2015-07-19T23:39:22.279+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.286+0000 m30999| 2015-07-19T23:39:22.280+0000 I SHARDING [conn36] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.286+0000 m30999| 2015-07-19T23:39:22.280+0000 I SHARDING [conn36] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.286+0000 m31200| 2015-07-19T23:39:22.280+0000 I COMMAND [conn86] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.287+0000 m31200| 2015-07-19T23:39:22.280+0000 I NETWORK [conn108] end connection 10.139.123.131:39680 (40 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.287+0000 m31102| 2015-07-19T23:39:22.280+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.287+0000 m31101| 2015-07-19T23:39:22.281+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.287+0000 m31100| 2015-07-19T23:39:22.282+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.287+0000 m31100| 2015-07-19T23:39:22.282+0000 I INDEX [conn54] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.288+0000 m31102| 2015-07-19T23:39:22.283+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.288+0000 m31102| 2015-07-19T23:39:22.283+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.288+0000 m30998| 2015-07-19T23:39:22.284+0000 I NETWORK [conn37] end connection 10.139.123.131:35889 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.288+0000 m31100| 2015-07-19T23:39:22.284+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.289+0000 m31201| 2015-07-19T23:39:22.285+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.289+0000 m31201| 2015-07-19T23:39:22.285+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.289+0000 m31202| 2015-07-19T23:39:22.285+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.289+0000 m31202| 2015-07-19T23:39:22.285+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.289+0000 m30999| 2015-07-19T23:39:22.285+0000 I NETWORK [conn38] end connection 10.139.123.131:57246 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.292+0000 m31101| 2015-07-19T23:39:22.286+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.293+0000 m31101| 2015-07-19T23:39:22.286+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.293+0000 m31102| 2015-07-19T23:39:22.288+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.294+0000 m31201| 2015-07-19T23:39:22.289+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.294+0000 m31202| 2015-07-19T23:39:22.290+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.294+0000 m31102| 2015-07-19T23:39:22.290+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.295+0000 m31102| 2015-07-19T23:39:22.290+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.295+0000 m31100| 2015-07-19T23:39:22.291+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.295+0000 m31100| 2015-07-19T23:39:22.291+0000 I INDEX [conn33] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.295+0000 m31101| 2015-07-19T23:39:22.291+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.295+0000 m31100| 2015-07-19T23:39:22.293+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.295+0000 m31102| 2015-07-19T23:39:22.293+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.296+0000 m31200| 2015-07-19T23:39:22.293+0000 I INDEX [conn110] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.296+0000 m31200| 2015-07-19T23:39:22.293+0000 I INDEX [conn110] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.296+0000 m31201| 2015-07-19T23:39:22.293+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.296+0000 m31201| 2015-07-19T23:39:22.293+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.298+0000 m31202| 2015-07-19T23:39:22.298+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.299+0000 m31202| 2015-07-19T23:39:22.298+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.307+0000 m31102| 2015-07-19T23:39:22.300+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.307+0000 m31102| 2015-07-19T23:39:22.300+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.307+0000 m31101| 2015-07-19T23:39:22.301+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.308+0000 m31101| 2015-07-19T23:39:22.301+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.308+0000 m31100| 2015-07-19T23:39:22.301+0000 I INDEX [conn55] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.308+0000 m31100| 2015-07-19T23:39:22.301+0000 I INDEX [conn55] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.308+0000 m31102| 2015-07-19T23:39:22.302+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.308+0000 m31201| 2015-07-19T23:39:22.302+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.309+0000 m31200| 2015-07-19T23:39:22.303+0000 I INDEX [conn110] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.311+0000 m31202| 2015-07-19T23:39:22.303+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.343+0000 m31102| 2015-07-19T23:39:22.304+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.343+0000 m31102| 2015-07-19T23:39:22.304+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.343+0000 m31201| 2015-07-19T23:39:22.305+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.343+0000 m31201| 2015-07-19T23:39:22.305+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.344+0000 m31100| 2015-07-19T23:39:22.306+0000 I INDEX [conn55] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.344+0000 m31101| 2015-07-19T23:39:22.306+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.344+0000 m31102| 2015-07-19T23:39:22.307+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.344+0000 m31201| 2015-07-19T23:39:22.309+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.344+0000 m31200| 2015-07-19T23:39:22.309+0000 I INDEX [conn109] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.345+0000 m31200| 2015-07-19T23:39:22.309+0000 I INDEX [conn109] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.345+0000 m31101| 2015-07-19T23:39:22.310+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.345+0000 m31101| 2015-07-19T23:39:22.311+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.345+0000 m31100| 2015-07-19T23:39:22.310+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.346+0000 m31100| 2015-07-19T23:39:22.311+0000 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.346+0000 m31202| 2015-07-19T23:39:22.311+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.346+0000 m31202| 2015-07-19T23:39:22.311+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.347+0000 m31201| 2015-07-19T23:39:22.312+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.347+0000 m31201| 2015-07-19T23:39:22.312+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.347+0000 m31200| 2015-07-19T23:39:22.312+0000 I INDEX [conn109] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.347+0000 m31200| 2015-07-19T23:39:22.313+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.347+0000 m31200| 2015-07-19T23:39:22.314+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.348+0000 m31200| 2015-07-19T23:39:22.315+0000 I COMMAND [conn88] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.348+0000 m31100| 2015-07-19T23:39:22.315+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.348+0000 m31200| 2015-07-19T23:39:22.315+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.348+0000 m30998| 2015-07-19T23:39:22.316+0000 I SHARDING [conn38] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.349+0000 m30998| 2015-07-19T23:39:22.316+0000 I SHARDING [conn38] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.349+0000 m31200| 2015-07-19T23:39:22.316+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.349+0000 m31102| 2015-07-19T23:39:22.319+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.349+0000 m31102| 2015-07-19T23:39:22.319+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.349+0000 m31100| 2015-07-19T23:39:22.316+0000 I COMMAND [conn114] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.349+0000 m31101| 2015-07-19T23:39:22.320+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.350+0000 m31202| 2015-07-19T23:39:22.320+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.350+0000 m31201| 2015-07-19T23:39:22.320+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.350+0000 m31100| 2015-07-19T23:39:22.320+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.350+0000 m31200| 2015-07-19T23:39:22.321+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.350+0000 m31100| 2015-07-19T23:39:22.322+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.351+0000 m31200| 2015-07-19T23:39:22.322+0000 I NETWORK [conn109] end connection 10.139.123.131:39681 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.351+0000 m31100| 2015-07-19T23:39:22.322+0000 I COMMAND [conn110] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.351+0000 m30999| 2015-07-19T23:39:22.323+0000 I NETWORK [conn37] end connection 10.139.123.131:57245 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.351+0000 m31100| 2015-07-19T23:39:22.323+0000 I COMMAND [conn112] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.351+0000 m31101| 2015-07-19T23:39:22.324+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.351+0000 m31101| 2015-07-19T23:39:22.324+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.352+0000 m31201| 2015-07-19T23:39:22.324+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.352+0000 m31201| 2015-07-19T23:39:22.324+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.352+0000 m31100| 2015-07-19T23:39:22.325+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.352+0000 m31100| 2015-07-19T23:39:22.325+0000 I COMMAND [conn114] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.352+0000 m31201| 2015-07-19T23:39:22.326+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.352+0000 m31202| 2015-07-19T23:39:22.326+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.353+0000 m31202| 2015-07-19T23:39:22.326+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.353+0000 m31201| 2015-07-19T23:39:22.326+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.353+0000 m31102| 2015-07-19T23:39:22.327+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.353+0000 m31101| 2015-07-19T23:39:22.329+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.353+0000 m31201| 2015-07-19T23:39:22.329+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.353+0000 m31201| 2015-07-19T23:39:22.329+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.354+0000 m31202| 2015-07-19T23:39:22.329+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.354+0000 m31102| 2015-07-19T23:39:22.330+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.354+0000 m31102| 2015-07-19T23:39:22.330+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.354+0000 m31201| 2015-07-19T23:39:22.331+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.354+0000 m31102| 2015-07-19T23:39:22.332+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.354+0000 m31202| 2015-07-19T23:39:22.332+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.355+0000 m31202| 2015-07-19T23:39:22.332+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.355+0000 m31101| 2015-07-19T23:39:22.333+0000 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.355+0000 m31101| 2015-07-19T23:39:22.333+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.355+0000 m31102| 2015-07-19T23:39:22.333+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.355+0000 m31201| 2015-07-19T23:39:22.333+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.355+0000 m31201| 2015-07-19T23:39:22.333+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.356+0000 m31202| 2015-07-19T23:39:22.334+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.356+0000 m31102| 2015-07-19T23:39:22.334+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.356+0000 m31202| 2015-07-19T23:39:22.334+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.356+0000 m31101| 2015-07-19T23:39:22.335+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.356+0000 m31201| 2015-07-19T23:39:22.335+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.356+0000 m31201| 2015-07-19T23:39:22.336+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.357+0000 m31102| 2015-07-19T23:39:22.336+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.357+0000 m31201| 2015-07-19T23:39:22.337+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.357+0000 m31101| 2015-07-19T23:39:22.337+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.357+0000 m31101| 2015-07-19T23:39:22.337+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.357+0000 m31202| 2015-07-19T23:39:22.337+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.357+0000 m31202| 2015-07-19T23:39:22.337+0000 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.357+0000 m31201| 2015-07-19T23:39:22.337+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.358+0000 m31102| 2015-07-19T23:39:22.337+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.358+0000 m31201| 2015-07-19T23:39:22.338+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.358+0000 m31202| 2015-07-19T23:39:22.338+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.358+0000 m31101| 2015-07-19T23:39:22.338+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.358+0000 m31102| 2015-07-19T23:39:22.338+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.358+0000 m31201| 2015-07-19T23:39:22.339+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.359+0000 m31101| 2015-07-19T23:39:22.339+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.359+0000 m31201| 2015-07-19T23:39:22.339+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.359+0000 m31101| 2015-07-19T23:39:22.340+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.359+0000 m31102| 2015-07-19T23:39:22.340+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.359+0000 m31101| 2015-07-19T23:39:22.341+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.359+0000 m31102| 2015-07-19T23:39:22.341+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.360+0000 m31202| 2015-07-19T23:39:22.341+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.360+0000 m31202| 2015-07-19T23:39:22.341+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.360+0000 m31101| 2015-07-19T23:39:22.342+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.360+0000 m31101| 2015-07-19T23:39:22.343+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.360+0000 m31101| 2015-07-19T23:39:22.344+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.360+0000 m31101| 2015-07-19T23:39:22.344+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.361+0000 m31202| 2015-07-19T23:39:22.344+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.361+0000 m31202| 2015-07-19T23:39:22.345+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.361+0000 m31202| 2015-07-19T23:39:22.346+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.361+0000 m31202| 2015-07-19T23:39:22.346+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.361+0000 m31202| 2015-07-19T23:39:22.347+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.361+0000 m31202| 2015-07-19T23:39:22.348+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.361+0000 m31202| 2015-07-19T23:39:22.349+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.423+0000 m31200| 2015-07-19T23:39:22.423+0000 I INDEX [conn102] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.424+0000 m31200| 2015-07-19T23:39:22.423+0000 I INDEX [conn102] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.424+0000 m31100| 2015-07-19T23:39:22.423+0000 I INDEX [conn55] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.424+0000 m31100| 2015-07-19T23:39:22.423+0000 I INDEX [conn55] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.424+0000 m31200| 2015-07-19T23:39:22.424+0000 I INDEX [conn102] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.427+0000 m31100| 2015-07-19T23:39:22.427+0000 I INDEX [conn55] build index done. scanned 0 total records. 
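The interleaved "build index" and "CMD: dropIndexes" entries above are concurrent FSM worker threads toggling single-field indexes on the same collection: each operation is executed by a [connNN] thread on the primary of the owning shard (m31100, m31200) and then replayed on the secondaries by [repl writer worker N] threads. A minimal mongo shell sketch (hypothetical, not the actual workload file) of one create/drop cycle that would produce this pair of log lines:

    // Hypothetical sketch of one index-churn cycle seen in the entries above.
    // Run through mongos; the key name mirrors the fooN_1 pattern in the log.
    var coll = db.getSiblingDB("db6").coll6;
    coll.createIndex({ foo5: 1 }); // primary logs: I INDEX ... build index on: db6.coll6 ... name: "foo5_1"
    coll.dropIndexes();            // primary logs: I COMMAND ... CMD: dropIndexes db6.coll6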
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.431+0000 m31201| 2015-07-19T23:39:22.429+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.437+0000 m31201| 2015-07-19T23:39:22.429+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.437+0000 m31200| 2015-07-19T23:39:22.429+0000 I INDEX [conn110] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.438+0000 m31200| 2015-07-19T23:39:22.429+0000 I INDEX [conn110] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.438+0000 m31202| 2015-07-19T23:39:22.430+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.438+0000 m31202| 2015-07-19T23:39:22.430+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.438+0000 m31100| 2015-07-19T23:39:22.431+0000 I INDEX [conn49] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.438+0000 m31100| 2015-07-19T23:39:22.431+0000 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.438+0000 m31201| 2015-07-19T23:39:22.431+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.438+0000 m31101| 2015-07-19T23:39:22.432+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.438+0000 m31101| 2015-07-19T23:39:22.432+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.439+0000 m31200| 2015-07-19T23:39:22.432+0000 I INDEX [conn110] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.439+0000 m31102| 2015-07-19T23:39:22.433+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.439+0000 m31102| 2015-07-19T23:39:22.433+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.439+0000 m31202| 2015-07-19T23:39:22.433+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.439+0000 m31100| 2015-07-19T23:39:22.434+0000 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.439+0000 m31200| 2015-07-19T23:39:22.434+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.439+0000 m31200| 2015-07-19T23:39:22.434+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.439+0000 m31200| 2015-07-19T23:39:22.435+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.440+0000 m31100| 2015-07-19T23:39:22.437+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.440+0000 m31100| 2015-07-19T23:39:22.437+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.442+0000 m31101| 2015-07-19T23:39:22.437+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.445+0000 m31102| 2015-07-19T23:39:22.437+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.445+0000 m31100| 2015-07-19T23:39:22.440+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.445+0000 m31102| 2015-07-19T23:39:22.440+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31102| 2015-07-19T23:39:22.440+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31200| 2015-07-19T23:39:22.441+0000 I INDEX [conn99] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31200| 2015-07-19T23:39:22.441+0000 I INDEX [conn99] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31201| 2015-07-19T23:39:22.441+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31201| 2015-07-19T23:39:22.441+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31202| 2015-07-19T23:39:22.440+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31202| 2015-07-19T23:39:22.440+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31101| 2015-07-19T23:39:22.442+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31101| 2015-07-19T23:39:22.442+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31102| 2015-07-19T23:39:22.442+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.446+0000 m31200| 2015-07-19T23:39:22.442+0000 I INDEX [conn99] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.447+0000 m31100| 2015-07-19T23:39:22.444+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.447+0000 m31100| 2015-07-19T23:39:22.444+0000 I INDEX [conn54] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.448+0000 m31201| 2015-07-19T23:39:22.444+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.448+0000 m31202| 2015-07-19T23:39:22.444+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.450+0000 m31101| 2015-07-19T23:39:22.447+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.450+0000 m31200| 2015-07-19T23:39:22.447+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.450+0000 m31200| 2015-07-19T23:39:22.448+0000 I INDEX [conn103] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.450+0000 m31102| 2015-07-19T23:39:22.449+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.450+0000 m31102| 2015-07-19T23:39:22.449+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.451+0000 m31202| 2015-07-19T23:39:22.449+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.451+0000 m31202| 2015-07-19T23:39:22.449+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.453+0000 m31200| 2015-07-19T23:39:22.449+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.454+0000 m31201| 2015-07-19T23:39:22.450+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.463+0000 m31201| 2015-07-19T23:39:22.450+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.463+0000 m31100| 2015-07-19T23:39:22.450+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.478+0000 m31102| 2015-07-19T23:39:22.451+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.480+0000 m31200| 2015-07-19T23:39:22.452+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.480+0000 m31200| 2015-07-19T23:39:22.452+0000 I INDEX [conn94] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.486+0000 m31202| 2015-07-19T23:39:22.452+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.486+0000 m31101| 2015-07-19T23:39:22.453+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.486+0000 m31101| 2015-07-19T23:39:22.453+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.486+0000 m31100| 2015-07-19T23:39:22.453+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.487+0000 m31100| 2015-07-19T23:39:22.453+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.487+0000 m31102| 2015-07-19T23:39:22.454+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.487+0000 m31102| 2015-07-19T23:39:22.454+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.487+0000 m31200| 2015-07-19T23:39:22.454+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.487+0000 m31101| 2015-07-19T23:39:22.454+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.488+0000 m31202| 2015-07-19T23:39:22.455+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.488+0000 m31202| 2015-07-19T23:39:22.455+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.488+0000 m31102| 2015-07-19T23:39:22.455+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.488+0000 m31201| 2015-07-19T23:39:22.456+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.488+0000 m31200| 2015-07-19T23:39:22.456+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.488+0000 m31200| 2015-07-19T23:39:22.456+0000 I INDEX [conn22] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.489+0000 m31101| 2015-07-19T23:39:22.458+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.489+0000 m31101| 2015-07-19T23:39:22.458+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.489+0000 m31202| 2015-07-19T23:39:22.458+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.489+0000 m31100| 2015-07-19T23:39:22.458+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.490+0000 m31200| 2015-07-19T23:39:22.458+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.490+0000 m30999| 2015-07-19T23:39:22.459+0000 I SHARDING [conn39] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.490+0000 m30999| 2015-07-19T23:39:22.459+0000 I SHARDING [conn39] retrying command: { listIndexes: "coll6", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.490+0000 m31200| 2015-07-19T23:39:22.460+0000 I COMMAND [conn18] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.490+0000 m31201| 2015-07-19T23:39:22.460+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.490+0000 m31201| 2015-07-19T23:39:22.460+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.490+0000 m31200| 2015-07-19T23:39:22.460+0000 I COMMAND [conn87] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.490+0000 m31200| 2015-07-19T23:39:22.462+0000 I COMMAND [conn89] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m31100| 2015-07-19T23:39:22.462+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m31100| 2015-07-19T23:39:22.462+0000 I INDEX [conn33] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m31200| 2015-07-19T23:39:22.462+0000 I NETWORK [conn110] end connection 10.139.123.131:39682 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m31102| 2015-07-19T23:39:22.463+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m31102| 2015-07-19T23:39:22.463+0000 I INDEX [repl writer worker 4] building index using bulk method
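The two m30999 SHARDING entries above show the mongos discarding a pooled connection to test-rs1 that went stale mid-operation and transparently retrying the in-flight command. The retried command is an ordinary cursor-establishing listIndexes; an equivalent shell invocation (batchSize 2, as captured in the log) would be:

    // Equivalent of the command mongos conn39 retried after dropping the stale
    // pooled connection: open a listIndexes cursor with a first batch of 2.
    db.getSiblingDB("db6").runCommand({ listIndexes: "coll6", cursor: { batchSize: 2 } });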
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m31201| 2015-07-19T23:39:22.463+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m31202| 2015-07-19T23:39:22.464+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m31202| 2015-07-19T23:39:22.464+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m30998| 2015-07-19T23:39:22.465+0000 I NETWORK [conn35] end connection 10.139.123.131:35883 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m31100| 2015-07-19T23:39:22.465+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.491+0000 m31101| 2015-07-19T23:39:22.465+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.492+0000 m31200| 2015-07-19T23:39:22.465+0000 I COMMAND [conn98] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.492+0000 m31102| 2015-07-19T23:39:22.466+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.492+0000 m31202| 2015-07-19T23:39:22.466+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.492+0000 m31200| 2015-07-19T23:39:22.467+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.492+0000 m31201| 2015-07-19T23:39:22.468+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.492+0000 m31201| 2015-07-19T23:39:22.468+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.492+0000 m31101| 2015-07-19T23:39:22.468+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.492+0000 m31101| 2015-07-19T23:39:22.468+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.492+0000 m31201| 2015-07-19T23:39:22.469+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.493+0000 m31100| 2015-07-19T23:39:22.470+0000 I INDEX [conn58] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.493+0000 m31100| 2015-07-19T23:39:22.470+0000 I INDEX [conn58] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.493+0000 m31202| 2015-07-19T23:39:22.471+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.493+0000 m31202| 2015-07-19T23:39:22.471+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.493+0000 m31102| 2015-07-19T23:39:22.471+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.493+0000 m31102| 2015-07-19T23:39:22.471+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.494+0000 m31201| 2015-07-19T23:39:22.472+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.494+0000 m31201| 2015-07-19T23:39:22.472+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.494+0000 m31101| 2015-07-19T23:39:22.472+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.494+0000 m31100| 2015-07-19T23:39:22.472+0000 I INDEX [conn58] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.494+0000 m31100| 2015-07-19T23:39:22.473+0000 I COMMAND [conn15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.494+0000 m31202| 2015-07-19T23:39:22.473+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.495+0000 m31200| 2015-07-19T23:39:22.473+0000 I COMMAND [conn88] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.495+0000 m31102| 2015-07-19T23:39:22.474+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.495+0000 m31201| 2015-07-19T23:39:22.475+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.495+0000 m31101| 2015-07-19T23:39:22.475+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.495+0000 m31101| 2015-07-19T23:39:22.475+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.495+0000 m31100| 2015-07-19T23:39:22.474+0000 I COMMAND [conn114] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.496+0000 m31100| 2015-07-19T23:39:22.476+0000 I COMMAND [conn112] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.496+0000 m31100| 2015-07-19T23:39:22.477+0000 I COMMAND [conn40] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.496+0000 m31202| 2015-07-19T23:39:22.477+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.496+0000 m31202| 2015-07-19T23:39:22.477+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.496+0000 m31101| 2015-07-19T23:39:22.477+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.496+0000 m31201| 2015-07-19T23:39:22.478+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.497+0000 m31201| 2015-07-19T23:39:22.478+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.497+0000 m31102| 2015-07-19T23:39:22.478+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.497+0000 m31102| 2015-07-19T23:39:22.478+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.497+0000 m31101| 2015-07-19T23:39:22.479+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.497+0000 m31101| 2015-07-19T23:39:22.479+0000 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.497+0000 m31100| 2015-07-19T23:39:22.479+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.498+0000 m31102| 2015-07-19T23:39:22.480+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.498+0000 m31100| 2015-07-19T23:39:22.480+0000 I COMMAND [conn110] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.498+0000 m31101| 2015-07-19T23:39:22.480+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.498+0000 m31202| 2015-07-19T23:39:22.480+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.498+0000 m31201| 2015-07-19T23:39:22.481+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.498+0000 m31102| 2015-07-19T23:39:22.481+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.498+0000 m31101| 2015-07-19T23:39:22.481+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.499+0000 m31202| 2015-07-19T23:39:22.481+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.499+0000 m31101| 2015-07-19T23:39:22.482+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.499+0000 m31102| 2015-07-19T23:39:22.482+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.499+0000 m31201| 2015-07-19T23:39:22.482+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.499+0000 m31101| 2015-07-19T23:39:22.483+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.499+0000 m31102| 2015-07-19T23:39:22.483+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.500+0000 m31202| 2015-07-19T23:39:22.483+0000 I COMMAND [repl writer worker 13] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.500+0000 m31101| 2015-07-19T23:39:22.483+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.500+0000 m31102| 2015-07-19T23:39:22.484+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.500+0000 m31201| 2015-07-19T23:39:22.483+0000 I COMMAND [repl writer worker 8] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.500+0000 m31101| 2015-07-19T23:39:22.484+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.500+0000 m31102| 2015-07-19T23:39:22.484+0000 I COMMAND [repl writer worker 1] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.501+0000 m31101| 2015-07-19T23:39:22.485+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.501+0000 m31202| 2015-07-19T23:39:22.485+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.501+0000 m31201| 2015-07-19T23:39:22.485+0000 I COMMAND [repl writer worker 6] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.501+0000 m31102| 2015-07-19T23:39:22.486+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.501+0000 m31201| 2015-07-19T23:39:22.486+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.501+0000 m31202| 2015-07-19T23:39:22.486+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.501+0000 m31201| 2015-07-19T23:39:22.487+0000 I COMMAND [repl writer worker 3] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.502+0000 m31202| 2015-07-19T23:39:22.488+0000 I COMMAND [repl writer worker 5] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.502+0000 m31201| 2015-07-19T23:39:22.489+0000 I COMMAND [repl writer worker 2] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.502+0000 m31202| 2015-07-19T23:39:22.489+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.577+0000 m31200| 2015-07-19T23:39:22.577+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.577+0000 m31100| 2015-07-19T23:39:22.577+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.577+0000 m31100| 2015-07-19T23:39:22.577+0000 I INDEX [conn33] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.578+0000 m31200| 2015-07-19T23:39:22.577+0000 I INDEX [conn94] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.579+0000 m31200| 2015-07-19T23:39:22.578+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.579+0000 m31100| 2015-07-19T23:39:22.578+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.580+0000 m31200| 2015-07-19T23:39:22.580+0000 I INDEX [conn22] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.580+0000 m31200| 2015-07-19T23:39:22.580+0000 I INDEX [conn22] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.582+0000 m31100| 2015-07-19T23:39:22.581+0000 I INDEX [conn58] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.582+0000 m31100| 2015-07-19T23:39:22.581+0000 I INDEX [conn58] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.582+0000 m31102| 2015-07-19T23:39:22.581+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.582+0000 m31102| 2015-07-19T23:39:22.581+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.582+0000 m31200| 2015-07-19T23:39:22.582+0000 I INDEX [conn22] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.582+0000 m31201| 2015-07-19T23:39:22.582+0000 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.583+0000 m31201| 2015-07-19T23:39:22.582+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.583+0000 m31102| 2015-07-19T23:39:22.583+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.583+0000 m31101| 2015-07-19T23:39:22.583+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.583+0000 m31101| 2015-07-19T23:39:22.583+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.584+0000 m31201| 2015-07-19T23:39:22.584+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.584+0000 m31202| 2015-07-19T23:39:22.584+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.584+0000 m31202| 2015-07-19T23:39:22.584+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.585+0000 m31200| 2015-07-19T23:39:22.585+0000 I INDEX [conn102] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.585+0000 m31200| 2015-07-19T23:39:22.585+0000 I INDEX [conn102] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.586+0000 m31200| 2015-07-19T23:39:22.586+0000 I INDEX [conn102] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.587+0000 m31101| 2015-07-19T23:39:22.586+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.587+0000 m31200| 2015-07-19T23:39:22.586+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.590+0000 m31100| 2015-07-19T23:39:22.587+0000 I INDEX [conn58] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.590+0000 m31201| 2015-07-19T23:39:22.587+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.591+0000 m31201| 2015-07-19T23:39:22.587+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.591+0000 m31202| 2015-07-19T23:39:22.588+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.591+0000 m31200| 2015-07-19T23:39:22.589+0000 I INDEX [conn94] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.591+0000 m31200| 2015-07-19T23:39:22.589+0000 I INDEX [conn94] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.591+0000 m31100| 2015-07-19T23:39:22.589+0000 I INDEX [conn37] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.591+0000 m31100| 2015-07-19T23:39:22.589+0000 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.592+0000 m31200| 2015-07-19T23:39:22.590+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.592+0000 m30998| 2015-07-19T23:39:22.592+0000 I NETWORK [conn34] end connection 10.139.123.131:35882 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.593+0000 m31102| 2015-07-19T23:39:22.593+0000 I INDEX [repl writer worker 10] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.593+0000 m31102| 2015-07-19T23:39:22.593+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.594+0000 m31202| 2015-07-19T23:39:22.593+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.594+0000 m31202| 2015-07-19T23:39:22.593+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.594+0000 m31200| 2015-07-19T23:39:22.594+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.595+0000 m31200| 2015-07-19T23:39:22.594+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.596+0000 m31201| 2015-07-19T23:39:22.594+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.597+0000 m31100| 2015-07-19T23:39:22.594+0000 I INDEX [conn37] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.597+0000 m31101| 2015-07-19T23:39:22.595+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.598+0000 m31101| 2015-07-19T23:39:22.595+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.598+0000 m31100| 2015-07-19T23:39:22.595+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.600+0000 m30998| 2015-07-19T23:39:22.599+0000 I NETWORK [conn36] end connection 10.139.123.131:35884 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.600+0000 m31102| 2015-07-19T23:39:22.600+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.601+0000 m31200| 2015-07-19T23:39:22.600+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.603+0000 m31201| 2015-07-19T23:39:22.601+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.603+0000 m31201| 2015-07-19T23:39:22.601+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.603+0000 m31100| 2015-07-19T23:39:22.601+0000 I INDEX [conn33] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.603+0000 m31100| 2015-07-19T23:39:22.602+0000 I INDEX [conn33] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.607+0000 m31102| 2015-07-19T23:39:22.602+0000 I INDEX [repl writer worker 15] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.608+0000 m31102| 2015-07-19T23:39:22.602+0000 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.608+0000 m31200| 2015-07-19T23:39:22.602+0000 I INDEX [conn103] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.608+0000 m31200| 2015-07-19T23:39:22.602+0000 I INDEX [conn103] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.612+0000 m31202| 2015-07-19T23:39:22.603+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.613+0000 m31101| 2015-07-19T23:39:22.603+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.613+0000 m31201| 2015-07-19T23:39:22.603+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.615+0000 m31201| 2015-07-19T23:39:22.604+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.615+0000 m31102| 2015-07-19T23:39:22.604+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.615+0000 m31100| 2015-07-19T23:39:22.604+0000 I INDEX [conn33] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.616+0000 m31200| 2015-07-19T23:39:22.604+0000 I INDEX [conn103] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.616+0000 m31102| 2015-07-19T23:39:22.605+0000 I COMMAND [repl writer worker 11] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.616+0000 m31200| 2015-07-19T23:39:22.605+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.618+0000 m31100| 2015-07-19T23:39:22.606+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.618+0000 m31100| 2015-07-19T23:39:22.606+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.620+0000 m31202| 2015-07-19T23:39:22.607+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.620+0000 m31202| 2015-07-19T23:39:22.607+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.620+0000 m31201| 2015-07-19T23:39:22.608+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.620+0000 m31201| 2015-07-19T23:39:22.608+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.620+0000 m31101| 2015-07-19T23:39:22.608+0000 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.623+0000 m31101| 2015-07-19T23:39:22.608+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.624+0000 m31100| 2015-07-19T23:39:22.609+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.624+0000 m31102| 2015-07-19T23:39:22.609+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.624+0000 m31102| 2015-07-19T23:39:22.609+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.624+0000 m31201| 2015-07-19T23:39:22.610+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.624+0000 m31202| 2015-07-19T23:39:22.610+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.625+0000 m31101| 2015-07-19T23:39:22.610+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.625+0000 m31102| 2015-07-19T23:39:22.610+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
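Each [repl writer worker N] build or drop above is a secondary applying the oplog entry for an index operation first executed by a [connNN] thread on its shard's primary, which is why every build appears once per secondary. A quick way to confirm a secondary kept up, sketched against the node logging as m31101 (host and port taken from the log; setSlaveOk is the 3.x-era shell call permitting reads on a secondary):

    // Hypothetical check: connect straight to a secondary and list the fooN_1
    // indexes that its repl writer workers built in the entries above.
    var sec = new Mongo("ip-10-139-123-131:31101");
    sec.setSlaveOk();
    printjson(sec.getDB("db6").coll6.getIndexes());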
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.625+0000 m31202| 2015-07-19T23:39:22.611+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.625+0000 m31101| 2015-07-19T23:39:22.612+0000 I COMMAND [repl writer worker 7] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.625+0000 m31102| 2015-07-19T23:39:22.613+0000 I INDEX [repl writer worker 7] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.625+0000 m31102| 2015-07-19T23:39:22.613+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.626+0000 m31201| 2015-07-19T23:39:22.613+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.626+0000 m31201| 2015-07-19T23:39:22.613+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.626+0000 m31102| 2015-07-19T23:39:22.614+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.626+0000 m30999| 2015-07-19T23:39:22.615+0000 I NETWORK [conn36] end connection 10.139.123.131:57244 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.626+0000 m31202| 2015-07-19T23:39:22.616+0000 I INDEX [repl writer worker 2] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.626+0000 m31202| 2015-07-19T23:39:22.616+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.627+0000 m31100| 2015-07-19T23:39:22.617+0000 I INDEX [conn54] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.627+0000 m31100| 2015-07-19T23:39:22.617+0000 I INDEX [conn54] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.627+0000 m31101| 2015-07-19T23:39:22.617+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.627+0000 m31101| 2015-07-19T23:39:22.617+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.627+0000 m31201| 2015-07-19T23:39:22.618+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.627+0000 m31100| 2015-07-19T23:39:22.618+0000 I INDEX [conn54] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.627+0000 m31100| 2015-07-19T23:39:22.618+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.631+0000 m31202| 2015-07-19T23:39:22.619+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.631+0000 m31101| 2015-07-19T23:39:22.620+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.631+0000 m31201| 2015-07-19T23:39:22.621+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.631+0000 m31201| 2015-07-19T23:39:22.621+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.631+0000 m31102| 2015-07-19T23:39:22.623+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.631+0000 m31102| 2015-07-19T23:39:22.623+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.632+0000 m30998| 2015-07-19T23:39:22.623+0000 I NETWORK [conn38] end connection 10.139.123.131:35890 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.632+0000 m31202| 2015-07-19T23:39:22.624+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.632+0000 m31202| 2015-07-19T23:39:22.624+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.634+0000 m31101| 2015-07-19T23:39:22.625+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.634+0000 m31101| 2015-07-19T23:39:22.625+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.634+0000 m31102| 2015-07-19T23:39:22.627+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.634+0000 m31202| 2015-07-19T23:39:22.627+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.634+0000 m31102| 2015-07-19T23:39:22.628+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.634+0000 m31101| 2015-07-19T23:39:22.628+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.635+0000 m31201| 2015-07-19T23:39:22.628+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.635+0000 m31201| 2015-07-19T23:39:22.629+0000 I COMMAND [repl writer worker 10] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.635+0000 m31202| 2015-07-19T23:39:22.631+0000 I INDEX [repl writer worker 6] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.635+0000 m31202| 2015-07-19T23:39:22.631+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.635+0000 m31101| 2015-07-19T23:39:22.630+0000 I INDEX [repl writer worker 5] build index on: db6.coll6 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.635+0000 m31101| 2015-07-19T23:39:22.630+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.636+0000 m31202| 2015-07-19T23:39:22.633+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.636+0000 m31101| 2015-07-19T23:39:22.633+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.636+0000 m31202| 2015-07-19T23:39:22.634+0000 I COMMAND [repl writer worker 0] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.636+0000 m31101| 2015-07-19T23:39:22.634+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db6.coll6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.699+0000 m31100| 2015-07-19T23:39:22.699+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.699+0000 m31100| 2015-07-19T23:39:22.699+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.699+0000 m31200| 2015-07-19T23:39:22.699+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.699+0000 m31200| 2015-07-19T23:39:22.699+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.700+0000 m31200| 2015-07-19T23:39:22.700+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.701+0000 m31100| 2015-07-19T23:39:22.701+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.703+0000 m31201| 2015-07-19T23:39:22.703+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.703+0000 m31201| 2015-07-19T23:39:22.703+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.704+0000 m31202| 2015-07-19T23:39:22.703+0000 I INDEX [repl writer worker 1] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.704+0000 m31202| 2015-07-19T23:39:22.703+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.705+0000 m30999| 2015-07-19T23:39:22.705+0000 I NETWORK [conn35] end connection 10.139.123.131:57237 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.706+0000 m31201| 2015-07-19T23:39:22.706+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.707+0000 m31102| 2015-07-19T23:39:22.706+0000 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.708+0000 m31102| 2015-07-19T23:39:22.707+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.708+0000 m31202| 2015-07-19T23:39:22.707+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.708+0000 m31101| 2015-07-19T23:39:22.707+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.708+0000 m31101| 2015-07-19T23:39:22.707+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.708+0000 m31102| 2015-07-19T23:39:22.708+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.710+0000 m31101| 2015-07-19T23:39:22.709+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.722+0000 m31200| 2015-07-19T23:39:22.722+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.722+0000 m31200| 2015-07-19T23:39:22.722+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.723+0000 m31100| 2015-07-19T23:39:22.723+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.723+0000 m31100| 2015-07-19T23:39:22.723+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.723+0000 m31200| 2015-07-19T23:39:22.723+0000 I INDEX [conn93] build index done. scanned 0 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.724+0000 m31100| 2015-07-19T23:39:22.724+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.725+0000 m31100| 2015-07-19T23:39:22.724+0000 I COMMAND [conn43] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.725+0000 m31200| 2015-07-19T23:39:22.724+0000 I COMMAND [conn97] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.726+0000 m31201| 2015-07-19T23:39:22.726+0000 I INDEX [repl writer worker 4] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.727+0000 m31201| 2015-07-19T23:39:22.726+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.728+0000 m31202| 2015-07-19T23:39:22.727+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.728+0000 m31202| 2015-07-19T23:39:22.727+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.728+0000 m31101| 2015-07-19T23:39:22.728+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.728+0000 m31101| 2015-07-19T23:39:22.728+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.729+0000 m31201| 2015-07-19T23:39:22.729+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.729+0000 m31202| 2015-07-19T23:39:22.729+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.731+0000 m31201| 2015-07-19T23:39:22.730+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.731+0000 m31101| 2015-07-19T23:39:22.730+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.732+0000 m31202| 2015-07-19T23:39:22.730+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.732+0000 m31102| 2015-07-19T23:39:22.730+0000 I INDEX [repl writer worker 14] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.732+0000 m31102| 2015-07-19T23:39:22.730+0000 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.732+0000 m31101| 2015-07-19T23:39:22.731+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.733+0000 m31102| 2015-07-19T23:39:22.733+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.733+0000 m31102| 2015-07-19T23:39:22.733+0000 I COMMAND [repl writer worker 12] CMD: dropIndexes db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.828+0000 m31200| 2015-07-19T23:39:22.828+0000 I INDEX [conn93] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.828+0000 m31100| 2015-07-19T23:39:22.828+0000 I INDEX [conn52] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.828+0000 m31100| 2015-07-19T23:39:22.828+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.829+0000 m31200| 2015-07-19T23:39:22.828+0000 I INDEX [conn93] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.829+0000 m31200| 2015-07-19T23:39:22.829+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.830+0000 m31100| 2015-07-19T23:39:22.829+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.830+0000 m31200| 2015-07-19T23:39:22.830+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.835+0000 m31201| 2015-07-19T23:39:22.834+0000 I INDEX [repl writer worker 8] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.835+0000 m31201| 2015-07-19T23:39:22.834+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.836+0000 m30999| 2015-07-19T23:39:22.834+0000 I NETWORK [conn39] end connection 10.139.123.131:57247 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.836+0000 m31102| 2015-07-19T23:39:22.834+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.837+0000 m31102| 2015-07-19T23:39:22.834+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.837+0000 m31101| 2015-07-19T23:39:22.835+0000 I INDEX [repl writer worker 0] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.837+0000 m31101| 2015-07-19T23:39:22.835+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.837+0000 m31201| 2015-07-19T23:39:22.836+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.837+0000 m31202| 2015-07-19T23:39:22.837+0000 I INDEX [repl writer worker 13] build index on: db6.coll6 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.838+0000 m31202| 2015-07-19T23:39:22.837+0000 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.840+0000 m31101| 2015-07-19T23:39:22.840+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.840+0000 m31102| 2015-07-19T23:39:22.840+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.844+0000 m31202| 2015-07-19T23:39:22.841+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.851+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.851+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.851+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.852+0000 jstests/concurrency/fsm_workloads/list_indexes.js: Workload completed in 2065 ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.852+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.852+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.852+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.852+0000 m30999| 2015-07-19T23:39:22.851+0000 I COMMAND [conn1] DROP: db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.852+0000 m30999| 2015-07-19T23:39:22.851+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:22.851+0000-55ac352ad2c1f750d1548391", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349162851), what: "dropCollection.start", ns: "db6.coll6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.904+0000 m30999| 2015-07-19T23:39:22.903+0000 I SHARDING [conn1] distributed lock 'db6.coll6/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac352ad2c1f750d1548392 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.904+0000 m31100| 2015-07-19T23:39:22.904+0000 I COMMAND [conn12] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.907+0000 m31200| 2015-07-19T23:39:22.907+0000 I COMMAND [conn14] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.911+0000 m31202| 2015-07-19T23:39:22.910+0000 I COMMAND [repl writer worker 10] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.911+0000 m31201| 2015-07-19T23:39:22.911+0000 I COMMAND [repl writer worker 6] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.961+0000 m31100| 2015-07-19T23:39:22.961+0000 I SHARDING [conn12] remotely refreshing metadata for db6.coll6 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac3528d2c1f750d154838f, current metadata version is 2|3||55ac3528d2c1f750d154838f [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.962+0000 m31100| 2015-07-19T23:39:22.961+0000 W SHARDING [conn12] no chunks found when reloading db6.coll6, previous version was 0|0||55ac3528d2c1f750d154838f, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.962+0000 m31100| 
2015-07-19T23:39:22.961+0000 I SHARDING [conn12] dropping metadata for db6.coll6 at shard version 2|3||55ac3528d2c1f750d154838f, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.962+0000 m31200| 2015-07-19T23:39:22.962+0000 I SHARDING [conn14] remotely refreshing metadata for db6.coll6 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac3528d2c1f750d154838f, current metadata version is 2|5||55ac3528d2c1f750d154838f [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.963+0000 m31200| 2015-07-19T23:39:22.962+0000 W SHARDING [conn14] no chunks found when reloading db6.coll6, previous version was 0|0||55ac3528d2c1f750d154838f, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.963+0000 m31200| 2015-07-19T23:39:22.962+0000 I SHARDING [conn14] dropping metadata for db6.coll6 at shard version 2|5||55ac3528d2c1f750d154838f, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:22.963+0000 m30999| 2015-07-19T23:39:22.963+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:22.963+0000-55ac352ad2c1f750d1548393", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349162963), what: "dropCollection", ns: "db6.coll6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.014+0000 m30999| 2015-07-19T23:39:23.014+0000 I SHARDING [conn1] distributed lock 'db6.coll6/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.066+0000 m30999| 2015-07-19T23:39:23.066+0000 I COMMAND [conn1] DROP DATABASE: db6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.066+0000 m30999| 2015-07-19T23:39:23.066+0000 I SHARDING [conn1] DBConfig::dropDatabase: db6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.066+0000 m30999| 2015-07-19T23:39:23.066+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:23.066+0000-55ac352bd2c1f750d1548394", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349163066), what: "dropDatabase.start", ns: "db6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.167+0000 m30999| 2015-07-19T23:39:23.167+0000 I SHARDING [conn1] DBConfig::dropDatabase: db6 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.168+0000 m31200| 2015-07-19T23:39:23.168+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39683 #111 (39 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.168+0000 m31200| 2015-07-19T23:39:23.168+0000 I COMMAND [conn111] dropDatabase db6 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.169+0000 m31200| 2015-07-19T23:39:23.168+0000 I COMMAND [conn111] dropDatabase db6 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.169+0000 m30999| 2015-07-19T23:39:23.169+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:23.169+0000-55ac352bd2c1f750d1548395", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349163169), what: "dropDatabase", ns: "db6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.169+0000 m31200| 2015-07-19T23:39:23.169+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { 
acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 255ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.170+0000 m31200| 2015-07-19T23:39:23.169+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 30 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 256ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.170+0000 m31202| 2015-07-19T23:39:23.169+0000 I COMMAND [repl writer worker 7] dropDatabase db6 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.170+0000 m31202| 2015-07-19T23:39:23.169+0000 I COMMAND [repl writer worker 7] dropDatabase db6 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.170+0000 m31201| 2015-07-19T23:39:23.169+0000 I COMMAND [repl writer worker 15] dropDatabase db6 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.170+0000 m31201| 2015-07-19T23:39:23.169+0000 I COMMAND [repl writer worker 15] dropDatabase db6 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.224+0000 m31100| 2015-07-19T23:39:23.224+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:202 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 317ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.227+0000 m31100| 2015-07-19T23:39:23.224+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:202 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 317ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.228+0000 m31102| 2015-07-19T23:39:23.224+0000 I COMMAND [repl writer worker 5] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.228+0000 m31101| 2015-07-19T23:39:23.224+0000 I COMMAND [repl writer worker 10] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.232+0000 m31100| 2015-07-19T23:39:23.232+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.235+0000 m31101| 2015-07-19T23:39:23.235+0000 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.235+0000 m31102| 2015-07-19T23:39:23.235+0000 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.246+0000 m31200| 2015-07-19T23:39:23.245+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.246+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.247+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.247+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.247+0000 jstests/concurrency/fsm_workloads/update_check_index.js [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.247+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.247+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.247+0000 
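The banner above marks the runner moving on to the next FSM workload. Every file under jstests/concurrency/fsm_workloads/ exports a $config object that the harness uses to spawn threads and walk each one through a state machine; the sketch below shows the expected shape of such a file, with illustrative state names, bodies, and transition weights rather than the actual contents of update_check_index.js:

    var $config = (function() {
        var states = {
            init: function init(db, collName) {
                // illustrative state: each thread seeds its own document
                db[collName].insert({ tid: this.tid, n: 0 });
            },
            update: function update(db, collName) {
                db[collName].update({ tid: this.tid }, { $inc: { n: 1 } });
            }
        };
        var transitions = {
            init:   { update: 1 },
            update: { update: 1 }
        };
        return {
            threadCount: 10,   // matches the "Using 10 threads" line below
            iterations: 20,
            startState: 'init',
            states: states,
            transitions: transitions
        };
    })();
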
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.249+0000 m31202| 2015-07-19T23:39:23.248+0000 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.249+0000 m31201| 2015-07-19T23:39:23.248+0000 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.249+0000 m30999| 2015-07-19T23:39:23.249+0000 I SHARDING [conn1] distributed lock 'db7/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac352bd2c1f750d1548396 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.251+0000 m30999| 2015-07-19T23:39:23.251+0000 I SHARDING [conn1] Placing [db7] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.251+0000 m30999| 2015-07-19T23:39:23.251+0000 I SHARDING [conn1] Enabling sharding for database [db7] in config db [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.302+0000 m30999| 2015-07-19T23:39:23.302+0000 I SHARDING [conn1] distributed lock 'db7/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.311+0000 m31200| 2015-07-19T23:39:23.310+0000 I INDEX [conn28] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.311+0000 m31200| 2015-07-19T23:39:23.310+0000 I INDEX [conn28] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.312+0000 m31200| 2015-07-19T23:39:23.312+0000 I INDEX [conn28] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.313+0000 m30999| 2015-07-19T23:39:23.312+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db7.coll7", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.314+0000 m30999| 2015-07-19T23:39:23.313+0000 I SHARDING [conn1] distributed lock 'db7.coll7/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac352bd2c1f750d1548397 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.314+0000 m30999| 2015-07-19T23:39:23.314+0000 I SHARDING [conn1] enable sharding on: db7.coll7 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.315+0000 m30999| 2015-07-19T23:39:23.314+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:23.314+0000-55ac352bd2c1f750d1548398", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349163314), what: "shardCollection.start", ns: "db7.coll7", details: { shardKey: { _id: "hashed" }, collection: "db7.coll7", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.319+0000 m31201| 2015-07-19T23:39:23.318+0000 I INDEX [repl writer worker 5] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.319+0000 m31201| 2015-07-19T23:39:23.318+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.320+0000 m31202| 2015-07-19T23:39:23.319+0000 I INDEX [repl writer worker 4] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:23.320+0000 m31202| 2015-07-19T23:39:23.319+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.320+0000 m31201| 2015-07-19T23:39:23.319+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.323+0000 m31202| 2015-07-19T23:39:23.323+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.365+0000 m30999| 2015-07-19T23:39:23.365+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db7.coll7 using new epoch 55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.467+0000 m30999| 2015-07-19T23:39:23.467+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 35 version: 1|1||55ac352bd2c1f750d1548399 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.519+0000 m30999| 2015-07-19T23:39:23.518+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 36 version: 1|1||55ac352bd2c1f750d1548399 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.519+0000 m31200| 2015-07-19T23:39:23.519+0000 I SHARDING [conn93] remotely refreshing metadata for db7.coll7 with requested shard version 1|1||55ac352bd2c1f750d1548399, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.520+0000 m31200| 2015-07-19T23:39:23.519+0000 I SHARDING [conn93] collection db7.coll7 was previously unsharded, new metadata loaded with shard version 1|1||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.520+0000 m31200| 2015-07-19T23:39:23.519+0000 I SHARDING [conn93] collection version was loaded at version 1|1||55ac352bd2c1f750d1548399, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.520+0000 m30999| 2015-07-19T23:39:23.520+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:23.520+0000-55ac352bd2c1f750d154839a", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349163520), what: "shardCollection", ns: "db7.coll7", details: { version: "1|1||55ac352bd2c1f750d1548399" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.571+0000 m30999| 2015-07-19T23:39:23.571+0000 I SHARDING [conn1] distributed lock 'db7.coll7/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
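At this point db7.coll7 has been sharded on a hashed _id key and its two initial chunks loaded into the mongos chunk manager; the moveChunk and splitChunk traffic that follows redistributes those chunks across both shards. A hedged reconstruction of the equivalent shell helpers, assuming a shell attached to one of the mongos routers (the harness drives this through its own setup code, so these exact calls are an assumption; the split points are copied from the splitChunk requests later in this log):

    // run against a mongos, e.g. the router listening on port 30999
    sh.enableSharding('db7');
    sh.shardCollection('db7.coll7', { _id: 'hashed' });
    // hashed sharding splits the range at hash value 0; the lower chunk
    // is then moved to the other shard and each half is split again at
    // +/-4611686018427387902, yielding the 2|5 chunk version seen later
    sh.moveChunk('db7.coll7', { _id: MinKey }, 'test-rs0');
    sh.splitAt('db7.coll7', { _id: NumberLong('-4611686018427387902') });
    sh.splitAt('db7.coll7', { _id: NumberLong('4611686018427387902') });
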
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.572+0000 m30999| 2015-07-19T23:39:23.571+0000 I SHARDING [conn1] moving chunk ns: db7.coll7 moving ( ns: db7.coll7, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.572+0000 m31200| 2015-07-19T23:39:23.571+0000 I SHARDING [conn97] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.572+0000 m31200| 2015-07-19T23:39:23.572+0000 I SHARDING [conn97] received moveChunk request: { moveChunk: "db7.coll7", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac352bd2c1f750d1548399') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.573+0000 m31200| 2015-07-19T23:39:23.573+0000 I SHARDING [conn97] distributed lock 'db7.coll7/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac352bd9a63f6196b17264 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.573+0000 m31200| 2015-07-19T23:39:23.573+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:23.573+0000-55ac352bd9a63f6196b17265", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349163573), what: "moveChunk.start", ns: "db7.coll7", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.624+0000 m31200| 2015-07-19T23:39:23.624+0000 I SHARDING [conn97] remotely refreshing metadata for db7.coll7 based on current shard version 1|1||55ac352bd2c1f750d1548399, current metadata version is 1|1||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.624+0000 m31200| 2015-07-19T23:39:23.624+0000 I SHARDING [conn97] metadata of collection db7.coll7 already up to date (shard version : 1|1||55ac352bd2c1f750d1548399, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.625+0000 m31200| 2015-07-19T23:39:23.624+0000 I SHARDING [conn97] moveChunk request accepted at version 1|1||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.625+0000 m31200| 2015-07-19T23:39:23.625+0000 I SHARDING [conn97] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.625+0000 m31100| 2015-07-19T23:39:23.625+0000 I SHARDING [conn19] remotely refreshing metadata for db7.coll7, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.625+0000 m31100| 2015-07-19T23:39:23.625+0000 I SHARDING [conn19] collection db7.coll7 was previously unsharded, new metadata loaded with shard version 0|0||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.626+0000 m31100| 2015-07-19T23:39:23.625+0000 I SHARDING [conn19] collection version was loaded at version 1|1||55ac352bd2c1f750d1548399, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.626+0000 m31100| 2015-07-19T23:39:23.625+0000 I SHARDING [migrateThread] starting receiving-end of migration of 
chunk { _id: MinKey } -> { _id: 0 } for collection db7.coll7 from test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 at epoch 55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.627+0000 m31200| 2015-07-19T23:39:23.627+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.629+0000 m31100| 2015-07-19T23:39:23.628+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 391ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.629+0000 m31100| 2015-07-19T23:39:23.628+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 390ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.629+0000 m31200| 2015-07-19T23:39:23.629+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.630+0000 m31100| 2015-07-19T23:39:23.630+0000 I INDEX [migrateThread] build index on: db7.coll7 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.630+0000 m31100| 2015-07-19T23:39:23.630+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.633+0000 m31100| 2015-07-19T23:39:23.633+0000 I INDEX [migrateThread] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.633+0000 m31100| 2015-07-19T23:39:23.633+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.634+0000 m31200| 2015-07-19T23:39:23.633+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.635+0000 m31100| 2015-07-19T23:39:23.635+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.636+0000 m31100| 2015-07-19T23:39:23.636+0000 I SHARDING [migrateThread] Deleter starting delete for: db7.coll7 from { _id: MinKey } -> { _id: 0 }, with opId: 28371 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.636+0000 m31100| 2015-07-19T23:39:23.636+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db7.coll7 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.640+0000 m31101| 2015-07-19T23:39:23.640+0000 I INDEX [repl writer worker 11] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.640+0000 m31101| 2015-07-19T23:39:23.640+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.640+0000 m31102| 2015-07-19T23:39:23.640+0000 I INDEX [repl writer worker 15] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.640+0000 m31102| 2015-07-19T23:39:23.640+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.642+0000 m31200| 2015-07-19T23:39:23.642+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.644+0000 m31101| 2015-07-19T23:39:23.643+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.644+0000 m31102| 2015-07-19T23:39:23.644+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.644+0000 m31100| 2015-07-19T23:39:23.644+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.645+0000 m31100| 2015-07-19T23:39:23.644+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db7.coll7' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.658+0000 m31200| 2015-07-19T23:39:23.658+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.658+0000 m31200| 2015-07-19T23:39:23.658+0000 I SHARDING [conn97] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.658+0000 m31200| 2015-07-19T23:39:23.658+0000 I SHARDING [conn97] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.658+0000 m31200| 2015-07-19T23:39:23.658+0000 I SHARDING [conn97] moveChunk setting version to: 2|0||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.665+0000 m31100| 2015-07-19T23:39:23.665+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db7.coll7' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.665+0000 m31100| 2015-07-19T23:39:23.665+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:23.665+0000-55ac352b68c42881b59cba3a", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349163665), what: "moveChunk.to", ns: "db7.coll7", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 10, step 2 of 5: 8, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 20, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.716+0000 m31200| 2015-07-19T23:39:23.716+0000 I SHARDING [conn97] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db7.coll7", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.716+0000 m31200| 2015-07-19T23:39:23.716+0000 I SHARDING [conn97] moveChunk updating self version to: 2|1||55ac352bd2c1f750d1548399 through { _id: 0 } -> { _id: MaxKey } for collection 'db7.coll7' [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.717+0000 m31200| 2015-07-19T23:39:23.716+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:23.716+0000-55ac352bd9a63f6196b17266", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349163716), what: "moveChunk.commit", ns: "db7.coll7", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.767+0000 m31200| 2015-07-19T23:39:23.767+0000 I SHARDING [conn97] MigrateFromStatus::done About to acquire global lock 
to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.768+0000 m31200| 2015-07-19T23:39:23.767+0000 I SHARDING [conn97] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.768+0000 m31200| 2015-07-19T23:39:23.767+0000 I SHARDING [conn97] Deleter starting delete for: db7.coll7 from { _id: MinKey } -> { _id: 0 }, with opId: 25303 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.768+0000 m31200| 2015-07-19T23:39:23.767+0000 I SHARDING [conn97] rangeDeleter deleted 0 documents for db7.coll7 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.768+0000 m31200| 2015-07-19T23:39:23.767+0000 I SHARDING [conn97] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.768+0000 m31200| 2015-07-19T23:39:23.768+0000 I SHARDING [conn97] distributed lock 'db7.coll7/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.768+0000 m31200| 2015-07-19T23:39:23.768+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:23.768+0000-55ac352bd9a63f6196b17267", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349163768), what: "moveChunk.from", ns: "db7.coll7", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 32, step 5 of 6: 109, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.819+0000 m31200| 2015-07-19T23:39:23.819+0000 I COMMAND [conn97] command db7.coll7 command: moveChunk { moveChunk: "db7.coll7", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac352bd2c1f750d1548399') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 247ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.820+0000 m30999| 2015-07-19T23:39:23.819+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 37 version: 2|1||55ac352bd2c1f750d1548399 based on: 1|1||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.820+0000 m31100| 2015-07-19T23:39:23.820+0000 I SHARDING [conn43] received splitChunk request: { splitChunk: "db7.coll7", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac352bd2c1f750d1548399') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.821+0000 m31100| 2015-07-19T23:39:23.821+0000 I SHARDING [conn43] distributed lock 'db7.coll7/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac352b68c42881b59cba3b [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.821+0000 m31100| 2015-07-19T23:39:23.821+0000 I SHARDING [conn43] remotely 
refreshing metadata for db7.coll7 based on current shard version 0|0||55ac352bd2c1f750d1548399, current metadata version is 1|1||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.822+0000 m31100| 2015-07-19T23:39:23.821+0000 I SHARDING [conn43] updating metadata for db7.coll7 from shard version 0|0||55ac352bd2c1f750d1548399 to shard version 2|0||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.822+0000 m31100| 2015-07-19T23:39:23.822+0000 I SHARDING [conn43] collection version was loaded at version 2|1||55ac352bd2c1f750d1548399, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.822+0000 m31100| 2015-07-19T23:39:23.822+0000 I SHARDING [conn43] splitChunk accepted at version 2|0||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.822+0000 m31100| 2015-07-19T23:39:23.822+0000 I SHARDING [conn43] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:23.822+0000-55ac352b68c42881b59cba3c", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47680", time: new Date(1437349163822), what: "split", ns: "db7.coll7", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac352bd2c1f750d1548399') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac352bd2c1f750d1548399') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.874+0000 m31100| 2015-07-19T23:39:23.873+0000 I SHARDING [conn43] distributed lock 'db7.coll7/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.874+0000 m30999| 2015-07-19T23:39:23.874+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 38 version: 2|3||55ac352bd2c1f750d1548399 based on: 2|1||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.875+0000 m31200| 2015-07-19T23:39:23.874+0000 I SHARDING [conn97] received splitChunk request: { splitChunk: "db7.coll7", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac352bd2c1f750d1548399') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.876+0000 m31200| 2015-07-19T23:39:23.875+0000 I SHARDING [conn97] distributed lock 'db7.coll7/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac352bd9a63f6196b17268 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.876+0000 m31200| 2015-07-19T23:39:23.875+0000 I SHARDING [conn97] remotely refreshing metadata for db7.coll7 based on current shard version 2|0||55ac352bd2c1f750d1548399, current metadata version is 2|0||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.876+0000 m31200| 2015-07-19T23:39:23.876+0000 I SHARDING [conn97] updating metadata for db7.coll7 from shard version 2|0||55ac352bd2c1f750d1548399 to shard version 2|1||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.876+0000 m31200| 2015-07-19T23:39:23.876+0000 I SHARDING [conn97] collection version was loaded at version 2|3||55ac352bd2c1f750d1548399, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.876+0000 m31200| 2015-07-19T23:39:23.876+0000 I SHARDING [conn97] splitChunk accepted at version 
2|1||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.877+0000 m31200| 2015-07-19T23:39:23.877+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:23.877+0000-55ac352bd9a63f6196b17269", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349163877), what: "split", ns: "db7.coll7", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac352bd2c1f750d1548399') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac352bd2c1f750d1548399') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.928+0000 m31200| 2015-07-19T23:39:23.928+0000 I SHARDING [conn97] distributed lock 'db7.coll7/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.929+0000 m30999| 2015-07-19T23:39:23.928+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 39 version: 2|5||55ac352bd2c1f750d1548399 based on: 2|3||55ac352bd2c1f750d1548399 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.931+0000 m31200| 2015-07-19T23:39:23.931+0000 I INDEX [conn93] build index on: db7.coll7 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.931+0000 m31200| 2015-07-19T23:39:23.931+0000 I INDEX [conn93] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.931+0000 m31100| 2015-07-19T23:39:23.931+0000 I INDEX [conn52] build index on: db7.coll7 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.931+0000 m31100| 2015-07-19T23:39:23.931+0000 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.933+0000 m31200| 2015-07-19T23:39:23.932+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.934+0000 m31200| 2015-07-19T23:39:23.933+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:153 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 618ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.934+0000 m31200| 2015-07-19T23:39:23.933+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:153 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 618ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.934+0000 m31100| 2015-07-19T23:39:23.933+0000 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.934+0000 m31100| 2015-07-19T23:39:23.933+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 292ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.936+0000 m31100| 2015-07-19T23:39:23.933+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:153 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 293ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.937+0000 m31201| 2015-07-19T23:39:23.935+0000 I INDEX [repl writer worker 0] build index on: db7.coll7 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.937+0000 m31201| 2015-07-19T23:39:23.935+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.938+0000 m31100| 2015-07-19T23:39:23.936+0000 I INDEX [conn52] build index on: db7.coll7 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.939+0000 m31100| 2015-07-19T23:39:23.936+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.939+0000 m31202| 2015-07-19T23:39:23.936+0000 I INDEX [repl writer worker 12] build index on: db7.coll7 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.940+0000 m31202| 2015-07-19T23:39:23.936+0000 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.940+0000 m31200| 2015-07-19T23:39:23.937+0000 I INDEX [conn93] build index on: db7.coll7 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.941+0000 m31200| 2015-07-19T23:39:23.937+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.941+0000 m31102| 2015-07-19T23:39:23.937+0000 I INDEX [repl writer worker 11] build index on: db7.coll7 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.941+0000 m31102| 2015-07-19T23:39:23.937+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.942+0000 m31201| 2015-07-19T23:39:23.937+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.943+0000 m31100| 2015-07-19T23:39:23.938+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.943+0000 m31202| 2015-07-19T23:39:23.938+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.943+0000 m31101| 2015-07-19T23:39:23.938+0000 I INDEX [repl writer worker 7] build index on: db7.coll7 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.943+0000 m31101| 2015-07-19T23:39:23.938+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.944+0000 m31200| 2015-07-19T23:39:23.939+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.945+0000 m31102| 2015-07-19T23:39:23.939+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.945+0000 m31101| 2015-07-19T23:39:23.941+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.945+0000 m31201| 2015-07-19T23:39:23.942+0000 I INDEX [repl writer worker 11] build index on: db7.coll7 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.945+0000 m31201| 2015-07-19T23:39:23.942+0000 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.945+0000 m31102| 2015-07-19T23:39:23.942+0000 I INDEX [repl writer worker 13] build index on: db7.coll7 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.946+0000 m31102| 2015-07-19T23:39:23.942+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.946+0000 m31200| 2015-07-19T23:39:23.942+0000 I INDEX [conn93] build index on: db7.coll7 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.950+0000 m31200| 2015-07-19T23:39:23.942+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.950+0000 m31202| 2015-07-19T23:39:23.943+0000 I INDEX [repl writer worker 2] build index on: db7.coll7 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.950+0000 m31202| 2015-07-19T23:39:23.943+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.951+0000 m31100| 2015-07-19T23:39:23.943+0000 I INDEX [conn52] build index on: db7.coll7 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.951+0000 m31100| 2015-07-19T23:39:23.943+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.951+0000 m31102| 2015-07-19T23:39:23.943+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.952+0000 m31200| 2015-07-19T23:39:23.945+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.952+0000 m31100| 2015-07-19T23:39:23.946+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.952+0000 m31201| 2015-07-19T23:39:23.946+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.952+0000 m31101| 2015-07-19T23:39:23.946+0000 I INDEX [repl writer worker 13] build index on: db7.coll7 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.952+0000 m31101| 2015-07-19T23:39:23.946+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.952+0000 m31202| 2015-07-19T23:39:23.946+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.953+0000 m31101| 2015-07-19T23:39:23.948+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.954+0000 m31102| 2015-07-19T23:39:23.948+0000 I INDEX [repl writer worker 7] build index on: db7.coll7 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.960+0000 m31102| 2015-07-19T23:39:23.948+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.961+0000 m31201| 2015-07-19T23:39:23.949+0000 I INDEX [repl writer worker 13] build index on: db7.coll7 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.962+0000 m31201| 2015-07-19T23:39:23.949+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.962+0000 m31202| 2015-07-19T23:39:23.950+0000 I INDEX [repl writer worker 14] build index on: db7.coll7 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.962+0000 m31202| 2015-07-19T23:39:23.950+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.962+0000 m31102| 2015-07-19T23:39:23.950+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.962+0000 m31101| 2015-07-19T23:39:23.951+0000 I INDEX [repl writer worker 3] build index on: db7.coll7 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.963+0000 m31101| 2015-07-19T23:39:23.951+0000 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.963+0000 m31202| 2015-07-19T23:39:23.952+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.964+0000 m31201| 2015-07-19T23:39:23.953+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.964+0000 m31101| 2015-07-19T23:39:23.953+0000 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
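The a_1/b_1/c_1 builds above are the two shard primaries (m31100, m31200) applying createIndexes in the foreground and their secondaries replaying the same builds via the repl writer workers. A minimal shell sketch of the workload-side calls that would produce entries like these (key patterns taken from the log; the workload source itself is not shown here):

    // Foreground index builds on db7.coll7, issued through a mongos.
    // Each call appears as "build index on: db7.coll7 ... building index
    // using bulk method ... build index done" on the primary, then again
    // on every secondary as the oplog entry is applied.
    var coll = db.getSiblingDB('db7').coll7;
    coll.createIndex({a: 1});
    coll.createIndex({b: 1});
    coll.createIndex({c: 1});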
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:23.964+0000 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.056+0000 m30999| 2015-07-19T23:39:24.056+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57282 #40 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.085+0000 m30999| 2015-07-19T23:39:24.085+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57283 #41 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.102+0000 m30999| 2015-07-19T23:39:24.101+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57284 #42 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.104+0000 m30998| 2015-07-19T23:39:24.104+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35926 #39 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.105+0000 m30998| 2015-07-19T23:39:24.105+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35927 #40 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.115+0000 m30999| 2015-07-19T23:39:24.115+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57287 #43 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.126+0000 m30998| 2015-07-19T23:39:24.126+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35929 #41 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.127+0000 m30998| 2015-07-19T23:39:24.126+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35930 #42 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.130+0000 m30999| 2015-07-19T23:39:24.130+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57290 #44 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.133+0000 m30998| 2015-07-19T23:39:24.133+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35932 #43 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.137+0000 setting random seed: 2423118292354
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.137+0000 setting random seed: 4325244617648
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.138+0000 setting random seed: 7297602239996
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.139+0000 setting random seed: 2583141187205
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.139+0000 setting random seed: 9097013431601
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.141+0000 setting random seed: 8201894136145
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.145+0000 m31200| 2015-07-19T23:39:24.144+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:7 reslen:853 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 181ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.145+0000 m31200| 2015-07-19T23:39:24.144+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:7 reslen:853 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 181ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.145+0000 m31100| 2015-07-19T23:39:24.144+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:141 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 187ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.146+0000 m31100| 2015-07-19T23:39:24.144+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 183ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.146+0000 m30998| 2015-07-19T23:39:24.146+0000 I SHARDING [conn40] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 10 version: 2|5||55ac352bd2c1f750d1548399 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.149+0000 setting random seed: 1151684154756
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.149+0000 setting random seed: 5772093120031
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.151+0000 setting random seed: 8662526831030
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.180+0000 setting random seed: 1221411032602
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.210+0000 m31200| 2015-07-19T23:39:24.210+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39694 #112 (40 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.268+0000 m31100| 2015-07-19T23:39:24.268+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47859 #115 (59 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.268+0000 m31200| 2015-07-19T23:39:24.268+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39695 #113 (41 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.269+0000 m31100| 2015-07-19T23:39:24.268+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47861 #116 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.290+0000 m31200| 2015-07-19T23:39:24.290+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39698 #114 (42 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.297+0000 m31200| 2015-07-19T23:39:24.297+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39699 #115 (43 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:24.297+0000 m31200| 2015-07-19T23:39:24.297+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39700 #116 (44 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:25.214+0000 m31200| 2015-07-19T23:39:25.214+0000 I WRITE [conn113] update db7.coll7 query: { a: 1.0, b: 1.0 } update: { $set: { c: 956.0 } } nscanned:7 nscannedObjects:7 nMatched:7 nModified:7 keyUpdates:1 writeConflicts:28 numYields:28 locks:{ Global: { acquireCount: { r: 36, w: 36 } }, Database: { acquireCount: { w: 36 } }, Collection: { acquireCount: { w: 29 } }, Metadata: { acquireCount: { w: 7 } }, oplog: { acquireCount: { w: 7 } } } 104ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:25.215+0000 m31200| 2015-07-19T23:39:25.214+0000 I COMMAND [conn113] command db7.$cmd command: update { update: "coll7", updates: [ { q: { a: 1.0, b: 1.0 }, u: { $set: { c: 956.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 0|0, ObjectId('00000000ffffffffffffffff') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 36, w: 36 } }, Database: { acquireCount: { w: 36 } }, Collection: { acquireCount: { w: 29 } }, Metadata: { acquireCount: { w: 7 } }, oplog: { acquireCount: { w: 7 } } } protocol:op_command 104ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.413+0000 m30998| 2015-07-19T23:39:26.413+0000 I NETWORK [conn42] end connection 10.139.123.131:35930 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.438+0000 m30998| 2015-07-19T23:39:26.438+0000 I NETWORK [conn41] end connection 10.139.123.131:35929 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.498+0000 m30999| 2015-07-19T23:39:26.498+0000 I NETWORK [conn40] end connection 10.139.123.131:57282 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.555+0000 m30998| 2015-07-19T23:39:26.555+0000 I NETWORK [conn39] end connection 10.139.123.131:35926 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.556+0000 m30999| 2015-07-19T23:39:26.556+0000 I NETWORK [conn43] end connection 10.139.123.131:57287 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.612+0000 m30999| 2015-07-19T23:39:26.612+0000 I NETWORK [conn44] end connection 10.139.123.131:57290 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.654+0000 m30999| 2015-07-19T23:39:26.654+0000 I NETWORK [conn41] end connection 10.139.123.131:57283 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.658+0000 m30998| 2015-07-19T23:39:26.657+0000 I NETWORK [conn40] end connection 10.139.123.131:35927 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.715+0000 m30998| 2015-07-19T23:39:26.715+0000 I NETWORK [conn43] end connection 10.139.123.131:35932 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.728+0000 m30999| 2015-07-19T23:39:26.728+0000 I NETWORK [conn42] end connection 10.139.123.131:57284 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.759+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.759+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.759+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.759+0000 jstests/concurrency/fsm_workloads/update_check_index.js: Workload completed in 2794 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.759+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.759+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.759+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.759+0000 m30999| 2015-07-19T23:39:26.759+0000 I COMMAND [conn1] DROP: db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.759+0000 m30999| 2015-07-19T23:39:26.759+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:26.759+0000-55ac352ed2c1f750d154839b", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349166759), what: "dropCollection.start", ns: "db7.coll7", details: {} }
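The 104ms write above is the update_check_index workload issuing a multi-update through mongos; the matching writeConflicts:28 / numYields:28 counters show the ten worker threads colliding on the same { a: 1, b: 1 } documents and retrying under WiredTiger. Reconstructed directly from the logged command document:

    // Multi-document update as logged on shard test-rs1 (m31200).
    db.getSiblingDB('db7').coll7.update(
        {a: 1, b: 1},       // q from the logged update
        {$set: {c: 956}},   // u
        {multi: true}       // multi: true, upsert: false
    );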
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.811+0000 m30999| 2015-07-19T23:39:26.811+0000 I SHARDING [conn1] distributed lock 'db7.coll7/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac352ed2c1f750d154839c
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.811+0000 m31100| 2015-07-19T23:39:26.811+0000 I COMMAND [conn12] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.812+0000 m31200| 2015-07-19T23:39:26.812+0000 I COMMAND [conn14] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.814+0000 m31102| 2015-07-19T23:39:26.814+0000 I COMMAND [repl writer worker 11] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.814+0000 m31101| 2015-07-19T23:39:26.814+0000 I COMMAND [repl writer worker 10] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.816+0000 m31201| 2015-07-19T23:39:26.816+0000 I COMMAND [repl writer worker 8] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.816+0000 m31202| 2015-07-19T23:39:26.816+0000 I COMMAND [repl writer worker 7] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.865+0000 m31100| 2015-07-19T23:39:26.865+0000 I SHARDING [conn12] remotely refreshing metadata for db7.coll7 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac352bd2c1f750d1548399, current metadata version is 2|3||55ac352bd2c1f750d1548399
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.866+0000 m31100| 2015-07-19T23:39:26.866+0000 W SHARDING [conn12] no chunks found when reloading db7.coll7, previous version was 0|0||55ac352bd2c1f750d1548399, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.866+0000 m31100| 2015-07-19T23:39:26.866+0000 I SHARDING [conn12] dropping metadata for db7.coll7 at shard version 2|3||55ac352bd2c1f750d1548399, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.866+0000 m31200| 2015-07-19T23:39:26.866+0000 I SHARDING [conn14] remotely refreshing metadata for db7.coll7 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac352bd2c1f750d1548399, current metadata version is 2|5||55ac352bd2c1f750d1548399
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.867+0000 m31200| 2015-07-19T23:39:26.867+0000 W SHARDING [conn14] no chunks found when reloading db7.coll7, previous version was 0|0||55ac352bd2c1f750d1548399, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.867+0000 m31200| 2015-07-19T23:39:26.867+0000 I SHARDING [conn14] dropping metadata for db7.coll7 at shard version 2|5||55ac352bd2c1f750d1548399, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.867+0000 m30999| 2015-07-19T23:39:26.867+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:26.867+0000-55ac352ed2c1f750d154839d", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349166867), what: "dropCollection", ns: "db7.coll7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.918+0000 m30999| 2015-07-19T23:39:26.918+0000 I SHARDING [conn1] distributed lock 'db7.coll7/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.970+0000 m30999| 2015-07-19T23:39:26.970+0000 I COMMAND [conn1] DROP DATABASE: db7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.970+0000 m30999| 2015-07-19T23:39:26.970+0000 I SHARDING [conn1] DBConfig::dropDatabase: db7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:26.971+0000 m30999| 2015-07-19T23:39:26.970+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:26.970+0000-55ac352ed2c1f750d154839e", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349166970), what: "dropDatabase.start", ns: "db7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.072+0000 m30999| 2015-07-19T23:39:27.072+0000 I SHARDING [conn1] DBConfig::dropDatabase: db7 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.072+0000 m31200| 2015-07-19T23:39:27.072+0000 I COMMAND [conn111] dropDatabase db7 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.072+0000 m31200| 2015-07-19T23:39:27.072+0000 I COMMAND [conn111] dropDatabase db7 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.072+0000 m30999| 2015-07-19T23:39:27.072+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:27.072+0000-55ac352fd2c1f750d154839f", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349167072), what: "dropDatabase", ns: "db7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.073+0000 m31200| 2015-07-19T23:39:27.072+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.073+0000 m31200| 2015-07-19T23:39:27.072+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.073+0000 m31202| 2015-07-19T23:39:27.073+0000 I COMMAND [repl writer worker 4] dropDatabase db7 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.073+0000 m31202| 2015-07-19T23:39:27.073+0000 I COMMAND [repl writer worker 4] dropDatabase db7 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.073+0000 m31201| 2015-07-19T23:39:27.073+0000 I COMMAND [repl writer worker 1] dropDatabase db7 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.074+0000 m31201| 2015-07-19T23:39:27.073+0000 I COMMAND [repl writer worker 1] dropDatabase db7 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.418+0000 m31100| 2015-07-19T23:39:27.418+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 601ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.418+0000 m31100| 2015-07-19T23:39:27.418+0000 I WRITE [conn1] insert test.fsm_teardown query: { _id: ObjectId('55ac352f0e533c3f6a64093c'), a: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 293ms
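The db7 teardown above is the usual two-step mongos sequence: drop the sharded collection under a distributed lock (with each shard verifying "no chunks found ... this is a drop" before discarding its metadata), then drop the database on its primary shard and let the secondaries replay it. The equivalent shell calls, reconstructed from the logged DROP commands:

    // Teardown between workloads, driven through a mongos (m30999).
    var db7 = db.getSiblingDB('db7');
    db7.coll7.drop();    // "DROP: db7.coll7" + dropCollection.start/dropCollection events
    db7.dropDatabase();  // "DROP DATABASE: db7" + dropDatabase starting/finished on test-rs1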
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.419+0000 m31100| 2015-07-19T23:39:27.418+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:223 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 601ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.496+0000 m31100| 2015-07-19T23:39:27.496+0000 I COMMAND [conn1] command test.$cmd command: insert { insert: "fsm_teardown", documents: [ { _id: ObjectId('55ac352f0e533c3f6a64093c'), a: 1.0 } ], ordered: true, writeConcern: { w: 3.0, wtimeout: 300000.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } protocol:op_command 371ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.496+0000 m31100| 2015-07-19T23:39:27.496+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.568+0000 m31200| 2015-07-19T23:39:27.568+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 493ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.569+0000 m31200| 2015-07-19T23:39:27.568+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 493ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.574+0000 m31200| 2015-07-19T23:39:27.573+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.575+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.575+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.575+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.575+0000 jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.575+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.575+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.575+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.577+0000 m31202| 2015-07-19T23:39:27.576+0000 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.577+0000 m31201| 2015-07-19T23:39:27.576+0000 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.577+0000 m30999| 2015-07-19T23:39:27.577+0000 I SHARDING [conn1] distributed lock 'db8/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac352fd2c1f750d15483a0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.579+0000 m30999| 2015-07-19T23:39:27.579+0000 I SHARDING [conn1] Placing [db8] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.579+0000 m30999| 2015-07-19T23:39:27.579+0000 I SHARDING [conn1] Enabling sharding for database [db8] in config db
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.631+0000 m30999| 2015-07-19T23:39:27.630+0000 I SHARDING [conn1] distributed lock 'db8/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.637+0000 m31200| 2015-07-19T23:39:27.636+0000 I INDEX [conn113] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.637+0000 m31200| 2015-07-19T23:39:27.636+0000 I INDEX [conn113] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.638+0000 m31200| 2015-07-19T23:39:27.638+0000 I INDEX [conn113] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.638+0000 m30999| 2015-07-19T23:39:27.638+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db8.coll8", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.639+0000 m30999| 2015-07-19T23:39:27.639+0000 I SHARDING [conn1] distributed lock 'db8.coll8/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac352fd2c1f750d15483a1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.640+0000 m30999| 2015-07-19T23:39:27.640+0000 I SHARDING [conn1] enable sharding on: db8.coll8 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.641+0000 m30999| 2015-07-19T23:39:27.640+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:27.640+0000-55ac352fd2c1f750d15483a2", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349167640), what: "shardCollection.start", ns: "db8.coll8", details: { shardKey: { _id: "hashed" }, collection: "db8.coll8", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.643+0000 m31201| 2015-07-19T23:39:27.642+0000 I INDEX [repl writer worker 5] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.643+0000 m31201| 2015-07-19T23:39:27.642+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.644+0000 m31201| 2015-07-19T23:39:27.644+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.691+0000 m30999| 2015-07-19T23:39:27.690+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db8.coll8 using new epoch 55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.793+0000 m30999| 2015-07-19T23:39:27.793+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 40 version: 1|1||55ac352fd2c1f750d15483a3 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.844+0000 m30999| 2015-07-19T23:39:27.844+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 41 version: 1|1||55ac352fd2c1f750d15483a3 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.845+0000 m31200| 2015-07-19T23:39:27.845+0000 I SHARDING [conn93] remotely refreshing metadata for db8.coll8 with requested shard version 1|1||55ac352fd2c1f750d15483a3, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.846+0000 m31200| 2015-07-19T23:39:27.845+0000 I SHARDING [conn93] collection db8.coll8 was previously unsharded, new metadata loaded with shard version 1|1||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.846+0000 m31200| 2015-07-19T23:39:27.845+0000 I SHARDING [conn93] collection version was loaded at version 1|1||55ac352fd2c1f750d15483a3, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.846+0000 m30999| 2015-07-19T23:39:27.845+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:27.845+0000-55ac352fd2c1f750d15483a4", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349167845), what: "shardCollection", ns: "db8.coll8", details: { version: "1|1||55ac352fd2c1f750d15483a3" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.897+0000 m30999| 2015-07-19T23:39:27.897+0000 I SHARDING [conn1] distributed lock 'db8.coll8/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
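Setup for the indexed_insert_text_multikey workload follows directly from the logged commands: db8 is placed on test-rs1, sharding is enabled, and db8.coll8 is sharded on a hashed _id, which pre-creates one chunk per shard (numChunks: 2 under the new epoch). A shell sketch of the same sequence:

    // Reconstructed from CMD: shardcollection: { shardcollection: "db8.coll8",
    // key: { _id: "hashed" } } and the surrounding SHARDING log lines.
    sh.enableSharding('db8');                          // "Enabling sharding for database [db8]"
    sh.shardCollection('db8.coll8', {_id: 'hashed'});  // builds _id_hashed, creates 2 chunks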
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.898+0000 m30999| 2015-07-19T23:39:27.897+0000 I SHARDING [conn1] moving chunk ns: db8.coll8 moving ( ns: db8.coll8, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.898+0000 m31200| 2015-07-19T23:39:27.897+0000 I SHARDING [conn97] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.898+0000 m31200| 2015-07-19T23:39:27.898+0000 I SHARDING [conn97] received moveChunk request: { moveChunk: "db8.coll8", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac352fd2c1f750d15483a3') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.899+0000 m31200| 2015-07-19T23:39:27.899+0000 I SHARDING [conn97] distributed lock 'db8.coll8/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac352fd9a63f6196b1726b
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.899+0000 m31200| 2015-07-19T23:39:27.899+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:27.899+0000-55ac352fd9a63f6196b1726c", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349167899), what: "moveChunk.start", ns: "db8.coll8", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.950+0000 m31200| 2015-07-19T23:39:27.950+0000 I SHARDING [conn97] remotely refreshing metadata for db8.coll8 based on current shard version 1|1||55ac352fd2c1f750d15483a3, current metadata version is 1|1||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.950+0000 m31200| 2015-07-19T23:39:27.950+0000 I SHARDING [conn97] metadata of collection db8.coll8 already up to date (shard version : 1|1||55ac352fd2c1f750d15483a3, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.951+0000 m31200| 2015-07-19T23:39:27.950+0000 I SHARDING [conn97] moveChunk request accepted at version 1|1||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.951+0000 m31200| 2015-07-19T23:39:27.951+0000 I SHARDING [conn97] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.951+0000 m31100| 2015-07-19T23:39:27.951+0000 I SHARDING [conn19] remotely refreshing metadata for db8.coll8, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.951+0000 m31100| 2015-07-19T23:39:27.951+0000 I SHARDING [conn19] collection db8.coll8 was previously unsharded, new metadata loaded with shard version 0|0||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.952+0000 m31100| 2015-07-19T23:39:27.951+0000 I SHARDING [conn19] collection version was loaded at version 1|1||55ac352fd2c1f750d15483a3, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.952+0000 m31100| 2015-07-19T23:39:27.951+0000 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for collection db8.coll8 from test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 at epoch 55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.953+0000 m31200| 2015-07-19T23:39:27.953+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.954+0000 m31100| 2015-07-19T23:39:27.953+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:202 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 454ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.955+0000 m31100| 2015-07-19T23:39:27.953+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:202 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 454ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.955+0000 m31101| 2015-07-19T23:39:27.954+0000 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.955+0000 m31102| 2015-07-19T23:39:27.954+0000 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.955+0000 m31200| 2015-07-19T23:39:27.955+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.960+0000 m31200| 2015-07-19T23:39:27.959+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.960+0000 m31100| 2015-07-19T23:39:27.960+0000 I INDEX [migrateThread] build index on: db8.coll8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.960+0000 m31100| 2015-07-19T23:39:27.960+0000 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.961+0000 m31100| 2015-07-19T23:39:27.961+0000 I INDEX [migrateThread] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.962+0000 m31100| 2015-07-19T23:39:27.961+0000 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.965+0000 m31100| 2015-07-19T23:39:27.965+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.965+0000 m31100| 2015-07-19T23:39:27.965+0000 I SHARDING [migrateThread] Deleter starting delete for: db8.coll8 from { _id: MinKey } -> { _id: 0 }, with opId: 32753
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.965+0000 m31100| 2015-07-19T23:39:27.965+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db8.coll8 from { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.968+0000 m31200| 2015-07-19T23:39:27.968+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.969+0000 m31101| 2015-07-19T23:39:27.969+0000 I INDEX [repl writer worker 9] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.970+0000 m31101| 2015-07-19T23:39:27.969+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.970+0000 m31102| 2015-07-19T23:39:27.970+0000 I INDEX [repl writer worker 9] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.970+0000 m31102| 2015-07-19T23:39:27.970+0000 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.971+0000 m31101| 2015-07-19T23:39:27.971+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.971+0000 m31102| 2015-07-19T23:39:27.971+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.971+0000 m31100| 2015-07-19T23:39:27.971+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.972+0000 m31100| 2015-07-19T23:39:27.971+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db8.coll8' { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.984+0000 m31200| 2015-07-19T23:39:27.984+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.985+0000 m31200| 2015-07-19T23:39:27.984+0000 I SHARDING [conn97] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.985+0000 m31200| 2015-07-19T23:39:27.984+0000 I SHARDING [conn97] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.985+0000 m31200| 2015-07-19T23:39:27.984+0000 I SHARDING [conn97] moveChunk setting version to: 2|0||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.992+0000 m31100| 2015-07-19T23:39:27.992+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db8.coll8' { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:27.992+0000 m31100| 2015-07-19T23:39:27.992+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:27.992+0000-55ac352f68c42881b59cba3d", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349167992), what: "moveChunk.to", ns: "db8.coll8", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 13, step 2 of 5: 6, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 20, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.043+0000 m31200| 2015-07-19T23:39:28.043+0000 I SHARDING [conn97] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db8.coll8", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.044+0000 m31200| 2015-07-19T23:39:28.043+0000 I SHARDING [conn97] moveChunk updating self version to: 2|1||55ac352fd2c1f750d15483a3 through { _id: 0 } -> { _id: MaxKey } for collection 'db8.coll8'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.044+0000 m31200| 2015-07-19T23:39:28.044+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:28.044+0000-55ac3530d9a63f6196b1726d", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349168044), what: "moveChunk.commit", ns: "db8.coll8", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.095+0000 m31200| 2015-07-19T23:39:28.095+0000 I SHARDING [conn97] MigrateFromStatus::done About to acquire global lock to exit critical section
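The sequence above is the full donor/recipient moveChunk handshake: test-rs1 streams the { _id: MinKey } -> { _id: 0 } chunk to test-rs0, the recipient builds the needed indexes and waits for its replication to catch up, then the donor enters the critical section and commits the new shard version 2|0. The harness drives this through a mongos; a hedged sketch of the equivalent request (bounds rather than find, since the shard key is hashed):

    // Sketch only: the log shows the shard-internal moveChunk command;
    // through mongos the same migration can be requested like this.
    db.adminCommand({
        moveChunk: 'db8.coll8',
        bounds: [{_id: MinKey}, {_id: 0}],
        to: 'test-rs0',
        _waitForDelete: true  // matches waitForDelete: true in the logged request
    });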
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.095+0000 m31200| 2015-07-19T23:39:28.095+0000 I SHARDING [conn97] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.095+0000 m31200| 2015-07-19T23:39:28.095+0000 I SHARDING [conn97] Deleter starting delete for: db8.coll8 from { _id: MinKey } -> { _id: 0 }, with opId: 29426
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.095+0000 m31200| 2015-07-19T23:39:28.095+0000 I SHARDING [conn97] rangeDeleter deleted 0 documents for db8.coll8 from { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.096+0000 m31200| 2015-07-19T23:39:28.095+0000 I SHARDING [conn97] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.096+0000 m31200| 2015-07-19T23:39:28.095+0000 I SHARDING [conn97] distributed lock 'db8.coll8/ip-10-139-123-131:31200:1437349131:182555922' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.096+0000 m31200| 2015-07-19T23:39:28.096+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:28.095+0000-55ac3530d9a63f6196b1726e", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349168095), what: "moveChunk.from", ns: "db8.coll8", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 32, step 5 of 6: 110, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.147+0000 m31200| 2015-07-19T23:39:28.146+0000 I COMMAND [conn97] command db8.coll8 command: moveChunk { moveChunk: "db8.coll8", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac352fd2c1f750d15483a3') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 248ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.147+0000 m30999| 2015-07-19T23:39:28.147+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 42 version: 2|1||55ac352fd2c1f750d15483a3 based on: 1|1||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.148+0000 m31100| 2015-07-19T23:39:28.147+0000 I SHARDING [conn43] received splitChunk request: { splitChunk: "db8.coll8", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac352fd2c1f750d15483a3') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.149+0000 m31100| 2015-07-19T23:39:28.149+0000 I SHARDING [conn43] distributed lock 'db8.coll8/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac353068c42881b59cba3e
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.149+0000 m31100| 2015-07-19T23:39:28.149+0000 I SHARDING [conn43] remotely refreshing metadata for db8.coll8 based on current shard version 0|0||55ac352fd2c1f750d15483a3, current metadata version is 1|1||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.149+0000 m31100| 2015-07-19T23:39:28.149+0000 I SHARDING [conn43] updating metadata for db8.coll8 from shard version 0|0||55ac352fd2c1f750d15483a3 to shard version 2|0||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.149+0000 m31100| 2015-07-19T23:39:28.149+0000 I SHARDING [conn43] collection version was loaded at version 2|1||55ac352fd2c1f750d15483a3, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.150+0000 m31100| 2015-07-19T23:39:28.149+0000 I SHARDING [conn43] splitChunk accepted at version 2|0||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.150+0000 m31100| 2015-07-19T23:39:28.150+0000 I SHARDING [conn43] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:28.150+0000-55ac353068c42881b59cba3f", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47680", time: new Date(1437349168150), what: "split", ns: "db8.coll8", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac352fd2c1f750d15483a3') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac352fd2c1f750d15483a3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.201+0000 m31100| 2015-07-19T23:39:28.201+0000 I SHARDING [conn43] distributed lock 'db8.coll8/ip-10-139-123-131:31100:1437349130:1993228155' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.202+0000 m30999| 2015-07-19T23:39:28.202+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 43 version: 2|3||55ac352fd2c1f750d15483a3 based on: 2|1||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.202+0000 m31200| 2015-07-19T23:39:28.202+0000 I SHARDING [conn97] received splitChunk request: { splitChunk: "db8.coll8", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac352fd2c1f750d15483a3') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.203+0000 m31200| 2015-07-19T23:39:28.203+0000 I SHARDING [conn97] distributed lock 'db8.coll8/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3530d9a63f6196b1726f
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.203+0000 m31200| 2015-07-19T23:39:28.203+0000 I SHARDING [conn97] remotely refreshing metadata for db8.coll8 based on current shard version 2|0||55ac352fd2c1f750d15483a3, current metadata version is 2|0||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.204+0000 m31200| 2015-07-19T23:39:28.203+0000 I SHARDING [conn97] updating metadata for db8.coll8 from shard version 2|0||55ac352fd2c1f750d15483a3 to shard version 2|1||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.204+0000 m31200| 2015-07-19T23:39:28.203+0000 I SHARDING [conn97] collection version was loaded at version 2|3||55ac352fd2c1f750d15483a3, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.204+0000 m31200| 2015-07-19T23:39:28.203+0000 I SHARDING [conn97] splitChunk accepted at version 2|1||55ac352fd2c1f750d15483a3
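After the move, each shard splits its remaining chunk at the midpoint of its half of the hashed-key range (the logged splitKeys ±4611686018427387902 are approximately ±2^62), taking the collection from 2 to 4 chunks at version 2|5. A hedged shell equivalent of those two splits:

    // splitKeys taken from the log; NumberLong because the hashed
    // midpoints exceed exact double precision.
    sh.splitAt('db8.coll8', {_id: NumberLong('-4611686018427387902')}); // test-rs0's chunk
    sh.splitAt('db8.coll8', {_id: NumberLong('4611686018427387902')});  // test-rs1's chunk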
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.204+0000 m31200| 2015-07-19T23:39:28.204+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:28.204+0000-55ac3530d9a63f6196b17270", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349168204), what: "split", ns: "db8.coll8", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac352fd2c1f750d15483a3') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac352fd2c1f750d15483a3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.256+0000 m31200| 2015-07-19T23:39:28.255+0000 I SHARDING [conn97] distributed lock 'db8.coll8/ip-10-139-123-131:31200:1437349131:182555922' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.257+0000 m30999| 2015-07-19T23:39:28.256+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 44 version: 2|5||55ac352fd2c1f750d15483a3 based on: 2|3||55ac352fd2c1f750d15483a3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.426+0000 m31100| 2015-07-19T23:39:28.425+0000 I INDEX [conn52] build index on: db8.coll8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db8.coll8", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.426+0000 m31100| 2015-07-19T23:39:28.425+0000 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.426+0000 m31200| 2015-07-19T23:39:28.425+0000 I INDEX [conn93] build index on: db8.coll8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db8.coll8", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.426+0000 m31200| 2015-07-19T23:39:28.425+0000 I INDEX [conn93] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.475+0000 m31200| 2015-07-19T23:39:28.475+0000 I INDEX [conn93] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.477+0000 m31200| 2015-07-19T23:39:28.475+0000 I COMMAND [conn93] command db8.$cmd command: createIndexes { createIndexes: "coll8", indexes: [ { key: { indexed_insert_text: "text" }, name: "indexed_insert_text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 217ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.477+0000 m31200| 2015-07-19T23:39:28.475+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:355 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 837ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.477+0000 m31200| 2015-07-19T23:39:28.475+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:193 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 835ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.562+0000 m31100| 2015-07-19T23:39:28.561+0000 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.562+0000 m31100| 2015-07-19T23:39:28.561+0000 I COMMAND [conn52] command db8.$cmd command: createIndexes { createIndexes: "coll8", indexes: [ { key: { indexed_insert_text: "text" }, name: "indexed_insert_text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 303ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.562+0000 m31100| 2015-07-19T23:39:28.561+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:193 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 591ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.563+0000 m31100| 2015-07-19T23:39:28.561+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 591ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.563+0000 m31201| 2015-07-19T23:39:28.562+0000 I INDEX [repl writer worker 7] build index on: db8.coll8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db8.coll8", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.563+0000 m31201| 2015-07-19T23:39:28.562+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.563+0000 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.586+0000 m31202| 2015-07-19T23:39:28.585+0000 I INDEX [repl writer worker 1] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.586+0000 m31202| 2015-07-19T23:39:28.585+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.586+0000 m31201| 2015-07-19T23:39:28.586+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.587+0000 m31101| 2015-07-19T23:39:28.587+0000 I INDEX [repl writer worker 2] build index on: db8.coll8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db8.coll8", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.587+0000 m31101| 2015-07-19T23:39:28.587+0000 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.587+0000 m31102| 2015-07-19T23:39:28.587+0000 I INDEX [repl writer worker 4] build index on: db8.coll8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db8.coll8", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.588+0000 m31102| 2015-07-19T23:39:28.587+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.590+0000 m31202| 2015-07-19T23:39:28.590+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.617+0000 m31102| 2015-07-19T23:39:28.617+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.618+0000 m31202| 2015-07-19T23:39:28.618+0000 I INDEX [repl writer worker 14] build index on: db8.coll8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db8.coll8", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.618+0000 m31202| 2015-07-19T23:39:28.618+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.624+0000 m31202| 2015-07-19T23:39:28.624+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.651+0000 m31101| 2015-07-19T23:39:28.650+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
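The indexed_insert_text_multikey workload starts by building a text index on both shard primaries; the log shows the exact createIndexes document (note how the server stores it as a compound { _fts: "text", _ftsx: 1 } key with per-field weights). The client-side equivalent:

    // From createIndexes { createIndexes: "coll8", indexes: [ { key:
    // { indexed_insert_text: "text" }, name: "indexed_insert_text_text" } ] }.
    db.getSiblingDB('db8').coll8.createIndex(
        {indexed_insert_text: 'text'},
        {name: 'indexed_insert_text_text'}
    );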
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.857+0000 m30998| 2015-07-19T23:39:28.857+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35940 #44 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.891+0000 m30998| 2015-07-19T23:39:28.890+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35941 #45 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.895+0000 m30998| 2015-07-19T23:39:28.895+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35942 #46 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.898+0000 m30999| 2015-07-19T23:39:28.897+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57302 #45 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.910+0000 m30998| 2015-07-19T23:39:28.910+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35944 #47 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.913+0000 m30999| 2015-07-19T23:39:28.912+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57304 #46 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.924+0000 m30999| 2015-07-19T23:39:28.924+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57305 #47 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.927+0000 m30999| 2015-07-19T23:39:28.926+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57306 #48 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.941+0000 m30998| 2015-07-19T23:39:28.940+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35948 #48 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.960+0000 m30999| 2015-07-19T23:39:28.960+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57308 #49 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.963+0000 m30998| 2015-07-19T23:39:28.963+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35950 #49 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.965+0000 m30999| 2015-07-19T23:39:28.965+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57310 #50 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.967+0000 m30998| 2015-07-19T23:39:28.967+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35952 #50 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.983+0000 m30999| 2015-07-19T23:39:28.982+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57312 #51 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.985+0000 m30999| 2015-07-19T23:39:28.985+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57313 #52 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.988+0000 m30998| 2015-07-19T23:39:28.988+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35955 #51 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.992+0000 m30999| 2015-07-19T23:39:28.992+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57315 #53 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.993+0000 m30998| 2015-07-19T23:39:28.993+0000 I 
NETWORK [mongosMain] connection accepted from 10.139.123.131:35957 #52 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.995+0000 m30998| 2015-07-19T23:39:28.995+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35958 #53 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:28.997+0000 m30999| 2015-07-19T23:39:28.997+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57318 #54 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.011+0000 setting random seed: 4811421283520 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.012+0000 setting random seed: 7968524852767 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.012+0000 setting random seed: 7055710912682 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.012+0000 setting random seed: 3401643014512 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.012+0000 setting random seed: 8739989465102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.014+0000 setting random seed: 8373757414519 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.015+0000 setting random seed: 8613026277162 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.016+0000 setting random seed: 5759885795414 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.017+0000 m31100| 2015-07-19T23:39:29.017+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 452ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.017+0000 m31100| 2015-07-19T23:39:29.017+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:482 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 450ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.018+0000 setting random seed: 4435288868844 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.022+0000 setting random seed: 8832700788043 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.026+0000 setting random seed: 7009913744404 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.026+0000 setting random seed: 1764671625569 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.026+0000 setting random seed: 5084112929180 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.028+0000 m30998| 2015-07-19T23:39:29.028+0000 I SHARDING [conn47] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 11 version: 2|5||55ac352fd2c1f750d15483a3 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.030+0000 setting random seed: 8547119493596 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.030+0000 setting random seed: 9265399090945 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.033+0000 setting random seed: 3461158741265 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.035+0000 setting random seed: 6442458410747 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.036+0000 m31200| 2015-07-19T23:39:29.035+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 
keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 557ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.036+0000 m31200| 2015-07-19T23:39:29.035+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 557ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.056+0000 setting random seed: 2760632131248 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.068+0000 setting random seed: 6751668332144 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.070+0000 setting random seed: 4307940606959 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.252+0000 m31200| 2015-07-19T23:39:29.252+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39721 #117 (45 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.256+0000 m31100| 2015-07-19T23:39:29.256+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47886 #117 (61 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.259+0000 m31200| 2015-07-19T23:39:29.258+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39723 #118 (46 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.260+0000 m31100| 2015-07-19T23:39:29.260+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47888 #118 (62 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.275+0000 m31200| 2015-07-19T23:39:29.274+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39725 #119 (47 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.292+0000 m31200| 2015-07-19T23:39:29.292+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39726 #120 (48 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.296+0000 m31200| 2015-07-19T23:39:29.296+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39727 #121 (49 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.297+0000 m31200| 2015-07-19T23:39:29.297+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39728 #122 (50 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.318+0000 m31100| 2015-07-19T23:39:29.318+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47893 #119 (63 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.319+0000 m31100| 2015-07-19T23:39:29.319+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47894 #120 (64 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.323+0000 m31200| 2015-07-19T23:39:29.323+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39731 #123 (51 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.324+0000 m31200| 2015-07-19T23:39:29.323+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39732 #124 (52 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.324+0000 m31200| 2015-07-19T23:39:29.324+0000 I NETWORK [initandlisten] connection accepted from 
10.139.123.131:39733 #125 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.336+0000 m31200| 2015-07-19T23:39:29.336+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39734 #126 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.346+0000 m31100| 2015-07-19T23:39:29.346+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47899 #121 (65 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.366+0000 m31200| 2015-07-19T23:39:29.366+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39736 #127 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.410+0000 m31200| 2015-07-19T23:39:29.409+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39737 #128 (56 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.412+0000 m31200| 2015-07-19T23:39:29.412+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39738 #129 (57 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.483+0000 m31100| 2015-07-19T23:39:29.482+0000 I QUERY [conn49] query db8.coll8 query: { $text: { $search: "In future posts we’ll share more information about all the features that the new WiredTiger storage engine, and brings collection-level to the Wired..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:279 nscannedObjects:64 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:64 reslen:16776 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 119ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.522+0000 m31100| 2015-07-19T23:39:29.522+0000 I QUERY [conn52] query db8.coll8 query: { $text: { $search: "workloads with a simple version upgrade. For highly concurrent Notes. document-level locking), compression, and pluggable storage engines. workloads w..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:229 nscannedObjects:70 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:70 reslen:17680 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 107ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.538+0000 m31100| 2015-07-19T23:39:29.537+0000 I QUERY [conn58] query db8.coll8 query: { $text: { $search: "Compression file formats, and optionally, compression. WiredTiger is key to ecosystem. MongoDB 2.8 ships with two storage engines, both of which For m..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:359 nscannedObjects:75 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:75 reslen:19074 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.588+0000 m31200| 2015-07-19T23:39:29.588+0000 I QUERY [conn121] query db8.coll8 query: { $text: { $search: "of modern, multi-core servers with access to large amounts of to the WiredTiger storage engine, please see the 2.8 Release The WiredTiger storage engi..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:255 nscannedObjects:60 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:60 reslen:15944 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, Collection: { acquireCount: { r: 4 } } } 126ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.686+0000 m31200| 2015-07-19T23:39:29.685+0000 I QUERY [conn123] query db8.coll8 query: { $text: { $search: "locking to MMAPv1. As a result, concurrency will improve for all We’re truly excited to announce the availability of the first MongoDB make up the 2..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:368 nscannedObjects:69 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:69 reslen:17885 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 117ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.695+0000 m31200| 2015-07-19T23:39:29.694+0000 I QUERY [conn103] query db8.coll8 query: { $text: { $search: "Notes. compression, which provides a good compromise between speed and to the WiredTiger storage engine, please see the 2.8 Release" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:253 nscannedObjects:65 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:65 reslen:17458 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, Collection: { acquireCount: { r: 4 } } } 118ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.720+0000 m31200| 2015-07-19T23:39:29.719+0000 I QUERY [conn90] query db8.coll8 query: { $text: { $search: "Pluggable storage engines are first-class players in the MongoDB use cases, where writing makes up a significant portion of make up the 2.8 release. W..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:460 nscannedObjects:71 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:71 reslen:18136 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.790+0000 m31100| 2015-07-19T23:39:29.789+0000 I QUERY [conn37] query db8.coll8 query: { $text: { $search: "MongoDB Community to develop a wide array of storage engines designed for in greater utilization of available hardware resources, and vastly better of..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:530157552779 ntoreturn:0 ntoskip:0 nscanned:684 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:26243 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 141ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.793+0000 m31100| 2015-07-19T23:39:29.791+0000 I QUERY [conn58] query db8.coll8 query: { $text: { $search: "now correspond more directly to system throughput. 
The WiredTiger storage engine in MongoDB 2.8 provides" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:376 nscannedObjects:94 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:94 reslen:25232 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 140ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.802+0000 m31100| 2015-07-19T23:39:29.795+0000 I QUERY [conn119] query db8.coll8 query: { $text: { $search: "make up the 2.8 release. We will begin today with our three headliners: document-level locking), compression, and pluggable storage engines." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528837093405 ntoreturn:0 ntoskip:0 nscanned:466 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:26398 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 111ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.814+0000 m31100| 2015-07-19T23:39:29.813+0000 I QUERY [conn52] query db8.coll8 query: { $text: { $search: "specific workloads, hardware optimizations, or deployment architectures. For more information, including how to seamlessly upgrade document-level lock..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529609889425 ntoreturn:0 ntoskip:0 nscanned:329 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:26545 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 148ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.863+0000 m31100| 2015-07-19T23:39:29.862+0000 I QUERY [conn23] query db8.coll8 query: { $text: { $search: "RAM. To minimize on-disk overhead and I/O, WiredTiger uses compact level locking. MongoDB 2.8 introduces document-level locking with use the pluggable..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:511 nscannedObjects:99 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:99 reslen:26397 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 113ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.882+0000 m31200| 2015-07-19T23:39:29.881+0000 I QUERY [conn123] query db8.coll8 query: { $text: { $search: "use the pluggable storage API. Our original storage engine, now named great prizes (details below). MongoDB 2.8 RC0 usage in MongoDB has been traditi..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:511 nscannedObjects:85 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:85 reslen:22025 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 120ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.883+0000 m31100| 2015-07-19T23:39:29.882+0000 I QUERY [conn54] query db8.coll8 query: { $text: { $search: "specific workloads, hardware optimizations, or deployment architectures. for their data. In 2.8, WiredTiger compression defaults to Snappy locking to ..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529962978878 ntoreturn:0 ntoskip:0 nscanned:453 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:27033 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.890+0000 m31100| 2015-07-19T23:39:29.890+0000 I QUERY [conn57] query db8.coll8 query: { $text: { $search: "Prior to 2.8, MongoDB’s concurrency model supported database engines that seamlessly integrate with MongoDB. This opens the door for the Compression" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528473640401 ntoreturn:0 ntoskip:0 nscanned:406 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:26956 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.892+0000 m31200| 2015-07-19T23:39:29.891+0000 I QUERY [conn127] query db8.coll8 query: { $text: { $search: "make up the 2.8 release. We will begin today with our three headliners: document-level locking), compression, and pluggable storage engines." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:370 nscannedObjects:86 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:86 reslen:22297 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 155ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.899+0000 m31200| 2015-07-19T23:39:29.898+0000 I QUERY [conn124] query db8.coll8 query: { $text: { $search: "2.8 release candidate (rc0), headlined by improved concurrency (including index, so users can choose the compression algorithm most appropriate compre..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:386 nscannedObjects:86 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:86 reslen:22402 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 120ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.933+0000 m31100| 2015-07-19T23:39:29.932+0000 I QUERY [conn58] query db8.coll8 query: { $text: { $search: "locking to MMAPv1. As a result, concurrency will improve for all storage engine, WiredTiger, that fulfills our desire to make MongoDB of modern, multi..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528422802344 ntoreturn:0 ntoskip:0 nscanned:504 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:27523 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 123ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.936+0000 m31100| 2015-07-19T23:39:29.935+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47903 #122 (66 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.937+0000 m31100| 2015-07-19T23:39:29.936+0000 I QUERY [conn21] query db8.coll8 query: { $text: { $search: "index, so users can choose the compression algorithm most appropriate 30-80%. Compression is configured individually for each collection and in greate..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529511393430 ntoreturn:0 ntoskip:0 nscanned:502 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:27342 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 128ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.958+0000 m31200| 2015-07-19T23:39:29.957+0000 I QUERY [conn129] query db8.coll8 query: { $text: { $search: "level locking. MongoDB 2.8 introduces document-level locking with compression, which provides a good compromise between speed and storage engine, Wire..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:531 nscannedObjects:92 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:92 reslen:23997 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 147ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.974+0000 m31100| 2015-07-19T23:39:29.973+0000 I QUERY [conn52] query db8.coll8 query: { $text: { $search: "storage engine, WiredTiger, that fulfills our desire to make MongoDB usage in MongoDB has been traditionally fairly low, it will fully utilize availab..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529684314715 ntoreturn:0 ntoskip:0 nscanned:528 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:27548 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 147ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:29.991+0000 m31200| 2015-07-19T23:39:29.990+0000 I QUERY [conn125] query db8.coll8 query: { $text: { $search: "MongoDB Community to develop a wide array of storage engines designed for Prior to 2.8, MongoDB’s concurrency model supported database Notes." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:388 nscannedObjects:87 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:87 reslen:23252 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.003+0000 m31100| 2015-07-19T23:39:30.002+0000 I QUERY [conn56] query db8.coll8 query: { $text: { $search: "RAM. To minimize on-disk overhead and I/O, WiredTiger uses compact compression rates. For greater compression, at the cost of additional next three we..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:312 nscannedObjects:97 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:97 reslen:26576 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 125ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.008+0000 m31100| 2015-07-19T23:39:30.008+0000 I QUERY [conn23] query db8.coll8 query: { $text: { $search: "Notes. file formats, and optionally, compression. WiredTiger is key to usage in MongoDB has been traditionally fairly low, it will document-level lock..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529628084897 ntoreturn:0 ntoskip:0 nscanned:677 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:26920 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 117ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.024+0000 m31100| 2015-07-19T23:39:30.023+0000 I QUERY [conn55] query db8.coll8 query: { $text: { $search: "In future posts we’ll share more information about all the features that 2.8 release candidate (rc0), headlined by improved concurrency (including M..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:530317722223 ntoreturn:0 ntoskip:0 nscanned:577 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:27390 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 134ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.054+0000 m31100| 2015-07-19T23:39:30.053+0000 I QUERY [conn37] query db8.coll8 query: { $text: { $search: "make up the 2.8 release. We will begin today with our three headliners: storage engine, WiredTiger, that fulfills our desire to make MongoDB For more ..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528976450847 ntoreturn:0 ntoskip:0 nscanned:737 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:26175 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 180ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.067+0000 m31100| 2015-07-19T23:39:30.067+0000 I QUERY [conn53] query db8.coll8 query: { $text: { $search: "throughput for write-heavy workloads, including those that mix reading and writing. In future posts we’ll share more information about all the featu..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529713391108 ntoreturn:0 ntoskip:0 nscanned:413 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:27386 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 105ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.081+0000 m31200| 2015-07-19T23:39:30.080+0000 I QUERY [conn127] query db8.coll8 query: { $text: { $search: "of modern, multi-core servers with access to large amounts of dramatically improve throughput and performance. document-level locking), compression, a..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:404 nscannedObjects:98 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:98 reslen:26260 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 127ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.087+0000 m31200| 2015-07-19T23:39:30.068+0000 I QUERY [conn122] query db8.coll8 query: { $text: { $search: "throughput for write-heavy workloads, including those that mix reading and writing. In future posts we’ll share more information about all the featu..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:99871197703 ntoreturn:0 ntoskip:0 nscanned:443 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:26366 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.108+0000 m31100| 2015-07-19T23:39:30.107+0000 I QUERY [conn21] query db8.coll8 query: { $text: { $search: "in greater utilization of available hardware resources, and vastly better fully utilize available hardware resources. So whereas CPU make up the 2.8 r..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529218647813 ntoreturn:0 ntoskip:0 nscanned:680 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:27085 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.131+0000 m31200| 2015-07-19T23:39:30.130+0000 I QUERY [conn129] query db8.coll8 query: { $text: { $search: "ecosystem. MongoDB 2.8 ships with two storage engines, both of which by participating in our MongoDB 2.8 Bug Hunt. Winners are entitled to some For mo..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:99364692332 ntoreturn:0 ntoskip:0 nscanned:400 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:26960 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 135ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.140+0000 m31100| 2015-07-19T23:39:30.139+0000 I QUERY [conn58] query db8.coll8 query: { $text: { $search: "throughput for write-heavy workloads, including those that mix reading WiredTiger was created by the lead engineers of Berkeley DB and" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:233 nscannedObjects:100 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:100 reslen:27513 locks:{ Global: { acquireCount: { r: 12 } }, Database: { 
acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 165ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.155+0000 m31100| 2015-07-19T23:39:30.154+0000 I QUERY [conn52] query db8.coll8 query: { $text: { $search: "30-80%. Compression is configured individually for each collection and MongoDB Community to develop a wide array of storage engines designed for" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528836439570 ntoreturn:0 ntoskip:0 nscanned:350 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:27203 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 163ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.160+0000 m31100| 2015-07-19T23:39:30.159+0000 I QUERY [conn49] query db8.coll8 query: { $text: { $search: "locking to MMAPv1. As a result, concurrency will improve for all to the WiredTiger storage engine, please see the 2.8 Release of modern, multi-core se..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528670431185 ntoreturn:0 ntoskip:0 nscanned:714 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:26908 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 119ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.183+0000 m31100| 2015-07-19T23:39:30.182+0000 I QUERY [conn56] query db8.coll8 query: { $text: { $search: "engines that seamlessly integrate with MongoDB. This opens the door for the Notes. ecosystem. 
MongoDB 2.8 ships with two storage engines, both of which" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529569464150 ntoreturn:0 ntoskip:0 nscanned:423 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:28381 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 115ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.199+0000 m31200| 2015-07-19T23:39:30.198+0000 I QUERY [conn125] query db8.coll8 query: { $text: { $search: "CPU utilization, you can switch to zlib compression. usage in MongoDB has been traditionally fairly low, it will by participating in our MongoDB 2.8 B..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:98878202806 ntoreturn:0 ntoskip:0 nscanned:366 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:27697 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 120ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.200+0000 m31100| 2015-07-19T23:39:30.199+0000 I QUERY [conn23] query db8.coll8 query: { $text: { $search: "The WiredTiger storage engine in MongoDB 2.8 provides to the WiredTiger storage engine, please see the 2.8 Release make up the 2.8 release. We will be..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:530088793654 ntoreturn:0 ntoskip:0 nscanned:628 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:27619 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 139ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.228+0000 m31100| 2015-07-19T23:39:30.224+0000 I QUERY [conn21] query db8.coll8 query: { $text: { $search: "index, so users can choose the compression algorithm most appropriate your turn to help ensure the quality of this important release. Over the compres..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:255 nscannedObjects:95 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:95 reslen:26139 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.229+0000 m31200| 2015-07-19T23:39:30.225+0000 I QUERY [conn90] query db8.coll8 query: { $text: { $search: "for their data. In 2.8, WiredTiger compression defaults to Snappy operations, migrating to the WiredTiger storage engine will next three weeks, we cha..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:99074886435 ntoreturn:0 ntoskip:0 nscanned:474 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:27437 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.236+0000 m31100| 2015-07-19T23:39:30.235+0000 I QUERY [conn55] query db8.coll8 query: { $text: { $search: "storage engine, WiredTiger, that fulfills our desire to make MongoDB great prizes (details below). MongoDB 2.8 RC0 of modern, multi-core servers with..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528586198125 ntoreturn:0 ntoskip:0 nscanned:897 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:25076 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 142ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.247+0000 m31100| 2015-07-19T23:39:30.246+0000 I QUERY [conn54] query db8.coll8 query: { $text: { $search: "your turn to help ensure the quality of this important release. Over the now correspond more directly to system throughput. achieves high concurrency ..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529277179141 ntoreturn:0 ntoskip:0 nscanned:577 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:27251 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 144ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.255+0000 m31100| 2015-07-19T23:39:30.249+0000 I QUERY [conn121] query db8.coll8 query: { $text: { $search: "MongoDB 2.8 includes significant improvements to concurrency, resulting now correspond more directly to system throughput. throughput for write-heavy ..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529866492652 ntoreturn:0 ntoskip:0 nscanned:800 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:25161 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 112ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.272+0000 m31100| 2015-07-19T23:39:30.271+0000 I QUERY [conn120] query db8.coll8 query: { $text: { $search: "the new WiredTiger storage engine, and brings collection-level The new pluggable storage API allows external parties to build custom storage in greate..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529713707279 ntoreturn:0 ntoskip:0 nscanned:717 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:26025 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 124ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.282+0000 m31200| 2015-07-19T23:39:30.281+0000 I QUERY [conn127] query db8.coll8 query: { $text: { $search: "Notes. locking to MMAPv1. As a result, concurrency will improve for all We’re truly excited to announce the availability of the first MongoDB" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:98841174078 ntoreturn:0 ntoskip:0 nscanned:280 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:101 reslen:27649 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, Collection: { acquireCount: { r: 4 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.300+0000 m31100| 2015-07-19T23:39:30.299+0000 I QUERY [conn58] query db8.coll8 query: { $text: { $search: "engines that seamlessly integrate with MongoDB. This opens the door for the your turn to help ensure the quality of this important release. Over the w..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528890689785 ntoreturn:0 ntoskip:0 nscanned:611 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:27296 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.314+0000 m31200| 2015-07-19T23:39:30.287+0000 I QUERY [conn129] query db8.coll8 query: { $text: { $search: "MongoDB 2.8 includes significant improvements to concurrency, resulting now correspond more directly to system throughput. throughput for write-heavy ..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:100863080610 ntoreturn:0 ntoskip:0 nscanned:736 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:26250 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 145ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.318+0000 m31200| 2015-07-19T23:39:30.317+0000 I QUERY [conn125] query db8.coll8 query: { $text: { $search: "your turn to help ensure the quality of this important release. Over the the coming weeks optimizing and tuning some of the new features. Now it’s b..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:100827954177 ntoreturn:0 ntoskip:0 nscanned:643 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:26109 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 107ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.329+0000 m31100| 2015-07-19T23:39:30.328+0000 I QUERY [conn50] query db8.coll8 query: { $text: { $search: "delivering the other two features we’re highlighting today. We’re truly excited to announce the availability of the first MongoDB Improved Concurrency" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529274712436 ntoreturn:0 ntoskip:0 nscanned:309 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:27574 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 149ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.351+0000 m31200| 2015-07-19T23:39:30.335+0000 I QUERY [conn122] query db8.coll8 query: { $text: { $search: "index, so users can choose the compression algorithm most appropriate Notes. Prior to 2.8, MongoDB’s concurrency model supported database throughput..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:100148629801 ntoreturn:0 ntoskip:0 nscanned:530 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:26835 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.353+0000 m31200| 2015-07-19T23:39:30.304+0000 I QUERY [conn124] query db8.coll8 query: { $text: { $search: "delivering the other two features we’re highlighting today. We’re truly excited to announce the availability of the first MongoDB Improved Concurrency" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:100521085037 ntoreturn:0 ntoskip:0 nscanned:267 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:101 reslen:27886 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 125ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.353+0000 m31200| 2015-07-19T23:39:30.353+0000 I QUERY [conn128] query db8.coll8 query: { $text: { $search: "delivering the other two features we’re highlighting today. Prior to 2.8, MongoDB’s concurrency model supported database dramatically improve thro..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:99754741757 ntoreturn:0 ntoskip:0 nscanned:457 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:27369 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 122ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.354+0000 m31100| 2015-07-19T23:39:30.353+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47904 #123 (67 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.369+0000 m31100| 2015-07-19T23:39:30.369+0000 I QUERY [conn56] query db8.coll8 query: { $text: { $search: "The improved concurrency also means that MongoDB will more the new WiredTiger storage engine, and brings collection-level fully utilize available hard..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:530269335493 ntoreturn:0 ntoskip:0 nscanned:742 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:101 reslen:26279 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 148ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.400+0000 m31100| 2015-07-19T23:39:30.399+0000 I QUERY [conn54] query db8.coll8 query: { $text: { $search: "operations, migrating to the WiredTiger storage engine will" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:530351269451 ntoreturn:0 ntoskip:0 nscanned:292 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:29491 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 121ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.413+0000 
m31100| 2015-07-19T23:39:30.412+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47905 #124 (68 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.420+0000 m31100| 2015-07-19T23:39:30.419+0000 I QUERY [conn21] query db8.coll8 query: { $text: { $search: "MongoDB Community to develop a wide array of storage engines designed for locking to MMAPv1. As a result, concurrency will improve for all RAM. To min..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529913147758 ntoreturn:0 ntoskip:0 nscanned:693 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:27790 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 171ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.431+0000 m30998| 2015-07-19T23:39:30.428+0000 I NETWORK [conn46] end connection 10.139.123.131:35942 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.484+0000 m31100| 2015-07-19T23:39:30.483+0000 I QUERY [conn58] query db8.coll8 query: { $text: { $search: "burn through write-heavy workloads and be more resource efficient. by participating in our MongoDB 2.8 Bug Hunt. Winners are entitled to some in great..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528502629286 ntoreturn:0 ntoskip:0 nscanned:792 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:25950 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 162ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.489+0000 m30999| 2015-07-19T23:39:30.489+0000 I NETWORK [conn49] end connection 10.139.123.131:57308 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.508+0000 m31100| 2015-07-19T23:39:30.507+0000 I QUERY [conn120] query db8.coll8 query: { $text: { $search: "level locking. MongoDB 2.8 introduces document-level locking with Notes. in greater utilization of available hardware resources, and vastly better Com..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529993079249 ntoreturn:0 ntoskip:0 nscanned:566 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:25897 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.551+0000 m31200| 2015-07-19T23:39:30.550+0000 I QUERY [conn127] query db8.coll8 query: { $text: { $search: "level locking. MongoDB 2.8 introduces document-level locking with make up the 2.8 release. We will begin today with our three headliners: We’ve put ..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:100192689397 ntoreturn:0 ntoskip:0 nscanned:632 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:25816 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 134ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.569+0000 m31200| 2015-07-19T23:39:30.569+0000 I QUERY [conn90] query db8.coll8 query: { $text: { $search: "ecosystem. MongoDB 2.8 ships with two storage engines, both of which The improved concurrency also means that MongoDB will more great prizes (details ..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:100295762578 ntoreturn:0 ntoskip:0 nscanned:584 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:27018 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.581+0000 m31100| 2015-07-19T23:39:30.513+0000 I QUERY [conn50] query db8.coll8 query: { $text: { $search: "Prior to 2.8, MongoDB’s concurrency model supported database make up the 2.8 release. We will begin today with our three headliners: of modern, mult..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528739460034 ntoreturn:0 ntoskip:0 nscanned:618 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:27988 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 123ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.593+0000 m31100| 2015-07-19T23:39:30.496+0000 I QUERY [conn23] query db8.coll8 query: { $text: { $search: "“MMAPv1”, remains as the default. We are also introducing a new We’re truly excited to announce the availability of the first MongoDB your turn ..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529590019206 ntoreturn:0 ntoskip:0 nscanned:605 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:26732 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 177ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.594+0000 m31100| 2015-07-19T23:39:30.550+0000 I QUERY [conn121] query db8.coll8 query: { $text: { $search: "storage engine, WiredTiger, that fulfills our desire to make MongoDB of modern, multi-core servers with access to large amounts of MongoDB 2.8 include..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:530125689414 ntoreturn:0 ntoskip:0 nscanned:1007 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:24622 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.616+0000 m31100| 2015-07-19T23:39:30.586+0000 I QUERY [conn57] query db8.coll8 query: { $text: { $search: "usage in MongoDB has been traditionally fairly low, it will Improved Concurrency document-level locking), compression, and pluggable storage engines. ..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529600024561 ntoreturn:0 ntoskip:0 nscanned:740 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:25023 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 106ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.625+0000 m30999| 2015-07-19T23:39:30.625+0000 I NETWORK [conn54] end connection 10.139.123.131:57318 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.630+0000 m30998| 2015-07-19T23:39:30.629+0000 I NETWORK [conn44] end connection 10.139.123.131:35940 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.642+0000 m31100| 2015-07-19T23:39:30.642+0000 I QUERY [conn52] query db8.coll8 query: { $text: { $search: "“MMAPv1”, remains as the default. We are also introducing a new Pluggable Storage Engines 30-80%. Compression is configured individually for each ..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:530287725601 ntoreturn:0 ntoskip:0 nscanned:463 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:27663 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 127ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.654+0000 m31100| 2015-07-19T23:39:30.654+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47906 #125 (69 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.661+0000 m31100| 2015-07-19T23:39:30.661+0000 I QUERY [conn21] query db8.coll8 query: { $text: { $search: "throughput for write-heavy workloads, including those that mix reading by participating in our MongoDB 2.8 Bug Hunt. Winners are entitled to some" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529057633864 ntoreturn:0 ntoskip:0 nscanned:442 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:26509 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.672+0000 m31100| 2015-07-19T23:39:30.663+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47907 #126 (70 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.686+0000 m30999| 2015-07-19T23:39:30.686+0000 I NETWORK [conn45] end connection 10.139.123.131:57302 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.690+0000 m31100| 2015-07-19T23:39:30.690+0000 I QUERY [conn119] query db8.coll8 query: { $text: { $search: "now correspond more directly to system throughput. 
to the WiredTiger storage engine, please see the 2.8 Release" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528987169866 ntoreturn:0 ntoskip:0 nscanned:529 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:26826 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 131ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.697+0000 m31100| 2015-07-19T23:39:30.692+0000 I QUERY [conn56] query db8.coll8 query: { $text: { $search: "ecosystem. MongoDB 2.8 ships with two storage engines, both of which the new WiredTiger storage engine, and brings collection-level The new pluggable ..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529573174950 ntoreturn:0 ntoskip:0 nscanned:817 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:25587 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.698+0000 m31100| 2015-07-19T23:39:30.693+0000 I QUERY [conn54] query db8.coll8 query: { $text: { $search: "The improved concurrency also means that MongoDB will more RAM. To minimize on-disk overhead and I/O, WiredTiger uses compact of modern, multi-core se..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529133499791 ntoreturn:0 ntoskip:0 nscanned:698 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:24577 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 153ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.707+0000 m31100| 2015-07-19T23:39:30.706+0000 I QUERY [conn53] query db8.coll8 query: { $text: { $search: "The WiredTiger storage engine in MongoDB 2.8 provides by participating in our MongoDB 2.8 Bug Hunt. Winners are entitled to some index, so users can c..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528845968099 ntoreturn:0 ntoskip:0 nscanned:725 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:25458 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 175ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.723+0000 m31100| 2015-07-19T23:39:30.722+0000 I QUERY [conn38] getmore db8.coll8 query: { $text: { $search: "level locking. MongoDB 2.8 introduces document-level locking with make up the 2.8 release. We will begin today with our three headliners: We’ve put ..." } } cursorid:530250153762 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:41 reslen:11664 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 128ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.734+0000 m31200| 2015-07-19T23:39:30.734+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39744 #130 (58 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.740+0000 m31100| 2015-07-19T23:39:30.739+0000 I QUERY [conn51] query db8.coll8 query: { $text: { $search: "your turn to help ensure the quality of this important release. 
Over the delivering the other two features we’re highlighting today. operations, mig..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529062707329 ntoreturn:0 ntoskip:0 nscanned:558 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:27434 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.757+0000 m30998| 2015-07-19T23:39:30.757+0000 I NETWORK [conn48] end connection 10.139.123.131:35948 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.774+0000 m30999| 2015-07-19T23:39:30.774+0000 I NETWORK [conn48] end connection 10.139.123.131:57306 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.787+0000 m30998| 2015-07-19T23:39:30.787+0000 I NETWORK [conn50] end connection 10.139.123.131:35952 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.794+0000 m31100| 2015-07-19T23:39:30.794+0000 I QUERY [conn58] query db8.coll8 query: { $text: { $search: "in greater utilization of available hardware resources, and vastly better fully utilize available hardware resources. So whereas CPU ecosystem. MongoD..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529286545195 ntoreturn:0 ntoskip:0 nscanned:662 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:25760 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 107ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.830+0000 m31100| 2015-07-19T23:39:30.830+0000 I QUERY [conn37] query db8.coll8 query: { $text: { $search: "Improved Concurrency fully utilize available hardware resources. So whereas CPU workloads with a simple version upgrade. For highly concurrent The imp..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528860925941 ntoreturn:0 ntoskip:0 nscanned:553 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:26546 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.837+0000 m31100| 2015-07-19T23:39:30.836+0000 I QUERY [conn54] query db8.coll8 query: { $text: { $search: "Prior to 2.8, MongoDB’s concurrency model supported database storage engine, WiredTiger, that fulfills our desire to make MongoDB and writing. index..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:528640964385 ntoreturn:0 ntoskip:0 nscanned:920 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:24960 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.850+0000 m31200| 2015-07-19T23:39:30.849+0000 I QUERY [conn122] query db8.coll8 query: { $text: { $search: "your turn to help ensure the quality of this important release. Over the the coming weeks optimizing and tuning some of the new features. Now it’s a..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:100378653017 ntoreturn:0 ntoskip:0 nscanned:506 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:27518 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.871+0000 m30999| 2015-07-19T23:39:30.870+0000 I NETWORK [conn52] end connection 10.139.123.131:57313 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.889+0000 m30998| 2015-07-19T23:39:30.889+0000 I NETWORK [conn49] end connection 10.139.123.131:35950 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.943+0000 m31100| 2015-07-19T23:39:30.942+0000 I QUERY [conn56] query db8.coll8 query: { $text: { $search: "storage engine, WiredTiger, that fulfills our desire to make MongoDB locking to MMAPv1. As a result, concurrency will improve for all level locking. M..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:529924496269 ntoreturn:0 ntoskip:0 nscanned:915 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:25134 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 117ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.962+0000 m30998| 2015-07-19T23:39:30.961+0000 I NETWORK [conn45] end connection 10.139.123.131:35941 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:30.984+0000 m30999| 2015-07-19T23:39:30.984+0000 I NETWORK [conn47] end connection 10.139.123.131:57305 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.068+0000 m30999| 2015-07-19T23:39:31.068+0000 I NETWORK [conn51] end connection 10.139.123.131:57312 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.069+0000 m30998| 2015-07-19T23:39:31.069+0000 I NETWORK [conn53] end connection 10.139.123.131:35958 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.073+0000 m30999| 2015-07-19T23:39:31.073+0000 I NETWORK [conn53] end connection 10.139.123.131:57315 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.080+0000 m30999| 2015-07-19T23:39:31.079+0000 I NETWORK [conn50] end connection 10.139.123.131:57310 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.097+0000 m30998| 2015-07-19T23:39:31.096+0000 I NETWORK [conn47] end connection 10.139.123.131:35944 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.110+0000 m30999| 2015-07-19T23:39:31.110+0000 I NETWORK [conn46] end connection 10.139.123.131:57304 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.144+0000 m30998| 2015-07-19T23:39:31.144+0000 I NETWORK [conn52] end connection 10.139.123.131:35957 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.187+0000 m30998| 2015-07-19T23:39:31.187+0000 I NETWORK [conn51] end connection 10.139.123.131:35955 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.206+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.206+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.206+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.207+0000 jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js: Workload completed in 2644 ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.207+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.207+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.207+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.207+0000 m30999| 
2015-07-19T23:39:31.206+0000 I COMMAND [conn1] DROP: db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.207+0000 m30999| 2015-07-19T23:39:31.206+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:31.206+0000-55ac3533d2c1f750d15483a5", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349171206), what: "dropCollection.start", ns: "db8.coll8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.259+0000 m30999| 2015-07-19T23:39:31.258+0000 I SHARDING [conn1] distributed lock 'db8.coll8/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3533d2c1f750d15483a6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.259+0000 m31100| 2015-07-19T23:39:31.259+0000 I COMMAND [conn12] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.259+0000 m31100| 2015-07-19T23:39:31.259+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 106ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.260+0000 m31100| 2015-07-19T23:39:31.259+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 106ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.260+0000 m31200| 2015-07-19T23:39:31.260+0000 I COMMAND [conn14] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.260+0000 m31200| 2015-07-19T23:39:31.260+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 149ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.261+0000 m31200| 2015-07-19T23:39:31.260+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 146ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.262+0000 m31101| 2015-07-19T23:39:31.262+0000 I COMMAND [repl writer worker 10] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.262+0000 m31102| 2015-07-19T23:39:31.262+0000 I COMMAND [repl writer worker 5] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.263+0000 m31202| 2015-07-19T23:39:31.263+0000 I COMMAND [repl writer worker 0] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.263+0000 m31201| 2015-07-19T23:39:31.263+0000 I COMMAND [repl writer worker 7] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.312+0000 m31100| 2015-07-19T23:39:31.312+0000 I SHARDING [conn12] remotely refreshing metadata for db8.coll8 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac352fd2c1f750d15483a3, current 
metadata version is 2|3||55ac352fd2c1f750d15483a3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.313+0000 m31100| 2015-07-19T23:39:31.313+0000 W SHARDING [conn12] no chunks found when reloading db8.coll8, previous version was 0|0||55ac352fd2c1f750d15483a3, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.313+0000 m31100| 2015-07-19T23:39:31.313+0000 I SHARDING [conn12] dropping metadata for db8.coll8 at shard version 2|3||55ac352fd2c1f750d15483a3, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.313+0000 m31200| 2015-07-19T23:39:31.313+0000 I SHARDING [conn14] remotely refreshing metadata for db8.coll8 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac352fd2c1f750d15483a3, current metadata version is 2|5||55ac352fd2c1f750d15483a3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.314+0000 m31200| 2015-07-19T23:39:31.314+0000 W SHARDING [conn14] no chunks found when reloading db8.coll8, previous version was 0|0||55ac352fd2c1f750d15483a3, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.314+0000 m31200| 2015-07-19T23:39:31.314+0000 I SHARDING [conn14] dropping metadata for db8.coll8 at shard version 2|5||55ac352fd2c1f750d15483a3, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.314+0000 m30999| 2015-07-19T23:39:31.314+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:31.314+0000-55ac3533d2c1f750d15483a7", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349171314), what: "dropCollection", ns: "db8.coll8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.365+0000 m30999| 2015-07-19T23:39:31.365+0000 I SHARDING [conn1] distributed lock 'db8.coll8/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.417+0000 m30999| 2015-07-19T23:39:31.417+0000 I COMMAND [conn1] DROP DATABASE: db8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.417+0000 m30999| 2015-07-19T23:39:31.417+0000 I SHARDING [conn1] DBConfig::dropDatabase: db8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.417+0000 m30999| 2015-07-19T23:39:31.417+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:31.417+0000-55ac3533d2c1f750d15483a8", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349171417), what: "dropDatabase.start", ns: "db8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.519+0000 m30999| 2015-07-19T23:39:31.518+0000 I SHARDING [conn1] DBConfig::dropDatabase: db8 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.520+0000 m31200| 2015-07-19T23:39:31.518+0000 I COMMAND [conn111] dropDatabase db8 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.520+0000 m31200| 2015-07-19T23:39:31.519+0000 I COMMAND [conn111] dropDatabase db8 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.520+0000 m31200| 2015-07-19T23:39:31.519+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 36 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 253ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.520+0000 m31200| 2015-07-19T23:39:31.519+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 253ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.521+0000 m30999| 2015-07-19T23:39:31.519+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:31.519+0000-55ac3533d2c1f750d15483a9", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349171519), what: "dropDatabase", ns: "db8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.521+0000 m31201| 2015-07-19T23:39:31.519+0000 I COMMAND [repl writer worker 11] dropDatabase db8 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.521+0000 m31201| 2015-07-19T23:39:31.519+0000 I COMMAND [repl writer worker 11] dropDatabase db8 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.521+0000 m31202| 2015-07-19T23:39:31.519+0000 I COMMAND [repl writer worker 1] dropDatabase db8 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.521+0000 m31202| 2015-07-19T23:39:31.519+0000 I COMMAND [repl writer worker 1] dropDatabase db8 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.576+0000 m31100| 2015-07-19T23:39:31.573+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 309ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.576+0000 
m31100| 2015-07-19T23:39:31.573+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 309ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.581+0000 m31100| 2015-07-19T23:39:31.581+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.586+0000 m31101| 2015-07-19T23:39:31.584+0000 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.586+0000 m31102| 2015-07-19T23:39:31.584+0000 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.604+0000 m31200| 2015-07-19T23:39:31.604+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.605+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.606+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.606+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.606+0000 jstests/concurrency/fsm_workloads/collmod.js [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.606+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.606+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.606+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.608+0000 m31202| 2015-07-19T23:39:31.607+0000 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.608+0000 m31201| 2015-07-19T23:39:31.607+0000 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.609+0000 m30999| 2015-07-19T23:39:31.609+0000 I SHARDING [conn1] distributed lock 'db9/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3533d2c1f750d15483aa [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.611+0000 m30999| 2015-07-19T23:39:31.611+0000 I SHARDING [conn1] Placing [db9] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.611+0000 m30999| 2015-07-19T23:39:31.611+0000 I SHARDING [conn1] Enabling sharding for database [db9] in config db [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.662+0000 m30999| 2015-07-19T23:39:31.662+0000 I SHARDING [conn1] distributed lock 'db9/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.669+0000 m31200| 2015-07-19T23:39:31.668+0000 I INDEX [conn28] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.669+0000 m31200| 2015-07-19T23:39:31.668+0000 I INDEX [conn28] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.673+0000 m31200| 2015-07-19T23:39:31.671+0000 I INDEX [conn28] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.674+0000 m30999| 2015-07-19T23:39:31.671+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db9.coll9", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.674+0000 m30999| 2015-07-19T23:39:31.672+0000 I SHARDING [conn1] distributed lock 'db9.coll9/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3533d2c1f750d15483ab [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.674+0000 m30999| 2015-07-19T23:39:31.673+0000 I SHARDING [conn1] enable sharding on: db9.coll9 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.676+0000 m30999| 2015-07-19T23:39:31.673+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:31.673+0000-55ac3533d2c1f750d15483ac", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349171673), what: "shardCollection.start", ns: "db9.coll9", details: { shardKey: { _id: "hashed" }, collection: "db9.coll9", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.677+0000 m31202| 2015-07-19T23:39:31.675+0000 I INDEX [repl writer worker 9] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.677+0000 m31202| 2015-07-19T23:39:31.675+0000 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.677+0000 m31201| 2015-07-19T23:39:31.675+0000 I INDEX [repl writer worker 15] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.677+0000 m31201| 2015-07-19T23:39:31.675+0000 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.677+0000 m31202| 2015-07-19T23:39:31.677+0000 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.678+0000 m31201| 2015-07-19T23:39:31.677+0000 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.724+0000 m30999| 2015-07-19T23:39:31.724+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db9.coll9 using new epoch 55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.826+0000 m30999| 2015-07-19T23:39:31.826+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 45 version: 1|1||55ac3533d2c1f750d15483ad based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.878+0000 m30999| 2015-07-19T23:39:31.877+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 46 version: 1|1||55ac3533d2c1f750d15483ad based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.879+0000 m31200| 2015-07-19T23:39:31.878+0000 I SHARDING [conn117] remotely refreshing metadata for db9.coll9 with requested shard version 1|1||55ac3533d2c1f750d15483ad, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.879+0000 m31200| 2015-07-19T23:39:31.879+0000 I SHARDING [conn117] collection db9.coll9 was previously unsharded, new metadata loaded with shard version 1|1||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.879+0000 m31200| 2015-07-19T23:39:31.879+0000 I SHARDING [conn117] collection version was loaded at version 1|1||55ac3533d2c1f750d15483ad, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.879+0000 m30999| 2015-07-19T23:39:31.879+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:31.879+0000-55ac3533d2c1f750d15483ae", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349171879), what: "shardCollection", ns: "db9.coll9", details: { version: "1|1||55ac3533d2c1f750d15483ad" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.936+0000 m30999| 2015-07-19T23:39:31.930+0000 I SHARDING [conn1] distributed lock 'db9.coll9/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.995+0000 m30999| 2015-07-19T23:39:31.930+0000 I SHARDING [conn1] moving chunk ns: db9.coll9 moving ( ns: db9.coll9, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.996+0000 m31200| 2015-07-19T23:39:31.930+0000 I SHARDING [conn97] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.996+0000 m31200| 2015-07-19T23:39:31.931+0000 I SHARDING [conn97] received moveChunk request: { moveChunk: "db9.coll9", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3533d2c1f750d15483ad') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.996+0000 m31200| 2015-07-19T23:39:31.932+0000 I SHARDING [conn97] distributed lock 'db9.coll9/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3533d9a63f6196b17272 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.996+0000 m31200| 2015-07-19T23:39:31.932+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:31.932+0000-55ac3533d9a63f6196b17273", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349171932), what: "moveChunk.start", ns: "db9.coll9", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.996+0000 m31200| 2015-07-19T23:39:31.983+0000 I SHARDING [conn97] remotely refreshing metadata for db9.coll9 based on current shard version 1|1||55ac3533d2c1f750d15483ad, current metadata version is 1|1||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.996+0000 m31200| 2015-07-19T23:39:31.983+0000 I SHARDING [conn97] metadata of collection db9.coll9 already up to date (shard version : 1|1||55ac3533d2c1f750d15483ad, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.996+0000 m31200| 2015-07-19T23:39:31.983+0000 I SHARDING [conn97] moveChunk request accepted at version 1|1||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.996+0000 m31200| 2015-07-19T23:39:31.984+0000 I SHARDING [conn97] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.997+0000 m31100| 2015-07-19T23:39:31.984+0000 I SHARDING [conn19] remotely refreshing metadata for db9.coll9, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.997+0000 m31100| 2015-07-19T23:39:31.984+0000 I SHARDING [conn19] collection db9.coll9 was previously unsharded, new metadata loaded with shard version 0|0||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.997+0000 m31100| 2015-07-19T23:39:31.984+0000 I SHARDING [conn19] collection version was loaded at version 1|1||55ac3533d2c1f750d15483ad, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.997+0000 m31100| 2015-07-19T23:39:31.984+0000 I SHARDING [migrateThread] starting receiving-end of migration of 
chunk { _id: MinKey } -> { _id: 0 } for collection db9.coll9 from test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 at epoch 55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.997+0000 m31200| 2015-07-19T23:39:31.986+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.997+0000 m31100| 2015-07-19T23:39:31.986+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 400ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.998+0000 m31100| 2015-07-19T23:39:31.986+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 400ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.998+0000 m31200| 2015-07-19T23:39:31.988+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.998+0000 m31100| 2015-07-19T23:39:31.992+0000 I INDEX [migrateThread] build index on: db9.coll9 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.998+0000 m31100| 2015-07-19T23:39:31.993+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.998+0000 m31200| 2015-07-19T23:39:31.993+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.998+0000 m31100| 2015-07-19T23:39:31.994+0000 I INDEX [migrateThread] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.998+0000 m31100| 2015-07-19T23:39:31.994+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.998+0000 m31100| 2015-07-19T23:39:31.997+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.998+0000 m31100| 2015-07-19T23:39:31.997+0000 I SHARDING [migrateThread] Deleter starting delete for: db9.coll9 from { _id: MinKey } -> { _id: 0 }, with opId: 34443 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:31.999+0000 m31100| 2015-07-19T23:39:31.998+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db9.coll9 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.001+0000 m31200| 2015-07-19T23:39:32.001+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.002+0000 m31102| 2015-07-19T23:39:32.001+0000 I INDEX [repl writer worker 10] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.002+0000 m31102| 2015-07-19T23:39:32.001+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.002+0000 m31101| 2015-07-19T23:39:32.002+0000 I INDEX [repl writer worker 2] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.002+0000 m31101| 2015-07-19T23:39:32.002+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.003+0000 m31102| 2015-07-19T23:39:32.003+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.004+0000 m31101| 2015-07-19T23:39:32.003+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.004+0000 m31100| 2015-07-19T23:39:32.003+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.004+0000 m31100| 2015-07-19T23:39:32.003+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db9.coll9' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.018+0000 m31200| 2015-07-19T23:39:32.017+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.018+0000 m31200| 2015-07-19T23:39:32.017+0000 I SHARDING [conn97] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.018+0000 m31200| 2015-07-19T23:39:32.017+0000 I SHARDING [conn97] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.018+0000 m31200| 2015-07-19T23:39:32.017+0000 I SHARDING [conn97] moveChunk setting version to: 2|0||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.024+0000 m31100| 2015-07-19T23:39:32.024+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db9.coll9' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.024+0000 m31100| 2015-07-19T23:39:32.024+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:32.024+0000-55ac353468c42881b59cba40", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349172024), what: "moveChunk.to", ns: "db9.coll9", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 12, step 2 of 5: 5, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 20, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.075+0000 m31200| 2015-07-19T23:39:32.075+0000 I SHARDING [conn97] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db9.coll9", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.076+0000 m31200| 2015-07-19T23:39:32.075+0000 I SHARDING [conn97] moveChunk updating self version to: 2|1||55ac3533d2c1f750d15483ad through { _id: 0 } -> { _id: MaxKey } for collection 'db9.coll9' [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.076+0000 m31200| 2015-07-19T23:39:32.076+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:32.076+0000-55ac3534d9a63f6196b17274", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349172076), what: "moveChunk.commit", ns: "db9.coll9", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.127+0000 m31200| 2015-07-19T23:39:32.127+0000 I SHARDING [conn97] MigrateFromStatus::done About to acquire global lock 
to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.127+0000 m31200| 2015-07-19T23:39:32.127+0000 I SHARDING [conn97] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.127+0000 m31200| 2015-07-19T23:39:32.127+0000 I SHARDING [conn97] Deleter starting delete for: db9.coll9 from { _id: MinKey } -> { _id: 0 }, with opId: 30981 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.128+0000 m31200| 2015-07-19T23:39:32.127+0000 I SHARDING [conn97] rangeDeleter deleted 0 documents for db9.coll9 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.128+0000 m31200| 2015-07-19T23:39:32.127+0000 I SHARDING [conn97] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.128+0000 m31200| 2015-07-19T23:39:32.127+0000 I SHARDING [conn97] distributed lock 'db9.coll9/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.128+0000 m31200| 2015-07-19T23:39:32.127+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:32.127+0000-55ac3534d9a63f6196b17275", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349172127), what: "moveChunk.from", ns: "db9.coll9", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 32, step 5 of 6: 109, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.179+0000 m31200| 2015-07-19T23:39:32.178+0000 I COMMAND [conn97] command db9.coll9 command: moveChunk { moveChunk: "db9.coll9", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3533d2c1f750d15483ad') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 247ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.179+0000 m30999| 2015-07-19T23:39:32.179+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 47 version: 2|1||55ac3533d2c1f750d15483ad based on: 1|1||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.180+0000 m31100| 2015-07-19T23:39:32.179+0000 I SHARDING [conn43] received splitChunk request: { splitChunk: "db9.coll9", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3533d2c1f750d15483ad') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.181+0000 m31100| 2015-07-19T23:39:32.180+0000 I SHARDING [conn43] distributed lock 'db9.coll9/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac353468c42881b59cba41 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.181+0000 m31100| 2015-07-19T23:39:32.180+0000 I SHARDING [conn43] remotely 
refreshing metadata for db9.coll9 based on current shard version 0|0||55ac3533d2c1f750d15483ad, current metadata version is 1|1||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.181+0000 m31100| 2015-07-19T23:39:32.181+0000 I SHARDING [conn43] updating metadata for db9.coll9 from shard version 0|0||55ac3533d2c1f750d15483ad to shard version 2|0||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.181+0000 m31100| 2015-07-19T23:39:32.181+0000 I SHARDING [conn43] collection version was loaded at version 2|1||55ac3533d2c1f750d15483ad, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.182+0000 m31100| 2015-07-19T23:39:32.181+0000 I SHARDING [conn43] splitChunk accepted at version 2|0||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.182+0000 m31100| 2015-07-19T23:39:32.182+0000 I SHARDING [conn43] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:32.182+0000-55ac353468c42881b59cba42", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47680", time: new Date(1437349172182), what: "split", ns: "db9.coll9", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac3533d2c1f750d15483ad') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac3533d2c1f750d15483ad') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.233+0000 m31100| 2015-07-19T23:39:32.233+0000 I SHARDING [conn43] distributed lock 'db9.coll9/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.234+0000 m30999| 2015-07-19T23:39:32.233+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 48 version: 2|3||55ac3533d2c1f750d15483ad based on: 2|1||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.234+0000 m31200| 2015-07-19T23:39:32.234+0000 I SHARDING [conn97] received splitChunk request: { splitChunk: "db9.coll9", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3533d2c1f750d15483ad') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.235+0000 m31200| 2015-07-19T23:39:32.235+0000 I SHARDING [conn97] distributed lock 'db9.coll9/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3534d9a63f6196b17276 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.235+0000 m31200| 2015-07-19T23:39:32.235+0000 I SHARDING [conn97] remotely refreshing metadata for db9.coll9 based on current shard version 2|0||55ac3533d2c1f750d15483ad, current metadata version is 2|0||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.235+0000 m31200| 2015-07-19T23:39:32.235+0000 I SHARDING [conn97] updating metadata for db9.coll9 from shard version 2|0||55ac3533d2c1f750d15483ad to shard version 2|1||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.236+0000 m31200| 2015-07-19T23:39:32.235+0000 I SHARDING [conn97] collection version was loaded at version 2|3||55ac3533d2c1f750d15483ad, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.236+0000 m31200| 2015-07-19T23:39:32.235+0000 I SHARDING [conn97] splitChunk accepted at version 
2|1||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.236+0000 m31200| 2015-07-19T23:39:32.236+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:32.236+0000-55ac3534d9a63f6196b17277", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349172236), what: "split", ns: "db9.coll9", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac3533d2c1f750d15483ad') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac3533d2c1f750d15483ad') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.287+0000 m31200| 2015-07-19T23:39:32.287+0000 I SHARDING [conn97] distributed lock 'db9.coll9/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.288+0000 m30999| 2015-07-19T23:39:32.288+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 49 version: 2|5||55ac3533d2c1f750d15483ad based on: 2|3||55ac3533d2c1f750d15483ad [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.329+0000 m31100| 2015-07-19T23:39:32.317+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 314ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.331+0000 m31200| 2015-07-19T23:39:32.317+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:3 reslen:341 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 641ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.333+0000 m31200| 2015-07-19T23:39:32.317+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:4 reslen:448 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 641ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.336+0000 m31100| 2015-07-19T23:39:32.317+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 315ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.460+0000 m31200| 2015-07-19T23:39:32.460+0000 I COMMAND [conn28] command db9.$cmd command: insert { insert: "coll9", documents: 474, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('55ac3533d2c1f750d15483ad') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 482, w: 482 } }, Database: { acquireCount: { w: 482 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 474 } }, oplog: { acquireCount: { w: 474 } } } protocol:op_command 143ms 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.492+0000 m31100| 2015-07-19T23:39:32.492+0000 I COMMAND [conn16] command db9.$cmd command: insert { insert: "coll9", documents: 526, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('55ac3533d2c1f750d15483ad') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 535, w: 535 } }, Database: { acquireCount: { w: 535 } }, Collection: { acquireCount: { w: 9 } }, Metadata: { acquireCount: { w: 526 } }, oplog: { acquireCount: { w: 526 } } } protocol:op_command 175ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.496+0000 m31200| 2015-07-19T23:39:32.496+0000 I INDEX [conn117] build index on: db9.coll9 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db9.coll9", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.496+0000 m31200| 2015-07-19T23:39:32.496+0000 I INDEX [conn117] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.497+0000 m31100| 2015-07-19T23:39:32.496+0000 I INDEX [conn23] build index on: db9.coll9 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db9.coll9", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.497+0000 m31100| 2015-07-19T23:39:32.496+0000 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.499+0000 m31200| 2015-07-19T23:39:32.499+0000 I INDEX [conn117] build index done. scanned 474 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.500+0000 m31100| 2015-07-19T23:39:32.500+0000 I INDEX [conn23] build index done. scanned 526 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.501+0000 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.502+0000 m31101| 2015-07-19T23:39:32.502+0000 I INDEX [repl writer worker 1] build index on: db9.coll9 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db9.coll9", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.503+0000 m31101| 2015-07-19T23:39:32.502+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.506+0000 m31102| 2015-07-19T23:39:32.505+0000 I INDEX [repl writer worker 4] build index on: db9.coll9 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db9.coll9", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.506+0000 m31102| 2015-07-19T23:39:32.506+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.506+0000 m31202| 2015-07-19T23:39:32.506+0000 I INDEX [repl writer worker 5] build index on: db9.coll9 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db9.coll9", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.506+0000 m31202| 2015-07-19T23:39:32.506+0000 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.507+0000 m31201| 2015-07-19T23:39:32.507+0000 I INDEX [repl writer worker 6] build index on: db9.coll9 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db9.coll9", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.508+0000 m31201| 2015-07-19T23:39:32.507+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.510+0000 m31101| 2015-07-19T23:39:32.510+0000 I INDEX [repl writer worker 1] build index done. scanned 526 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.511+0000 m31202| 2015-07-19T23:39:32.511+0000 I INDEX [repl writer worker 5] build index done. scanned 474 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.511+0000 m31102| 2015-07-19T23:39:32.511+0000 I INDEX [repl writer worker 4] build index done. scanned 526 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.511+0000 m31201| 2015-07-19T23:39:32.511+0000 I INDEX [repl writer worker 6] build index done. scanned 474 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.612+0000 m30999| 2015-07-19T23:39:32.612+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57344 #55 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.657+0000 m30998| 2015-07-19T23:39:32.656+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35986 #54 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.657+0000 m30998| 2015-07-19T23:39:32.656+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35987 #55 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.658+0000 m30999| 2015-07-19T23:39:32.658+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57347 #56 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.658+0000 m30998| 2015-07-19T23:39:32.658+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35989 #56 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.666+0000 m30999| 2015-07-19T23:39:32.665+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57349 #57 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.675+0000 m30999| 2015-07-19T23:39:32.675+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57350 #58 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.677+0000 m30998| 2015-07-19T23:39:32.677+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35992 #57 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.681+0000 m30998| 2015-07-19T23:39:32.681+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35993 #58 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.682+0000 m30999| 2015-07-19T23:39:32.681+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57353 #59 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.686+0000 setting random seed: 5999840539880 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.686+0000 setting random seed: 9656358337961 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.686+0000 setting random seed: 3044977374374 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.689+0000 setting random seed: 6498508523218 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.689+0000 setting random seed: 9565714881755 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.689+0000 setting random seed: 2062000213190 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.689+0000 setting random seed: 5363423367962 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.689+0000 setting random seed: 3168111424893 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.689+0000 m31200| 2015-07-19T23:39:32.688+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 184ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.690+0000 m30998| 2015-07-19T23:39:32.688+0000 I SHARDING [conn54] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 12 version: 2|5||55ac3533d2c1f750d15483ad based on: (empty) [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:32.690+0000 m31100| 2015-07-19T23:39:32.688+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 185ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.690+0000 setting random seed: 1913308333605
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.690+0000 m31200| 2015-07-19T23:39:32.688+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 183ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.690+0000 setting random seed: 1147870803251
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.691+0000 m31100| 2015-07-19T23:39:32.688+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 185ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.691+0000 m31200| 2015-07-19T23:39:32.689+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39756 #131 (59 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.753+0000 m30999| 2015-07-19T23:39:32.752+0000 I NETWORK [conn58] end connection 10.139.123.131:57350 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.757+0000 m30998| 2015-07-19T23:39:32.757+0000 I NETWORK [conn57] end connection 10.139.123.131:35992 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.761+0000 m30998| 2015-07-19T23:39:32.760+0000 I NETWORK [conn54] end connection 10.139.123.131:35986 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.761+0000 m30998| 2015-07-19T23:39:32.761+0000 I NETWORK [conn56] end connection 10.139.123.131:35989 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.781+0000 m31200| 2015-07-19T23:39:32.781+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39757 #132 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.782+0000 m30999| 2015-07-19T23:39:32.782+0000 I NETWORK [conn56] end connection 10.139.123.131:57347 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.787+0000 m30998| 2015-07-19T23:39:32.787+0000 I NETWORK [conn55] end connection 10.139.123.131:35987 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.790+0000 m31200| 2015-07-19T23:39:32.789+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39758 #133 (61 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.801+0000 m30999| 2015-07-19T23:39:32.801+0000 I NETWORK [conn55] end connection 10.139.123.131:57344 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.810+0000 m30999| 2015-07-19T23:39:32.810+0000 I NETWORK [conn57] end connection 10.139.123.131:57349 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.820+0000 m30998| 2015-07-19T23:39:32.820+0000 I NETWORK [conn58] end connection 10.139.123.131:35993 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.831+0000 m30999| 2015-07-19T23:39:32.831+0000 I NETWORK [conn59] end connection 10.139.123.131:57353 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.939+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.940+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.940+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.940+0000 jstests/concurrency/fsm_workloads/collmod.js: Workload completed in 438 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.940+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.940+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.940+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.941+0000 m30999| 2015-07-19T23:39:32.940+0000 I COMMAND [conn1] DROP: db9.coll9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.941+0000 m30999| 2015-07-19T23:39:32.940+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:32.940+0000-55ac3534d2c1f750d15483af", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349172940), what: "dropCollection.start", ns: "db9.coll9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.993+0000 m30999| 2015-07-19T23:39:32.993+0000 I SHARDING [conn1] distributed lock 'db9.coll9/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3534d2c1f750d15483b0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.993+0000 m31100| 2015-07-19T23:39:32.993+0000 I COMMAND [conn12] CMD: drop db9.coll9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.993+0000 m31100| 2015-07-19T23:39:32.993+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 164ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.994+0000 m31100| 2015-07-19T23:39:32.993+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 164ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.994+0000 m31200| 2015-07-19T23:39:32.994+0000 I COMMAND [conn14] CMD: drop db9.coll9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.995+0000 m31200| 2015-07-19T23:39:32.994+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 164ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.995+0000 m31200| 2015-07-19T23:39:32.994+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 164ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.996+0000 m31101| 2015-07-19T23:39:32.996+0000 I COMMAND [repl writer worker 15] CMD: drop db9.coll9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.996+0000 m31102| 2015-07-19T23:39:32.996+0000 I COMMAND [repl writer worker 12] CMD: drop db9.coll9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.997+0000 m31201| 2015-07-19T23:39:32.997+0000 I COMMAND [repl writer worker 11] CMD: drop db9.coll9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:32.998+0000 m31202| 2015-07-19T23:39:32.997+0000 I COMMAND [repl writer worker 12] CMD: drop db9.coll9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.047+0000 m31100| 2015-07-19T23:39:33.046+0000 I SHARDING [conn12] remotely refreshing metadata for db9.coll9 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac3533d2c1f750d15483ad, current metadata version is 2|3||55ac3533d2c1f750d15483ad
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.047+0000 m31100| 2015-07-19T23:39:33.047+0000 W SHARDING [conn12] no chunks found when reloading db9.coll9, previous version was 0|0||55ac3533d2c1f750d15483ad, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.047+0000 m31100| 2015-07-19T23:39:33.047+0000 I SHARDING [conn12] dropping metadata for db9.coll9 at shard version 2|3||55ac3533d2c1f750d15483ad, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.048+0000 m31200| 2015-07-19T23:39:33.047+0000 I SHARDING [conn14] remotely refreshing metadata for db9.coll9 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac3533d2c1f750d15483ad, current metadata version is 2|5||55ac3533d2c1f750d15483ad
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.048+0000 m31200| 2015-07-19T23:39:33.048+0000 W SHARDING [conn14] no chunks found when reloading db9.coll9, previous version was 0|0||55ac3533d2c1f750d15483ad, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.048+0000 m31200| 2015-07-19T23:39:33.048+0000 I SHARDING [conn14] dropping metadata for db9.coll9 at shard version 2|5||55ac3533d2c1f750d15483ad, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.048+0000 m30999| 2015-07-19T23:39:33.048+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.048+0000-55ac3535d2c1f750d15483b1", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349173048), what: "dropCollection", ns: "db9.coll9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.100+0000 m30999| 2015-07-19T23:39:33.099+0000 I SHARDING [conn1] distributed lock 'db9.coll9/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.151+0000 m30999| 2015-07-19T23:39:33.151+0000 I COMMAND [conn1] DROP DATABASE: db9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.151+0000 m30999| 2015-07-19T23:39:33.151+0000 I SHARDING [conn1] DBConfig::dropDatabase: db9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.151+0000 m30999| 2015-07-19T23:39:33.151+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.151+0000-55ac3535d2c1f750d15483b2", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349173151), what: "dropDatabase.start", ns: "db9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.253+0000 m30999| 2015-07-19T23:39:33.252+0000 I SHARDING [conn1] DBConfig::dropDatabase: db9 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.253+0000 m31200| 2015-07-19T23:39:33.253+0000 I COMMAND [conn111] dropDatabase db9 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.253+0000 m31200| 2015-07-19T23:39:33.253+0000 I COMMAND [conn111] dropDatabase db9 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.253+0000 m31200| 2015-07-19T23:39:33.253+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 114 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 253ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.254+0000 m31200| 2015-07-19T23:39:33.253+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:108 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 155 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 253ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.254+0000 m30999| 2015-07-19T23:39:33.253+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.253+0000-55ac3535d2c1f750d15483b3", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349173253), what: "dropDatabase", ns: "db9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.254+0000 m31202| 2015-07-19T23:39:33.253+0000 I COMMAND [repl writer worker 14] dropDatabase db9 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.254+0000 m31202| 2015-07-19T23:39:33.253+0000 I COMMAND [repl writer worker 14] dropDatabase db9 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.255+0000 m31201| 2015-07-19T23:39:33.254+0000 I COMMAND [repl writer worker 1] dropDatabase db9 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.255+0000 m31201| 2015-07-19T23:39:33.254+0000 I COMMAND [repl writer worker 1] dropDatabase db9 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.308+0000 m31100| 2015-07-19T23:39:33.308+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 309ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.308+0000 m31100| 2015-07-19T23:39:33.308+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 309ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.316+0000 m31100| 2015-07-19T23:39:33.316+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.319+0000 m31102| 2015-07-19T23:39:33.319+0000 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.319+0000 m31101| 2015-07-19T23:39:33.319+0000 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.329+0000 m31200| 2015-07-19T23:39:33.329+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.330+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.331+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.331+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.331+0000 jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.331+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.331+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.331+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.332+0000 m31201| 2015-07-19T23:39:33.332+0000 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.333+0000 m31202| 2015-07-19T23:39:33.332+0000 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.334+0000 m30999| 2015-07-19T23:39:33.333+0000 I SHARDING [conn1] distributed lock 'db10/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3535d2c1f750d15483b4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.335+0000 m30999| 2015-07-19T23:39:33.335+0000 I SHARDING [conn1] Placing [db10] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.336+0000 m30999| 2015-07-19T23:39:33.335+0000 I SHARDING [conn1] Enabling sharding for database [db10] in config db
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.387+0000 m30999| 2015-07-19T23:39:33.386+0000 I SHARDING [conn1] distributed lock 'db10/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.393+0000 m31200| 2015-07-19T23:39:33.392+0000 I INDEX [conn28] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.393+0000 m31200| 2015-07-19T23:39:33.392+0000 I INDEX [conn28] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.394+0000 m31200| 2015-07-19T23:39:33.394+0000 I INDEX [conn28] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.395+0000 m30999| 2015-07-19T23:39:33.394+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db10.coll10", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.396+0000 m30999| 2015-07-19T23:39:33.396+0000 I SHARDING [conn1] distributed lock 'db10.coll10/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3535d2c1f750d15483b5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.397+0000 m30999| 2015-07-19T23:39:33.397+0000 I SHARDING [conn1] enable sharding on: db10.coll10 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.397+0000 m30999| 2015-07-19T23:39:33.397+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.397+0000-55ac3535d2c1f750d15483b6", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349173397), what: "shardCollection.start", ns: "db10.coll10", details: { shardKey: { _id: "hashed" }, collection: "db10.coll10", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.398+0000 m31202| 2015-07-19T23:39:33.398+0000 I INDEX [repl writer worker 13] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.398+0000 m31202| 2015-07-19T23:39:33.398+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.402+0000 m31202| 2015-07-19T23:39:33.401+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.402+0000 m31201| 2015-07-19T23:39:33.402+0000 I INDEX [repl writer worker 10] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.402+0000 m31201| 2015-07-19T23:39:33.402+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.403+0000 m31201| 2015-07-19T23:39:33.403+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.448+0000 m30999| 2015-07-19T23:39:33.447+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db10.coll10 using new epoch 55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.550+0000 m30999| 2015-07-19T23:39:33.549+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 50 version: 1|1||55ac3535d2c1f750d15483b7 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.601+0000 m30999| 2015-07-19T23:39:33.601+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 51 version: 1|1||55ac3535d2c1f750d15483b7 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.602+0000 m31200| 2015-07-19T23:39:33.602+0000 I SHARDING [conn117] remotely refreshing metadata for db10.coll10 with requested shard version 1|1||55ac3535d2c1f750d15483b7, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.602+0000 m31200| 2015-07-19T23:39:33.602+0000 I SHARDING [conn117] collection db10.coll10 was previously unsharded, new metadata loaded with shard version 1|1||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.602+0000 m31200| 2015-07-19T23:39:33.602+0000 I SHARDING [conn117] collection version was loaded at version 1|1||55ac3535d2c1f750d15483b7, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.603+0000 m30999| 2015-07-19T23:39:33.602+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.602+0000-55ac3535d2c1f750d15483b8", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349173602), what: "shardCollection", ns: "db10.coll10", details: { version: "1|1||55ac3535d2c1f750d15483b7" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.654+0000 m30999| 2015-07-19T23:39:33.654+0000 I SHARDING [conn1] distributed lock 'db10.coll10/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.655+0000 m30999| 2015-07-19T23:39:33.654+0000 I SHARDING [conn1] moving chunk ns: db10.coll10 moving ( ns: db10.coll10, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.655+0000 m31200| 2015-07-19T23:39:33.654+0000 I SHARDING [conn133] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.655+0000 m31200| 2015-07-19T23:39:33.654+0000 I SHARDING [conn133] received moveChunk request: { moveChunk: "db10.coll10", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3535d2c1f750d15483b7') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.656+0000 m31200| 2015-07-19T23:39:33.656+0000 I SHARDING [conn133] distributed lock 'db10.coll10/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3535d9a63f6196b17279
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.656+0000 m31200| 2015-07-19T23:39:33.656+0000 I SHARDING [conn133] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.656+0000-55ac3535d9a63f6196b1727a", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39758", time: new Date(1437349173656), what: "moveChunk.start", ns: "db10.coll10", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.707+0000 m31200| 2015-07-19T23:39:33.706+0000 I SHARDING [conn133] remotely refreshing metadata for db10.coll10 based on current shard version 1|1||55ac3535d2c1f750d15483b7, current metadata version is 1|1||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.707+0000 m31200| 2015-07-19T23:39:33.707+0000 I SHARDING [conn133] metadata of collection db10.coll10 already up to date (shard version : 1|1||55ac3535d2c1f750d15483b7, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.707+0000 m31200| 2015-07-19T23:39:33.707+0000 I SHARDING [conn133] moveChunk request accepted at version 1|1||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.708+0000 m31200| 2015-07-19T23:39:33.708+0000 I SHARDING [conn133] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.708+0000 m31100| 2015-07-19T23:39:33.708+0000 I SHARDING [conn19] remotely refreshing metadata for db10.coll10, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.708+0000 m31100| 2015-07-19T23:39:33.708+0000 I SHARDING [conn19] collection db10.coll10 was previously unsharded, new metadata loaded with shard version 0|0||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.708+0000 m31100| 2015-07-19T23:39:33.708+0000 I SHARDING [conn19] collection version was loaded at version 1|1||55ac3535d2c1f750d15483b7, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.709+0000 m31100| 2015-07-19T23:39:33.708+0000 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for collection db10.coll10 from test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 at epoch 55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.710+0000 m31200| 2015-07-19T23:39:33.710+0000 I SHARDING [conn133] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.711+0000 m31100| 2015-07-19T23:39:33.711+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 389ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.711+0000 m31100| 2015-07-19T23:39:33.711+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 389ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.712+0000 m31200| 2015-07-19T23:39:33.712+0000 I SHARDING [conn133] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.717+0000 m31200| 2015-07-19T23:39:33.716+0000 I SHARDING [conn133] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.717+0000 m31100| 2015-07-19T23:39:33.717+0000 I INDEX [migrateThread] build index on: db10.coll10 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db10.coll10" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.717+0000 m31100| 2015-07-19T23:39:33.717+0000 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.719+0000 m31100| 2015-07-19T23:39:33.719+0000 I INDEX [migrateThread] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.719+0000 m31100| 2015-07-19T23:39:33.719+0000 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.721+0000 m31100| 2015-07-19T23:39:33.721+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.721+0000 m31100| 2015-07-19T23:39:33.721+0000 I SHARDING [migrateThread] Deleter starting delete for: db10.coll10 from { _id: MinKey } -> { _id: 0 }, with opId: 35514
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.722+0000 m31100| 2015-07-19T23:39:33.721+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db10.coll10 from { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.725+0000 m31200| 2015-07-19T23:39:33.725+0000 I SHARDING [conn133] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.726+0000 m31102| 2015-07-19T23:39:33.725+0000 I INDEX [repl writer worker 10] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.726+0000 m31102| 2015-07-19T23:39:33.725+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.726+0000 m31101| 2015-07-19T23:39:33.726+0000 I INDEX [repl writer worker 14] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.726+0000 m31101| 2015-07-19T23:39:33.726+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.727+0000 m31102| 2015-07-19T23:39:33.727+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.727+0000 m31101| 2015-07-19T23:39:33.727+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.728+0000 m31100| 2015-07-19T23:39:33.728+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.728+0000 m31100| 2015-07-19T23:39:33.728+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db10.coll10' { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.741+0000 m31200| 2015-07-19T23:39:33.741+0000 I SHARDING [conn133] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.741+0000 m31200| 2015-07-19T23:39:33.741+0000 I SHARDING [conn133] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.741+0000 m31200| 2015-07-19T23:39:33.741+0000 I SHARDING [conn133] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.742+0000 m31200| 2015-07-19T23:39:33.741+0000 I SHARDING [conn133] moveChunk setting version to: 2|0||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.748+0000 m31100| 2015-07-19T23:39:33.748+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db10.coll10' { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.749+0000 m31100| 2015-07-19T23:39:33.748+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.748+0000-55ac353568c42881b59cba43", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349173748), what: "moveChunk.to", ns: "db10.coll10", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 12, step 2 of 5: 6, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 20, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.800+0000 m31200| 2015-07-19T23:39:33.799+0000 I SHARDING [conn133] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db10.coll10", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.800+0000 m31200| 2015-07-19T23:39:33.799+0000 I SHARDING [conn133] moveChunk updating self version to: 2|1||55ac3535d2c1f750d15483b7 through { _id: 0 } -> { _id: MaxKey } for collection 'db10.coll10'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.800+0000 m31200| 2015-07-19T23:39:33.800+0000 I SHARDING [conn133] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.800+0000-55ac3535d9a63f6196b1727b", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39758", time: new Date(1437349173800), what: "moveChunk.commit", ns: "db10.coll10", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.851+0000 m31200| 2015-07-19T23:39:33.851+0000 I SHARDING [conn133] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.851+0000 m31200| 2015-07-19T23:39:33.851+0000 I SHARDING [conn133] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.852+0000 m31200| 2015-07-19T23:39:33.851+0000 I SHARDING [conn133] Deleter starting delete for: db10.coll10 from { _id: MinKey } -> { _id: 0 }, with opId: 32009
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.852+0000 m31200| 2015-07-19T23:39:33.851+0000 I SHARDING [conn133] rangeDeleter deleted 0 documents for db10.coll10 from { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.852+0000 m31200| 2015-07-19T23:39:33.851+0000 I SHARDING [conn133] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.852+0000 m31200| 2015-07-19T23:39:33.851+0000 I SHARDING [conn133] distributed lock 'db10.coll10/ip-10-139-123-131:31200:1437349131:182555922' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.852+0000 m31200| 2015-07-19T23:39:33.852+0000 I SHARDING [conn133] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.852+0000-55ac3535d9a63f6196b1727c", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39758", time: new Date(1437349173852), what: "moveChunk.from", ns: "db10.coll10", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 32, step 5 of 6: 109, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.903+0000 m31200| 2015-07-19T23:39:33.902+0000 I COMMAND [conn133] command db10.coll10 command: moveChunk { moveChunk: "db10.coll10", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3535d2c1f750d15483b7') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 248ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.903+0000 m30999| 2015-07-19T23:39:33.903+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 52 version: 2|1||55ac3535d2c1f750d15483b7 based on: 1|1||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.904+0000 m31100| 2015-07-19T23:39:33.903+0000 I SHARDING [conn43] received splitChunk request: { splitChunk: "db10.coll10", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3535d2c1f750d15483b7') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.905+0000 m31100| 2015-07-19T23:39:33.904+0000 I SHARDING [conn43] distributed lock 'db10.coll10/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac353568c42881b59cba44
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.905+0000 m31100| 2015-07-19T23:39:33.904+0000 I SHARDING [conn43] remotely refreshing metadata for db10.coll10 based on current shard version 0|0||55ac3535d2c1f750d15483b7, current metadata version is 1|1||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.905+0000 m31100| 2015-07-19T23:39:33.905+0000 I SHARDING [conn43] updating metadata for db10.coll10 from shard version 0|0||55ac3535d2c1f750d15483b7 to shard version 2|0||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.905+0000 m31100| 2015-07-19T23:39:33.905+0000 I SHARDING [conn43] collection version was loaded at version 2|1||55ac3535d2c1f750d15483b7, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.905+0000 m31100| 2015-07-19T23:39:33.905+0000 I SHARDING [conn43] splitChunk accepted at version 2|0||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.906+0000 m31100| 2015-07-19T23:39:33.905+0000 I SHARDING [conn43] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.905+0000-55ac353568c42881b59cba45", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47680", time: new Date(1437349173905), what: "split", ns: "db10.coll10", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac3535d2c1f750d15483b7') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac3535d2c1f750d15483b7') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.957+0000 m31100| 2015-07-19T23:39:33.957+0000 I SHARDING [conn43] distributed lock 'db10.coll10/ip-10-139-123-131:31100:1437349130:1993228155' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.958+0000 m30999| 2015-07-19T23:39:33.958+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 53 version: 2|3||55ac3535d2c1f750d15483b7 based on: 2|1||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.958+0000 m31200| 2015-07-19T23:39:33.958+0000 I SHARDING [conn133] received splitChunk request: { splitChunk: "db10.coll10", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3535d2c1f750d15483b7') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.959+0000 m31200| 2015-07-19T23:39:33.959+0000 I SHARDING [conn133] distributed lock 'db10.coll10/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3535d9a63f6196b1727d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.959+0000 m31200| 2015-07-19T23:39:33.959+0000 I SHARDING [conn133] remotely refreshing metadata for db10.coll10 based on current shard version 2|0||55ac3535d2c1f750d15483b7, current metadata version is 2|0||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.960+0000 m31200| 2015-07-19T23:39:33.959+0000 I SHARDING [conn133] updating metadata for db10.coll10 from shard version 2|0||55ac3535d2c1f750d15483b7 to shard version 2|1||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.960+0000 m31200| 2015-07-19T23:39:33.959+0000 I SHARDING [conn133] collection version was loaded at version 2|3||55ac3535d2c1f750d15483b7, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.960+0000 m31200| 2015-07-19T23:39:33.960+0000 I SHARDING [conn133] splitChunk accepted at version 2|1||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:33.960+0000 m31200| 2015-07-19T23:39:33.960+0000 I SHARDING [conn133] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:33.960+0000-55ac3535d9a63f6196b1727e", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39758", time: new Date(1437349173960), what: "split", ns: "db10.coll10", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac3535d2c1f750d15483b7') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac3535d2c1f750d15483b7') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.011+0000 m31200| 2015-07-19T23:39:34.011+0000 I SHARDING [conn133] distributed lock 'db10.coll10/ip-10-139-123-131:31200:1437349131:182555922' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.012+0000 m30999| 2015-07-19T23:39:34.012+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 54 version: 2|5||55ac3535d2c1f750d15483b7 based on: 2|3||55ac3535d2c1f750d15483b7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.093+0000 m31200| 2015-07-19T23:39:34.072+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:3 reslen:374 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 672ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.093+0000 m31200| 2015-07-19T23:39:34.072+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:3 reslen:374 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 675ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.095+0000 m31100| 2015-07-19T23:39:34.072+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 346ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.096+0000 m31100| 2015-07-19T23:39:34.072+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:138 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 346ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.202+0000 m31200| 2015-07-19T23:39:34.202+0000 I COMMAND [conn28] command db10.$cmd command: insert { insert: "coll10", documents: 519, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('55ac3535d2c1f750d15483b7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 526, w: 526 } }, Database: { acquireCount: { w: 526 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 519 } }, oplog: { acquireCount: { w: 519 } } } protocol:op_command 130ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.223+0000 m31100| 2015-07-19T23:39:34.223+0000 I COMMAND [conn16] command db10.$cmd command: insert { insert: "coll10", documents: 481, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('55ac3535d2c1f750d15483b7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 489, w: 489 } }, Database: { acquireCount: { w: 489 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 481 } }, oplog: { acquireCount: { w: 481 } } } protocol:op_command 151ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.338+0000 m31200| 2015-07-19T23:39:34.338+0000 I COMMAND [conn28] command db10.$cmd command: insert { insert: "coll10", documents: 494, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('55ac3535d2c1f750d15483b7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 500, w: 500 } }, Database: { acquireCount: { w: 500 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 494 } }, oplog: { acquireCount: { w: 494 } } } protocol:op_command 105ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.406+0000 m31100| 2015-07-19T23:39:34.406+0000 I COMMAND [conn16] command db10.$cmd command: insert { insert: "coll10", documents: 506, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('55ac3535d2c1f750d15483b7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 515, w: 515 } }, Database: { acquireCount: { w: 515 } }, Collection: { acquireCount: { w: 9 } }, Metadata: { acquireCount: { w: 506 } }, oplog: { acquireCount: { w: 506 } } } protocol:op_command 173ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.408+0000 Using 5 threads (requested 5)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.462+0000 m30999| 2015-07-19T23:39:34.461+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57357 #60 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.474+0000 m30998| 2015-07-19T23:39:34.474+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:35999 #59 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.483+0000 m30998| 2015-07-19T23:39:34.483+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36000 #60 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.485+0000 m30998| 2015-07-19T23:39:34.485+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36001 #61 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.496+0000 m30999| 2015-07-19T23:39:34.496+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57361 #61 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.501+0000 setting random seed: 7467226870357
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.501+0000 setting random seed: 8574237446300
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.501+0000 setting random seed: 361825646832
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.503+0000 setting random seed: 5939856106415
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.503+0000 setting random seed: 1592172482050
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.504+0000 m30998| 2015-07-19T23:39:34.503+0000 I SHARDING [conn59] distributed lock 'map_reduce_merge_nonatomic0/ip-10-139-123-131:30998:1437349129:1804289383' acquired, ts : 55ac3536230355f00547ef21
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.504+0000 m30999| 2015-07-19T23:39:34.503+0000 I SHARDING [conn60] distributed lock 'map_reduce_merge_nonatomic1/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3536d2c1f750d15483b9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.505+0000 m30998| 2015-07-19T23:39:34.504+0000 I SHARDING [conn60] distributed lock 'map_reduce_merge_nonatomic2/ip-10-139-123-131:30998:1437349129:1804289383' acquired, ts : 55ac3536230355f00547ef22
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.505+0000 m30998| 2015-07-19T23:39:34.504+0000 I SHARDING [conn61] distributed lock 'map_reduce_merge_nonatomic4/ip-10-139-123-131:30998:1437349129:1804289383' acquired, ts : 55ac3536230355f00547ef23
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.506+0000 m30999| 2015-07-19T23:39:34.505+0000 I SHARDING [conn61] distributed lock 'map_reduce_merge_nonatomic3/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3536d2c1f750d15483ba
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.506+0000 m31200| 2015-07-19T23:39:34.506+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39764 #134 (62 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.507+0000 m31100| 2015-07-19T23:39:34.506+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47929 #127 (71 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.507+0000 m31200| 2015-07-19T23:39:34.507+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39766 #135 (63 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.507+0000 m31200| 2015-07-19T23:39:34.507+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39767 #136 (64 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.508+0000 m30999| 2015-07-19T23:39:34.507+0000 I SHARDING [conn60] Placing [map_reduce_merge_nonatomic1] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.508+0000 m30998| 2015-07-19T23:39:34.508+0000 I SHARDING [conn59] Placing [map_reduce_merge_nonatomic0] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.508+0000 m30999| 2015-07-19T23:39:34.508+0000 I SHARDING [conn61] Placing [map_reduce_merge_nonatomic3] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.509+0000 m30998| 2015-07-19T23:39:34.509+0000 I SHARDING [conn61] Placing [map_reduce_merge_nonatomic4] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.509+0000 m30998| 2015-07-19T23:39:34.509+0000 I SHARDING [conn60] Placing [map_reduce_merge_nonatomic2] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.510+0000 m29000| 2015-07-19T23:39:34.509+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55666 #35 (35 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.510+0000 m29000| 2015-07-19T23:39:34.509+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55667 #36 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.561+0000 m30999| 2015-07-19T23:39:34.561+0000 I SHARDING [conn60] distributed lock 'map_reduce_merge_nonatomic1/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.561+0000 m30998| 2015-07-19T23:39:34.561+0000 I SHARDING [conn61] distributed lock 'map_reduce_merge_nonatomic4/ip-10-139-123-131:30998:1437349129:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.562+0000 m29000| 2015-07-19T23:39:34.561+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55668 #37 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.562+0000 m29000| 2015-07-19T23:39:34.562+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55669 #38 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.563+0000 m30999| 2015-07-19T23:39:34.562+0000 I SHARDING [conn61] distributed lock 'map_reduce_merge_nonatomic3/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.563+0000 m30998| 2015-07-19T23:39:34.563+0000 I SHARDING [conn60] distributed lock 'map_reduce_merge_nonatomic2/ip-10-139-123-131:30998:1437349129:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.563+0000 m30998| 2015-07-19T23:39:34.563+0000 I SHARDING [conn59] distributed lock 'map_reduce_merge_nonatomic0/ip-10-139-123-131:30998:1437349129:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.571+0000 m31200| 2015-07-19T23:39:34.571+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:133 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 228ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.571+0000 m31200| 2015-07-19T23:39:34.571+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:133 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 228ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.576+0000 m30998| 2015-07-19T23:39:34.575+0000 I SHARDING [conn61] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 13 version: 2|5||55ac3535d2c1f750d15483b7 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.616+0000 m31200| 2015-07-19T23:39:34.615+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.637+0000 m31200| 2015-07-19T23:39:34.636+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.639+0000 m31100| 2015-07-19T23:39:34.639+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.657+0000 m31100| 2015-07-19T23:39:34.656+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.657+0000 m31100| 2015-07-19T23:39:34.656+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.658+0000 m31100| 2015-07-19T23:39:34.658+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.661+0000 m31100| 2015-07-19T23:39:34.661+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 249ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.661+0000 m31100| 2015-07-19T23:39:34.661+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:126 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 250ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.662+0000 m31200| 2015-07-19T23:39:34.662+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.662+0000 m31200| 2015-07-19T23:39:34.662+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.663+0000 m31200| 2015-07-19T23:39:34.662+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.677+0000 m31100| 2015-07-19T23:39:34.676+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.875+0000 m31100| 2015-07-19T23:39:34.874+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 104ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.875+0000 m31100| 2015-07-19T23:39:34.874+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 104ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.876+0000 m31100| 2015-07-19T23:39:34.876+0000 I COMMAND [conn54] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.878+0000 m31100| 2015-07-19T23:39:34.877+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.878+0000 m31100| 2015-07-19T23:39:34.878+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.880+0000 m31100| 2015-07-19T23:39:34.878+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.880+0000 m31100| 2015-07-19T23:39:34.878+0000 I COMMAND [conn54] command db10.tmp.mrs.coll10_1437349174_0 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.880+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.880+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.880+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.881+0000 m31100| values...., out: "tmp.mrs.coll10_1437349174_0", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 27 } }, Database: { acquireCount: { r: 27, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 5, R: 8, W: 1 }, timeAcquiringMicros: { r: 1540, w: 49351, R: 36980, W: 1078 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 302ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.909+0000 m31200| 2015-07-19T23:39:34.909+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 173ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.910+0000 m31200| 2015-07-19T23:39:34.909+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 171ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.921+0000 m31200| 2015-07-19T23:39:34.920+0000 I COMMAND [conn117] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.948+0000 m31200| 2015-07-19T23:39:34.947+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.948+0000 m31200| 2015-07-19T23:39:34.948+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.952+0000 m31200| 2015-07-19T23:39:34.949+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.952+0000 m31100| 2015-07-19T23:39:34.952+0000 I COMMAND [conn23] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.956+0000 m31200| 2015-07-19T23:39:34.956+0000 I COMMAND [conn103] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.956+0000 m31200| 2015-07-19T23:39:34.956+0000 I COMMAND [conn117] command db10.tmp.mrs.coll10_1437349174_1 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.956+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.956+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.956+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:34.957+0000 m31200| values...., out: "tmp.mrs.coll10_1437349174_1", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:229 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 38 } }, Database: { acquireCount: { r: 27, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 2, w: 8, R: 3, W: 7 }, timeAcquiringMicros: { r: 21909, w: 56195, R: 6146, W: 28857 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 365ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.027+0000 m31100| 2015-07-19T23:39:35.026+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.027+0000 m31200| 2015-07-19T23:39:35.026+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.028+0000 m31200| 2015-07-19T23:39:35.027+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.028+0000 m31201| 2015-07-19T23:39:35.027+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.029+0000 m31200| 2015-07-19T23:39:35.029+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.029+0000 m31202| 2015-07-19T23:39:35.029+0000 I COMMAND [repl writer worker 6] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.030+0000 m31200| 2015-07-19T23:39:35.030+0000 I COMMAND [conn103] command db10.tmp.mrs.coll10_1437349174_1 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.030+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.030+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.030+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.031+0000 m31200| values...., out: "tmp.mrs.coll10_1437349174_1", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 156, w: 75, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 28031, W: 58 } }, Database: { acquireCount: { r: 27, w: 67, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 6, R: 6, W: 6 }, timeAcquiringMicros: { r: 3720, w: 21058, R: 49942, W: 3070 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 444ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.038+0000 m31100| 2015-07-19T23:39:35.038+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.038+0000 m31101| 2015-07-19T23:39:35.038+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.039+0000 m31102| 2015-07-19T23:39:35.038+0000 I COMMAND [repl writer worker 1] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.039+0000 m31100| 2015-07-19T23:39:35.038+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.039+0000 m31200| 2015-07-19T23:39:35.039+0000 I COMMAND [conn122] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.044+0000 m31200| 2015-07-19T23:39:35.044+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.045+0000 m31200| 2015-07-19T23:39:35.045+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.046+0000 m31200| 2015-07-19T23:39:35.046+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.048+0000 m31200| 2015-07-19T23:39:35.047+0000 I COMMAND [conn122] command db10.tmp.mrs.coll10_1437349174_0 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.048+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.048+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.048+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.048+0000 m31200| values...., out: "tmp.mrs.coll10_1437349174_0", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 85666, W: 5987 } }, Database: { acquireCount: { r: 27, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 10, R: 2, W: 9 }, timeAcquiringMicros: { r: 10072, w: 16204, R: 4083, W: 55416 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 468ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.049+0000 m31100| 2015-07-19T23:39:35.049+0000 I COMMAND [conn23] command db10.tmp.mrs.coll10_1437349174_0 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.050+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.052+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.054+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.055+0000 m31100| values...., out: "tmp.mrs.coll10_1437349174_0", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:229 locks:{ Global: { acquireCount: { r: 158, w: 75, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 2565, W: 4652 } }, Database: { acquireCount: { r: 27, w: 67, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 5, R: 5, W: 7 }, timeAcquiringMicros: { r: 6761, w: 25444, R: 58520, W: 25927 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 471ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.055+0000 m31100| 2015-07-19T23:39:35.054+0000 I COMMAND [conn53] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.056+0000 m31100| 2015-07-19T23:39:35.056+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.060+0000 m31100| 2015-07-19T23:39:35.056+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.060+0000 m31100| 2015-07-19T23:39:35.058+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.060+0000 m31100| 2015-07-19T23:39:35.058+0000 I COMMAND [conn53] command db10.tmp.mrs.coll10_1437349174_1 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.061+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.061+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.061+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.061+0000 m31100| values...., out: "tmp.mrs.coll10_1437349174_1", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:229 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 2845, w: 86004, W: 65 } }, Database: { acquireCount: { r: 27, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 2, w: 7, R: 5, W: 5 }, timeAcquiringMicros: { r: 12258, w: 20968, R: 54292, W: 30161 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 473ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.063+0000 m31200| 2015-07-19T23:39:35.062+0000 I COMMAND [conn22] CMD: drop db10.tmp.mrs.coll10_1437349174_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.066+0000 m31200| 2015-07-19T23:39:35.066+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.067+0000 m31200| 2015-07-19T23:39:35.067+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.068+0000 m31200| 2015-07-19T23:39:35.068+0000 I COMMAND [conn123] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.071+0000 m31200| 2015-07-19T23:39:35.071+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.072+0000 m31200| 2015-07-19T23:39:35.072+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.073+0000 m31200| 2015-07-19T23:39:35.073+0000 I COMMAND [conn22] command db10.tmp.mrs.coll10_1437349174_2 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.073+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.073+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.073+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.074+0000 m31200| values...., out: "tmp.mrs.coll10_1437349174_2", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:229 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 107434, W: 2733 } }, Database: { acquireCount: { r: 27, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 3, w: 10, R: 3, W: 5 }, timeAcquiringMicros: { r: 27455, w: 14757, R: 3950, W: 38372 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 480ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.074+0000 m31202| 2015-07-19T23:39:35.073+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.074+0000 m31200| 2015-07-19T23:39:35.074+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.075+0000 m31201| 2015-07-19T23:39:35.074+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.077+0000 m31200| 2015-07-19T23:39:35.077+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.078+0000 m31200| 2015-07-19T23:39:35.077+0000 I COMMAND [conn123] command db10.tmp.mrs.coll10_1437349174_0 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.079+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.079+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.079+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.079+0000 m31200| values...., out: "tmp.mrs.coll10_1437349174_0", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:229 locks:{ Global: { acquireCount: { r: 166, w: 75, W: 3 }, acquireWaitCount: { r: 3, w: 1 }, timeAcquiringMicros: { r: 24129, w: 66399 } }, Database: { acquireCount: { r: 27, w: 67, R: 17, W: 11 }, acquireWaitCount: { r: 3, w: 6, R: 4, W: 5 }, timeAcquiringMicros: { r: 17500, w: 24096, R: 2843, W: 33640 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 498ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.081+0000 m31100| 2015-07-19T23:39:35.081+0000 I COMMAND [conn58] CMD: drop db10.tmp.mrs.coll10_1437349174_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.089+0000 m31100| 2015-07-19T23:39:35.088+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.091+0000 m31100| 2015-07-19T23:39:35.091+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.092+0000 m31100| 2015-07-19T23:39:35.091+0000 I COMMAND [conn119] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.095+0000 m31200| 2015-07-19T23:39:35.095+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.096+0000 m31100| 2015-07-19T23:39:35.096+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.096+0000 m31100| 2015-07-19T23:39:35.096+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.096+0000 m31102| 2015-07-19T23:39:35.096+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.097+0000 m31100| 2015-07-19T23:39:35.096+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.097+0000 m31100| 2015-07-19T23:39:35.096+0000 I COMMAND [conn119] command db10.tmp.mrs.coll10_1437349174_1 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.097+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.097+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.098+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.098+0000 m31100| values...., out: "tmp.mrs.coll10_1437349174_1", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:229 locks:{ Global: { acquireCount: { r: 162, w: 75, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 91324, W: 346 } }, Database: { acquireCount: { r: 27, w: 67, R: 15, W: 11 }, acquireWaitCount: { r: 2, w: 3, R: 8, W: 5 }, timeAcquiringMicros: { r: 8162, w: 4878, R: 40971, W: 31140 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 507ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.099+0000 m31100| 2015-07-19T23:39:35.099+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.099+0000 m31100| 2015-07-19T23:39:35.099+0000 I COMMAND [conn58] command db10.tmp.mrs.coll10_1437349174_2 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.099+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.100+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.100+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.100+0000 m31100| values...., out: "tmp.mrs.coll10_1437349174_2", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 18458, w: 94997, W: 38 } }, Database: { acquireCount: { r: 27, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 5, R: 4, W: 5 }, timeAcquiringMicros: { r: 5402, w: 40272, R: 28783, W: 39080 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 508ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.102+0000 m31101| 2015-07-19T23:39:35.102+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.113+0000 m31100| 2015-07-19T23:39:35.113+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47936 #128 (72 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.115+0000 m31200| 2015-07-19T23:39:35.115+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39773 #137 (65 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.123+0000 m31200| 2015-07-19T23:39:35.123+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.127+0000 m31200| 2015-07-19T23:39:35.127+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.140+0000 m31200| 2015-07-19T23:39:35.139+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.142+0000 m31100| 2015-07-19T23:39:35.142+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47938 #129 (73 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.143+0000 m31100| 2015-07-19T23:39:35.143+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47939 #130 (74 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.145+0000 m31200| 2015-07-19T23:39:35.144+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39776 #138 (66 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.145+0000 m31200| 2015-07-19T23:39:35.145+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.148+0000 m31100| 2015-07-19T23:39:35.148+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47941 #131 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.151+0000 m31200| 2015-07-19T23:39:35.151+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39778 #139 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.155+0000 m31200| 2015-07-19T23:39:35.154+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39779 #140 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.175+0000 m31200| 2015-07-19T23:39:35.175+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.176+0000 m31100| 2015-07-19T23:39:35.176+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:47944 #132 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.177+0000 m31200| 2015-07-19T23:39:35.177+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39781 #141 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.177+0000 m31200| 2015-07-19T23:39:35.177+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.177+0000 m31200| 2015-07-19T23:39:35.177+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.179+0000 m31202| 2015-07-19T23:39:35.178+0000 I COMMAND [repl writer worker 0] CMD: drop map_reduce_merge_nonatomic2.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.180+0000 m31201| 2015-07-19T23:39:35.179+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_merge_nonatomic2.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.184+0000 m31200| 2015-07-19T23:39:35.184+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.184+0000 m31200| 2015-07-19T23:39:35.184+0000 I COMMAND [conn103] command map_reduce_merge_nonatomic2.coll10 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.185+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.185+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.185+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.185+0000 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.185+0000 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.186+0000 m31200| }, out: { merge: "coll10", db: "map_reduce_merge_nonatomic2", nonAtomic: true } }, inputDB: "db10", shardedOutputCollection: "tmp.mrs.coll10_1437349174_1", shards: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { waitedMS: 0, result: "tmp.mrs.coll10_1437349174_1", timeMillis: 472, counts: { input: 987, emit: 987, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349175000|14, electionId: ObjectId('55ac34f80000000000000000') } }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { waitedMS: 0, result: "tmp.mrs.coll10_1437349174_1", timeMillis: 441, counts: { input: 1013, emit: 1013, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349175000|1, electionId: ObjectId('55ac35040000000000000000') } } }, shardCounts: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { input: 987, emit: 987, reduce: 80, output: 20 }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { input: 1013, emit: 1013, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 58, w: 50, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 319 } }, Database: { acquireCount: { r: 3, w: 45, W: 6 } }, Collection: { acquireCount: { r: 3, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 125ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.186+0000 m31100| 2015-07-19T23:39:35.186+0000 I COMMAND [conn112] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.186+0000 m31200| 2015-07-19T23:39:35.186+0000 I COMMAND [conn132] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.189+0000 m31101| 2015-07-19T23:39:35.189+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.190+0000 m31102| 2015-07-19T23:39:35.189+0000 I COMMAND [repl writer worker 15] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.190+0000 m31201| 2015-07-19T23:39:35.190+0000 I COMMAND [repl writer worker 8] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.191+0000 m31202| 2015-07-19T23:39:35.190+0000 I COMMAND [repl writer worker 4] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.193+0000 m31200| 2015-07-19T23:39:35.193+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.194+0000 m31200| 2015-07-19T23:39:35.194+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.195+0000 m31200| 2015-07-19T23:39:35.194+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.204+0000 m31200| 2015-07-19T23:39:35.194+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.204+0000 m31200| 2015-07-19T23:39:35.195+0000 I COMMAND [conn122] command map_reduce_merge_nonatomic4.coll10 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.205+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.205+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.205+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.206+0000 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.206+0000 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.207+0000 m31200| }, out: { merge: "coll10", db: "map_reduce_merge_nonatomic4", nonAtomic: true } }, inputDB: "db10", shardedOutputCollection: "tmp.mrs.coll10_1437349174_0", shards: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { waitedMS: 0, result: "tmp.mrs.coll10_1437349174_0", timeMillis: 301, counts: { input: 987, emit: 987, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349174000|1013, electionId: ObjectId('55ac34f80000000000000000') } }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { waitedMS: 0, result: "tmp.mrs.coll10_1437349174_0", timeMillis: 466, counts: { input: 1013, emit: 1013, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349175000|34, electionId: ObjectId('55ac35040000000000000000') } } }, shardCounts: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { input: 987, emit: 987, reduce: 80, output: 20 }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { input: 1013, emit: 1013, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 58, w: 50, W: 2 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 2474, W: 1644 } }, Database: { acquireCount: { r: 3, w: 45, W: 6 } }, Collection: { acquireCount: { r: 3, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 145ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.207+0000 m31100| 2015-07-19T23:39:35.195+0000 I COMMAND [conn112] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.207+0000 m31201| 2015-07-19T23:39:35.195+0000 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic4.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.208+0000 m31202| 2015-07-19T23:39:35.196+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic4.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.208+0000 m31200| 2015-07-19T23:39:35.196+0000 I COMMAND [conn132] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.209+0000 m31102| 2015-07-19T23:39:35.198+0000 I COMMAND [repl writer worker 3] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.209+0000 m31101| 2015-07-19T23:39:35.198+0000 I COMMAND [repl writer worker 1] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.212+0000 m31200| 2015-07-19T23:39:35.199+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.212+0000 m31100| 2015-07-19T23:39:35.199+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.212+0000 m31201| 2015-07-19T23:39:35.201+0000 I COMMAND [repl writer worker 1] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.214+0000 m31202| 2015-07-19T23:39:35.213+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.215+0000 m31100| 2015-07-19T23:39:35.214+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.217+0000 m31200| 2015-07-19T23:39:35.217+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.218+0000 m31200| 2015-07-19T23:39:35.218+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.219+0000 m31200| 2015-07-19T23:39:35.219+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.220+0000 m31200| 2015-07-19T23:39:35.219+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.220+0000 m31200| 2015-07-19T23:39:35.220+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.224+0000 m31201| 2015-07-19T23:39:35.223+0000 I COMMAND [repl writer worker 10] CMD: drop map_reduce_merge_nonatomic0.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.224+0000 m31200| 2015-07-19T23:39:35.224+0000 I COMMAND [conn22] command map_reduce_merge_nonatomic0.coll10 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.224+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.224+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.224+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.225+0000 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.225+0000 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.227+0000 m31200| }, out: { merge: "coll10", db: "map_reduce_merge_nonatomic0", nonAtomic: true } }, inputDB: "db10", shardedOutputCollection: "tmp.mrs.coll10_1437349174_2", shards: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { waitedMS: 0, result: "tmp.mrs.coll10_1437349174_2", timeMillis: 500, counts: { input: 987, emit: 987, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349175000|39, electionId: ObjectId('55ac34f80000000000000000') } }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { waitedMS: 0, result: "tmp.mrs.coll10_1437349174_2", timeMillis: 474, counts: { input: 1013, emit: 1013, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349175000|60, electionId: ObjectId('55ac35040000000000000000') } } }, shardCounts: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { input: 987, emit: 987, reduce: 80, output: 20 }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { input: 1013, emit: 1013, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 58, w: 50, W: 2 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 5942, W: 7328 } }, Database: { acquireCount: { r: 3, w: 45, W: 6 } }, Collection: { acquireCount: { r: 3, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 122ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.227+0000 m31202| 2015-07-19T23:39:35.224+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic0.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.228+0000 m31100| 2015-07-19T23:39:35.225+0000 I COMMAND [conn112] CMD: drop db10.tmp.mrs.coll10_1437349174_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.235+0000 m31200| 2015-07-19T23:39:35.234+0000 I COMMAND [conn132] CMD: drop db10.tmp.mrs.coll10_1437349174_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.238+0000 m31101| 2015-07-19T23:39:35.237+0000 I COMMAND [repl writer worker 15] CMD: drop db10.tmp.mrs.coll10_1437349174_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.239+0000 m31102| 2015-07-19T23:39:35.237+0000 I COMMAND [repl writer worker 13] CMD: drop db10.tmp.mrs.coll10_1437349174_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.243+0000 m31200| 2015-07-19T23:39:35.242+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.244+0000 m31200| 2015-07-19T23:39:35.244+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.245+0000 m31200| 2015-07-19T23:39:35.245+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.245+0000 m31200| 2015-07-19T23:39:35.245+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.246+0000 m31201| 2015-07-19T23:39:35.245+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349174_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.246+0000 m31202| 2015-07-19T23:39:35.245+0000 I COMMAND [repl writer worker 1] CMD: drop db10.tmp.mrs.coll10_1437349174_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.247+0000 m31200| 2015-07-19T23:39:35.246+0000 I COMMAND [conn123] command map_reduce_merge_nonatomic1.coll10 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.247+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.247+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.247+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.247+0000 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.248+0000 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.249+0000 m31200| }, out: { merge: "coll10", db: "map_reduce_merge_nonatomic1", nonAtomic: true } }, inputDB: "db10", shardedOutputCollection: "tmp.mrs.coll10_1437349174_0", shards: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { waitedMS: 0, result: "tmp.mrs.coll10_1437349174_0", timeMillis: 460, counts: { input: 987, emit: 987, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349175000|1, electionId: ObjectId('55ac34f80000000000000000') } }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { waitedMS: 0, result: "tmp.mrs.coll10_1437349174_0", timeMillis: 492, counts: { input: 1013, emit: 1013, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349175000|65, electionId: ObjectId('55ac35040000000000000000') } } }, shardCounts: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { input: 987, emit: 987, reduce: 80, output: 20 }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { input: 1013, emit: 1013, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 58, w: 50, W: 2 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 26363, W: 9990 } }, Database: { acquireCount: { r: 3, w: 45, W: 6 } }, Collection: { acquireCount: { r: 3, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 161ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.249+0000 m31202| 2015-07-19T23:39:35.247+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic1.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.249+0000 m31201| 2015-07-19T23:39:35.247+0000 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic1.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.249+0000 m31100| 2015-07-19T23:39:35.248+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.250+0000 m31100| 2015-07-19T23:39:35.249+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.259+0000 m31200| 2015-07-19T23:39:35.259+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349174_0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.262+0000 m31200| 2015-07-19T23:39:35.262+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.265+0000 m31200| 2015-07-19T23:39:35.263+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.266+0000 m31201| 2015-07-19T23:39:35.264+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic3.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.266+0000 m31200| 2015-07-19T23:39:35.264+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.266+0000 m31200| 2015-07-19T23:39:35.264+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.266+0000 m31200| 2015-07-19T23:39:35.265+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.267+0000 m31200| 2015-07-19T23:39:35.265+0000 I COMMAND [conn117] command map_reduce_merge_nonatomic3.coll10 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.267+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.267+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.267+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.267+0000 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.267+0000 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.268+0000 m31200| }, out: { merge: "coll10", db: "map_reduce_merge_nonatomic3", nonAtomic: true } }, inputDB: "db10", shardedOutputCollection: "tmp.mrs.coll10_1437349174_1", shards: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { waitedMS: 0, result: "tmp.mrs.coll10_1437349174_1", timeMillis: 506, counts: { input: 987, emit: 987, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349175000|53, electionId: ObjectId('55ac34f80000000000000000') } }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { waitedMS: 0, result: "tmp.mrs.coll10_1437349174_1", timeMillis: 357, counts: { input: 1013, emit: 1013, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349174000|1062, electionId: ObjectId('55ac35040000000000000000') } } }, shardCounts: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { input: 987, emit: 987, reduce: 80, output: 20 }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { input: 1013, emit: 1013, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 58, w: 50, W: 2 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 16706, W: 11028 } }, Database: { acquireCount: { r: 3, w: 45, W: 6 } }, Collection: { acquireCount: { r: 3, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 168ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.269+0000 m31202| 2015-07-19T23:39:35.266+0000 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic3.coll10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.269+0000 m31100| 2015-07-19T23:39:35.266+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.272+0000 m31200| 2015-07-19T23:39:35.272+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.277+0000 m31200| 2015-07-19T23:39:35.277+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349174_1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.277+0000 m31100| 2015-07-19T23:39:35.277+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.297+0000 m31200| 2015-07-19T23:39:35.296+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.301+0000 m31100| 2015-07-19T23:39:35.301+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.459+0000 m31200| 2015-07-19T23:39:35.458+0000 I COMMAND [conn122] CMD: drop db10.tmp.mrs.coll10_1437349175_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.460+0000 m31200| 2015-07-19T23:39:35.460+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.461+0000 m31200| 2015-07-19T23:39:35.460+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.462+0000 m31200| 2015-07-19T23:39:35.462+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.463+0000 m31200| 2015-07-19T23:39:35.463+0000 I COMMAND [conn122] command db10.tmp.mrs.coll10_1437349175_3 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.463+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.463+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.463+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.464+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_3", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2 }, timeAcquiringMicros: { r: 8396, w: 981 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 5, R: 7 }, timeAcquiringMicros: { w: 46702, R: 31931 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 263ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.481+0000 m31100| 2015-07-19T23:39:35.481+0000 I COMMAND [conn54] CMD: drop db10.tmp.mrs.coll10_1437349175_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.483+0000 m31100| 2015-07-19T23:39:35.482+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.483+0000 m31100| 2015-07-19T23:39:35.483+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.483+0000 m31100| 2015-07-19T23:39:35.483+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.497+0000 m31100| 2015-07-19T23:39:35.497+0000 I COMMAND [conn54] command db10.tmp.mrs.coll10_1437349175_3 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.497+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.498+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.498+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.498+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_3", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 4743 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 7, R: 9, W: 2 }, timeAcquiringMicros: { w: 64335, R: 38141, W: 13213 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 297ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.498+0000 m31200| 2015-07-19T23:39:35.498+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.513+0000 m31100| 2015-07-19T23:39:35.513+0000 I COMMAND [conn53] CMD: drop db10.tmp.mrs.coll10_1437349175_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.515+0000 m31100| 2015-07-19T23:39:35.514+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.516+0000 m31100| 2015-07-19T23:39:35.516+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.516+0000 m31100| 2015-07-19T23:39:35.516+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.517+0000 m31100| 2015-07-19T23:39:35.516+0000 I COMMAND [conn53] command db10.tmp.mrs.coll10_1437349175_4 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.517+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.517+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.517+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.517+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_4", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 1655, W: 5056 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 6, R: 10, W: 3 }, timeAcquiringMicros: { w: 48601, R: 50415, W: 16019 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 302ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.529+0000 m31200| 2015-07-19T23:39:35.529+0000 I COMMAND [conn103] CMD: drop db10.tmp.mrs.coll10_1437349175_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.533+0000 m31200| 2015-07-19T23:39:35.532+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.533+0000 m31200| 2015-07-19T23:39:35.533+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.554+0000 m31200| 2015-07-19T23:39:35.554+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.556+0000 m31200| 2015-07-19T23:39:35.556+0000 I COMMAND [conn103] command db10.tmp.mrs.coll10_1437349175_4 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.556+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.556+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.556+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.557+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_4", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 3671, w: 12842 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 4, R: 8, W: 7 }, timeAcquiringMicros: { w: 34762, R: 23656, W: 35050 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 340ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.557+0000 m31200| 2015-07-19T23:39:35.557+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.565+0000 m31100| 2015-07-19T23:39:35.565+0000 I COMMAND [conn58] CMD: drop db10.tmp.mrs.coll10_1437349175_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.567+0000 m31100| 2015-07-19T23:39:35.567+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.568+0000 m31100| 2015-07-19T23:39:35.568+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.568+0000 m31100| 2015-07-19T23:39:35.568+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.569+0000 m31100| 2015-07-19T23:39:35.569+0000 I COMMAND [conn58] command db10.tmp.mrs.coll10_1437349175_5 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.569+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.570+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.570+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.570+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_5", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2491 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 5, R: 8, W: 5 }, timeAcquiringMicros: { r: 17342, w: 32090, R: 55323, W: 35145 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 329ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.574+0000 m31200| 2015-07-19T23:39:35.574+0000 I COMMAND [conn22] CMD: drop db10.tmp.mrs.coll10_1437349175_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.575+0000 m31200| 2015-07-19T23:39:35.575+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.578+0000 m31200| 2015-07-19T23:39:35.578+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.579+0000 m31200| 2015-07-19T23:39:35.579+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.581+0000 m31200| 2015-07-19T23:39:35.581+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.582+0000 m31200| 2015-07-19T23:39:35.581+0000 I COMMAND [conn22] command db10.tmp.mrs.coll10_1437349175_5 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.582+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.582+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.582+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.582+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_5", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 10994, W: 1187 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 6, R: 8, W: 2 }, timeAcquiringMicros: { r: 13192, w: 19032, R: 52052, W: 10151 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 342ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.583+0000 m31200| 2015-07-19T23:39:35.582+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.583+0000 m31200| 2015-07-19T23:39:35.583+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.583+0000 m31200| 2015-07-19T23:39:35.583+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.584+0000 m31100| 2015-07-19T23:39:35.583+0000 I COMMAND [conn112] CMD: drop db10.tmp.mrs.coll10_1437349175_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.585+0000 m31200| 2015-07-19T23:39:35.584+0000 I COMMAND [conn132] CMD: drop db10.tmp.mrs.coll10_1437349175_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.590+0000 m31202| 2015-07-19T23:39:35.586+0000 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.590+0000 m31101| 2015-07-19T23:39:35.586+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349175_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.591+0000 m31102| 2015-07-19T23:39:35.587+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349175_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.591+0000 m31201| 2015-07-19T23:39:35.589+0000 I COMMAND [repl writer worker 14] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.591+0000 m31202| 2015-07-19T23:39:35.590+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349175_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.591+0000 m31201| 2015-07-19T23:39:35.591+0000 I COMMAND [repl writer worker 1] CMD: drop db10.tmp.mrs.coll10_1437349175_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.595+0000 m31200| 2015-07-19T23:39:35.595+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_18
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.595+0000 m31100| 2015-07-19T23:39:35.595+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.654+0000 m31200| 2015-07-19T23:39:35.654+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.656+0000 m31200| 2015-07-19T23:39:35.656+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.657+0000 m31200| 2015-07-19T23:39:35.657+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_16
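Every tmp.mr.* and tmp.mrs.* drop above is mapReduce cleaning up its per-pass temporaries on the primaries, with the repl writer workers replaying the same drops on the secondaries. A hedged sketch, assuming the db10 namespace from the log, of how leftover temporaries could be enumerated and removed by hand if a pass were ever interrupted (illustrative only; the workload's own cleanup is what the log records):

    var testDB = db.getSiblingDB('db10');
    testDB.getCollectionNames().filter(function (name) {
        return /^tmp\.mrs?\./.test(name);   // matches tmp.mr.* and tmp.mrs.*
    }).forEach(function (name) {
        testDB.getCollection(name).drop();
    });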
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.658+0000 m31200| 2015-07-19T23:39:35.657+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.658+0000 m31200| 2015-07-19T23:39:35.658+0000 I COMMAND [conn103] command map_reduce_merge_nonatomic2.coll10 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.658+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.658+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.658+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.658+0000 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.658+0000 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.659+0000 m31200| }, out: { merge: "coll10", db: "map_reduce_merge_nonatomic2", nonAtomic: true } }, inputDB: "db10", shardedOutputCollection: "tmp.mrs.coll10_1437349175_4", shards: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { waitedMS: 0, result: "tmp.mrs.coll10_1437349175_4", timeMillis: 302, counts: { input: 987, emit: 987, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349175000|103, electionId: ObjectId('55ac34f80000000000000000') } }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { waitedMS: 0, result: "tmp.mrs.coll10_1437349175_4", timeMillis: 317, counts: { input: 1013, emit: 1013, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1437349175000|240, electionId: ObjectId('55ac35040000000000000000') } } }, shardCounts: { test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102: { input: 987, emit: 987, reduce: 80, output: 20 }, test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202: { input: 1013, emit: 1013, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.659+0000 m31100| 2015-07-19T23:39:35.658+0000 I COMMAND [conn112] CMD: drop db10.tmp.mrs.coll10_1437349175_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.660+0000 m31202| 2015-07-19T23:39:35.659+0000 I COMMAND [repl writer worker 2] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.660+0000 m31201| 2015-07-19T23:39:35.660+0000 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.661+0000 m31200| 2015-07-19T23:39:35.660+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.661+0000 m31200| 2015-07-19T23:39:35.660+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_17
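These command entries carry full lock statistics because mongod reports any operation that exceeds the slow-operation threshold (slowms, 100 ms by default), and the mapReduce passes in this stretch run from roughly 101 ms to over 500 ms. A sketch of adjusting that threshold for the test database, with an illustrative 200 ms value:

    // Profiler level 0: no system.profile collection, but still log ops slower than slowms.
    db.getSiblingDB('db10').setProfilingLevel(0, 200);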
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.661+0000 m31100| 2015-07-19T23:39:35.661+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349175_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.662+0000 m31201| 2015-07-19T23:39:35.662+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.663+0000 m31202| 2015-07-19T23:39:35.662+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.663+0000 m31200| 2015-07-19T23:39:35.663+0000 I COMMAND [conn123] CMD: drop db10.tmp.mrs.coll10_1437349175_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.665+0000 m31200| 2015-07-19T23:39:35.665+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.665+0000 m31200| 2015-07-19T23:39:35.665+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.665+0000 m31200| 2015-07-19T23:39:35.665+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.666+0000 m31200| 2015-07-19T23:39:35.665+0000 I COMMAND [conn123] command db10.tmp.mrs.coll10_1437349175_2 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.666+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.666+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.666+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.666+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_2", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 74 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 7, R: 8, W: 6 }, timeAcquiringMicros: { r: 2168, w: 68585, R: 34372, W: 41663 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 399ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.673+0000 m31200| 2015-07-19T23:39:35.673+0000 I COMMAND [conn132] CMD: drop db10.tmp.mrs.coll10_1437349175_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.673+0000 m31200| 2015-07-19T23:39:35.673+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349175_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.675+0000 m31102| 2015-07-19T23:39:35.675+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349175_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.675+0000 m31101| 2015-07-19T23:39:35.675+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349175_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.677+0000 m31102| 2015-07-19T23:39:35.676+0000 I COMMAND [repl writer worker 15] CMD: drop db10.tmp.mrs.coll10_1437349175_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.677+0000 m31101| 2015-07-19T23:39:35.676+0000 I COMMAND [repl writer worker 13] CMD: drop db10.tmp.mrs.coll10_1437349175_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.679+0000 m31201| 2015-07-19T23:39:35.678+0000 I COMMAND [repl writer worker 5] CMD: drop db10.tmp.mrs.coll10_1437349175_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.679+0000 m31202| 2015-07-19T23:39:35.679+0000 I COMMAND [repl writer worker 8] CMD: drop db10.tmp.mrs.coll10_1437349175_4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.679+0000 m31201| 2015-07-19T23:39:35.679+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349175_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.680+0000 m31100| 2015-07-19T23:39:35.680+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.680+0000 m31200| 2015-07-19T23:39:35.680+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_19
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.682+0000 m31100| 2015-07-19T23:39:35.681+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.682+0000 m31202| 2015-07-19T23:39:35.682+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349175_5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.694+0000 m31200| 2015-07-19T23:39:35.691+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_20
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.744+0000 m31200| 2015-07-19T23:39:35.743+0000 I COMMAND [conn117] CMD: drop db10.tmp.mrs.coll10_1437349175_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.745+0000 m31200| 2015-07-19T23:39:35.745+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.746+0000 m31200| 2015-07-19T23:39:35.745+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.746+0000 m31200| 2015-07-19T23:39:35.745+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.753+0000 m31200| 2015-07-19T23:39:35.753+0000 I COMMAND [conn117] command db10.tmp.mrs.coll10_1437349175_3 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.753+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.753+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.753+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.754+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_3", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:229 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3518, w: 2028, W: 19 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 3, w: 12, R: 7, W: 5 }, timeAcquiringMicros: { r: 24525, w: 61631, R: 17875, W: 52803 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 459ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.779+0000 m31100| 2015-07-19T23:39:35.779+0000 I COMMAND [conn23] CMD: drop db10.tmp.mrs.coll10_1437349175_2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.784+0000 m31100| 2015-07-19T23:39:35.784+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.784+0000 m31100| 2015-07-19T23:39:35.784+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.785+0000 m31100| 2015-07-19T23:39:35.785+0000 I COMMAND [conn119] CMD: drop db10.tmp.mrs.coll10_1437349175_3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.786+0000 m31100| 2015-07-19T23:39:35.786+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.787+0000 m31100| 2015-07-19T23:39:35.787+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.788+0000 m31100| 2015-07-19T23:39:35.788+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.789+0000 m31100| 2015-07-19T23:39:35.789+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.790+0000 m31100| 2015-07-19T23:39:35.789+0000 I COMMAND [conn119] command db10.tmp.mrs.coll10_1437349175_3 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.790+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.790+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.790+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.791+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_3", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:229 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 9394, W: 170 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 21, R: 7, W: 9 }, timeAcquiringMicros: { r: 2286, w: 148578, R: 34383, W: 46615 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 495ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.791+0000 m31100| 2015-07-19T23:39:35.789+0000 I COMMAND [conn23] command db10.tmp.mrs.coll10_1437349175_2 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.791+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.792+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.794+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.794+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_2", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 2319, w: 2414, W: 1887 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 21, R: 8, W: 9 }, timeAcquiringMicros: { r: 3895, w: 146281, R: 50169, W: 51533 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 523ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.795+0000 m31200| 2015-07-19T23:39:35.791+0000 I COMMAND [conn117]
CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.795+0000 m31200| 2015-07-19T23:39:35.792+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.841+0000 m31100| 2015-07-19T23:39:35.840+0000 I COMMAND [conn54] CMD: drop db10.tmp.mrs.coll10_1437349175_6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.843+0000 m31100| 2015-07-19T23:39:35.842+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.843+0000 m31100| 2015-07-19T23:39:35.842+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.844+0000 m31100| 2015-07-19T23:39:35.844+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.844+0000 m31100| 2015-07-19T23:39:35.844+0000 I COMMAND [conn54] command db10.tmp.mrs.coll10_1437349175_6 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.844+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.845+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.845+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.845+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_6", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 7677, W: 165 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 8, R: 5 }, timeAcquiringMicros: { r: 319, w: 63352, R: 12852 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 249ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.847+0000 m31200| 2015-07-19T23:39:35.847+0000 I COMMAND [conn122] CMD: drop db10.tmp.mrs.coll10_1437349175_6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.849+0000 m31200| 2015-07-19T23:39:35.849+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.849+0000 m31200| 2015-07-19T23:39:35.849+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.850+0000 m31200| 2015-07-19T23:39:35.850+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.850+0000 m31200| 2015-07-19T23:39:35.850+0000 I COMMAND [conn122] command db10.tmp.mrs.coll10_1437349175_6 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.850+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.850+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.851+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.851+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_6", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { 
acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 30266 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 4, R: 9 }, timeAcquiringMicros: { w: 32341, R: 24842 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 255ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.852+0000 m31200| 2015-07-19T23:39:35.851+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.863+0000 m31200| 2015-07-19T23:39:35.863+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.866+0000 m31200| 2015-07-19T23:39:35.865+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.868+0000 m31200| 2015-07-19T23:39:35.868+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.869+0000 m31200| 2015-07-19T23:39:35.869+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.870+0000 m31100| 2015-07-19T23:39:35.869+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349175_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.870+0000 m31202| 2015-07-19T23:39:35.870+0000 I COMMAND [repl writer worker 10] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.870+0000 m31201| 2015-07-19T23:39:35.870+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.871+0000 m31200| 2015-07-19T23:39:35.870+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349175_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.873+0000 m31102| 2015-07-19T23:39:35.872+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349175_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.873+0000 m31101| 2015-07-19T23:39:35.872+0000 I COMMAND [repl writer worker 5] CMD: drop db10.tmp.mrs.coll10_1437349175_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.878+0000 m31201| 2015-07-19T23:39:35.877+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.878+0000 m31100| 2015-07-19T23:39:35.877+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.878+0000 m31202| 2015-07-19T23:39:35.877+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.879+0000 m31200| 2015-07-19T23:39:35.879+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.881+0000 m31201| 2015-07-19T23:39:35.880+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349175_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.881+0000 m31202| 2015-07-19T23:39:35.881+0000 I COMMAND [repl writer worker 15] CMD: drop db10.tmp.mrs.coll10_1437349175_2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.883+0000 m31200| 2015-07-19T23:39:35.883+0000 I COMMAND [conn117] CMD: 
drop map_reduce_merge_nonatomic3.tmp.mr.coll10_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.883+0000 m31200| 2015-07-19T23:39:35.883+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.884+0000 m31100| 2015-07-19T23:39:35.883+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349175_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.886+0000 m31200| 2015-07-19T23:39:35.885+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349175_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.888+0000 m31102| 2015-07-19T23:39:35.888+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349175_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.888+0000 m31101| 2015-07-19T23:39:35.888+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349175_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.897+0000 m31201| 2015-07-19T23:39:35.897+0000 I COMMAND [repl writer worker 1] CMD: drop db10.tmp.mrs.coll10_1437349175_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.899+0000 m31202| 2015-07-19T23:39:35.896+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349175_3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.899+0000 m31100| 2015-07-19T23:39:35.897+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.904+0000 m31200| 2015-07-19T23:39:35.904+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.918+0000 m31200| 2015-07-19T23:39:35.918+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.921+0000 m31200| 2015-07-19T23:39:35.921+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.921+0000 m31200| 2015-07-19T23:39:35.921+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.922+0000 m31100| 2015-07-19T23:39:35.921+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349175_6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.922+0000 m31200| 2015-07-19T23:39:35.922+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349175_6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.926+0000 m31101| 2015-07-19T23:39:35.926+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349175_6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.930+0000 m31102| 2015-07-19T23:39:35.930+0000 I COMMAND [repl writer worker 15] CMD: drop db10.tmp.mrs.coll10_1437349175_6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.931+0000 m31201| 2015-07-19T23:39:35.930+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.931+0000 m31202| 2015-07-19T23:39:35.930+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.932+0000 m31100| 2015-07-19T23:39:35.932+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.933+0000 m31202| 2015-07-19T23:39:35.933+0000 I COMMAND [repl writer worker 5] CMD: drop db10.tmp.mrs.coll10_1437349175_6 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.933+0000 m31201| 2015-07-19T23:39:35.933+0000 I COMMAND [repl writer worker 3] CMD: drop db10.tmp.mrs.coll10_1437349175_6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:35.948+0000 m31200| 2015-07-19T23:39:35.947+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.034+0000 m31100| 2015-07-19T23:39:36.033+0000 I COMMAND [conn58] CMD: drop db10.tmp.mrs.coll10_1437349175_7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.035+0000 m31100| 2015-07-19T23:39:36.035+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.036+0000 m31100| 2015-07-19T23:39:36.035+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.036+0000 m31100| 2015-07-19T23:39:36.036+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.051+0000 m31100| 2015-07-19T23:39:36.051+0000 I COMMAND [conn58] command db10.tmp.mrs.coll10_1437349175_7 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.051+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.051+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.052+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.052+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_7", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 4662 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 11, R: 11, W: 7 }, timeAcquiringMicros: { w: 56261, R: 37010, W: 22551 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 372ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.058+0000 m31100| 2015-07-19T23:39:36.058+0000 I COMMAND [conn53] CMD: drop db10.tmp.mrs.coll10_1437349175_8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.060+0000 m31100| 2015-07-19T23:39:36.060+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.060+0000 m31100| 2015-07-19T23:39:36.060+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.061+0000 m31100| 2015-07-19T23:39:36.061+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.078+0000 m31100| 2015-07-19T23:39:36.077+0000 I COMMAND [conn53] command db10.tmp.mrs.coll10_1437349175_8 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.078+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.078+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.078+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.079+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_8", shardedFirstPass: true } planSummary: COUNT 
ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:229 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 15428, W: 7479 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 3, w: 10, R: 11, W: 7 }, timeAcquiringMicros: { r: 10710, w: 65754, R: 50766, W: 17364 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 398ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.109+0000 m31100| 2015-07-19T23:39:36.109+0000 I COMMAND [conn119] CMD: drop db10.tmp.mrs.coll10_1437349175_5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.111+0000 m31100| 2015-07-19T23:39:36.111+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.112+0000 m31100| 2015-07-19T23:39:36.112+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.112+0000 m31100| 2015-07-19T23:39:36.112+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.113+0000 m31100| 2015-07-19T23:39:36.112+0000 I COMMAND [conn119] command db10.tmp.mrs.coll10_1437349175_5 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.113+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.113+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.113+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.113+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_5", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 3851, W: 4370 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 4, R: 8, W: 2 }, timeAcquiringMicros: { w: 16680, R: 30169, W: 11895 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 215ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.119+0000 m31100| 2015-07-19T23:39:36.119+0000 I COMMAND [conn23] CMD: drop db10.tmp.mrs.coll10_1437349175_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.121+0000 m31100| 2015-07-19T23:39:36.120+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.121+0000 m31100| 2015-07-19T23:39:36.121+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.121+0000 m31100| 2015-07-19T23:39:36.121+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.122+0000 m31100| 2015-07-19T23:39:36.122+0000 I COMMAND [conn23] command db10.tmp.mrs.coll10_1437349175_4 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.122+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.122+0000 m31100| var res = {}; 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.122+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.122+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_4", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 5383 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 1, R: 7 }, timeAcquiringMicros: { w: 1052, R: 13807 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 244ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.131+0000 m31200| 2015-07-19T23:39:36.131+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 135ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.131+0000 m31200| 2015-07-19T23:39:36.131+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:588 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 135ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.139+0000 m31100| 2015-07-19T23:39:36.138+0000 I COMMAND [conn54] CMD: drop db10.tmp.mrs.coll10_1437349175_9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.142+0000 m31200| 2015-07-19T23:39:36.142+0000 I COMMAND [conn103] CMD: drop db10.tmp.mrs.coll10_1437349175_7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.145+0000 m31200| 2015-07-19T23:39:36.145+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.145+0000 m31200| 2015-07-19T23:39:36.145+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.146+0000 m31200| 2015-07-19T23:39:36.146+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.148+0000 m31200| 2015-07-19T23:39:36.147+0000 I COMMAND [conn103] command db10.tmp.mrs.coll10_1437349175_7 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.148+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.148+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.148+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.149+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_7", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:229 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 10539, W: 82 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { w: 20, R: 7, W: 7 }, timeAcquiringMicros: { w: 150931, R: 18398, W: 20964 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { 
acquireCount: { w: 22 } } } protocol:op_query 468ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.149+0000 m31200| 2015-07-19T23:39:36.149+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_27 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.150+0000 m31100| 2015-07-19T23:39:36.150+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.151+0000 m31100| 2015-07-19T23:39:36.151+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.154+0000 m31200| 2015-07-19T23:39:36.154+0000 I COMMAND [conn22] CMD: drop db10.tmp.mrs.coll10_1437349175_8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.155+0000 m31100| 2015-07-19T23:39:36.155+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.155+0000 m31200| 2015-07-19T23:39:36.155+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.156+0000 m31200| 2015-07-19T23:39:36.155+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.156+0000 m31100| 2015-07-19T23:39:36.156+0000 I COMMAND [conn54] command db10.tmp.mrs.coll10_1437349175_9 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.156+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.156+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.156+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.157+0000 m31100| values...., out: "tmp.mrs.coll10_1437349175_9", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 5054 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { R: 8, W: 2 }, timeAcquiringMicros: { R: 28199, W: 15369 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 223ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.159+0000 m31200| 2015-07-19T23:39:36.157+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.161+0000 m31200| 2015-07-19T23:39:36.158+0000 I COMMAND [conn22] command db10.tmp.mrs.coll10_1437349175_8 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.161+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.161+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.161+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.165+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_8", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 7043, W: 4377 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, 
w: 18, R: 10, W: 6 }, timeAcquiringMicros: { r: 1127, w: 142732, R: 67278, W: 23039 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 477ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.165+0000 m31200| 2015-07-19T23:39:36.158+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_28 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.196+0000 m31200| 2015-07-19T23:39:36.196+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_27 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.201+0000 m31200| 2015-07-19T23:39:36.201+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_27 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.201+0000 m31200| 2015-07-19T23:39:36.201+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_27 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.202+0000 m31202| 2015-07-19T23:39:36.202+0000 I COMMAND [repl writer worker 2] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_27 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.202+0000 m31100| 2015-07-19T23:39:36.202+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349175_7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.203+0000 m31200| 2015-07-19T23:39:36.203+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349175_7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.203+0000 m31201| 2015-07-19T23:39:36.203+0000 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_27 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.204+0000 m31102| 2015-07-19T23:39:36.203+0000 I COMMAND [repl writer worker 15] CMD: drop db10.tmp.mrs.coll10_1437349175_7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.205+0000 m31101| 2015-07-19T23:39:36.205+0000 I COMMAND [repl writer worker 13] CMD: drop db10.tmp.mrs.coll10_1437349175_7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.209+0000 m31200| 2015-07-19T23:39:36.208+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_28 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.211+0000 m31200| 2015-07-19T23:39:36.210+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_28 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.211+0000 m31200| 2015-07-19T23:39:36.211+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_28 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.211+0000 m31100| 2015-07-19T23:39:36.211+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349175_8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.212+0000 m31202| 2015-07-19T23:39:36.212+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_28 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.213+0000 m31201| 2015-07-19T23:39:36.212+0000 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_28 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.213+0000 m31200| 2015-07-19T23:39:36.213+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349175_8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.214+0000 m31100| 2015-07-19T23:39:36.213+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.214+0000 m31201| 2015-07-19T23:39:36.213+0000 I COMMAND 
[repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349175_7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.220+0000 m31202| 2015-07-19T23:39:36.214+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349175_7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.220+0000 m31102| 2015-07-19T23:39:36.215+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349175_8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.220+0000 m31101| 2015-07-19T23:39:36.216+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349175_8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.220+0000 m31201| 2015-07-19T23:39:36.216+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349175_8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.222+0000 m31200| 2015-07-19T23:39:36.218+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_29 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.223+0000 m31202| 2015-07-19T23:39:36.218+0000 I COMMAND [repl writer worker 3] CMD: drop db10.tmp.mrs.coll10_1437349175_8 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.228+0000 m31100| 2015-07-19T23:39:36.228+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.236+0000 m31200| 2015-07-19T23:39:36.236+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_30 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.290+0000 m31200| 2015-07-19T23:39:36.290+0000 I COMMAND [conn117] CMD: drop db10.tmp.mrs.coll10_1437349175_5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.294+0000 m31200| 2015-07-19T23:39:36.294+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.295+0000 m31200| 2015-07-19T23:39:36.294+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.295+0000 m31200| 2015-07-19T23:39:36.295+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.302+0000 m31200| 2015-07-19T23:39:36.301+0000 I COMMAND [conn117] command db10.tmp.mrs.coll10_1437349175_5 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.302+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.302+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.302+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.303+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_5", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 20459 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 7, w: 14, R: 10, W: 7 }, timeAcquiringMicros: { r: 16697, w: 66402, R: 60902, W: 44042 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 403ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.303+0000 m31200| 2015-07-19T23:39:36.301+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_31 [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:36.339+0000 m31200| 2015-07-19T23:39:36.339+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_31 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.341+0000 m31200| 2015-07-19T23:39:36.341+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_31 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.341+0000 m31200| 2015-07-19T23:39:36.341+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_31 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.342+0000 m31100| 2015-07-19T23:39:36.341+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349175_5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.342+0000 m31201| 2015-07-19T23:39:36.342+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_31 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.343+0000 m31200| 2015-07-19T23:39:36.343+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349175_5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.343+0000 m31202| 2015-07-19T23:39:36.343+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_31 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.345+0000 m31101| 2015-07-19T23:39:36.345+0000 I COMMAND [repl writer worker 8] CMD: drop db10.tmp.mrs.coll10_1437349175_5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.345+0000 m31102| 2015-07-19T23:39:36.345+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349175_5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.349+0000 m31202| 2015-07-19T23:39:36.349+0000 I COMMAND [repl writer worker 4] CMD: drop db10.tmp.mrs.coll10_1437349175_5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.363+0000 m31201| 2015-07-19T23:39:36.362+0000 I COMMAND [repl writer worker 3] CMD: drop db10.tmp.mrs.coll10_1437349175_5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.375+0000 m31200| 2015-07-19T23:39:36.375+0000 I COMMAND [conn123] CMD: drop db10.tmp.mrs.coll10_1437349175_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.382+0000 m31100| 2015-07-19T23:39:36.382+0000 I COMMAND [conn53] CMD: drop db10.tmp.mrs.coll10_1437349176_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.388+0000 m31200| 2015-07-19T23:39:36.388+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.389+0000 m31200| 2015-07-19T23:39:36.388+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.389+0000 m31100| 2015-07-19T23:39:36.389+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.390+0000 m31100| 2015-07-19T23:39:36.390+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.390+0000 m31100| 2015-07-19T23:39:36.390+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.399+0000 m31200| 2015-07-19T23:39:36.399+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.401+0000 m31200| 2015-07-19T23:39:36.401+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_32 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.403+0000 m31100| 2015-07-19T23:39:36.403+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_17 [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:36.405+0000 m31200| 2015-07-19T23:39:36.405+0000 I COMMAND [conn123] command db10.tmp.mrs.coll10_1437349175_4 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.406+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.406+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.406+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.406+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_4", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 12, W: 1 }, timeAcquiringMicros: { r: 11203, w: 6448, W: 47 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 3, w: 19, R: 11, W: 6 }, timeAcquiringMicros: { r: 5166, w: 120097, R: 37267, W: 33489 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 526ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.407+0000 m31200| 2015-07-19T23:39:36.406+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_33 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.411+0000 m31200| 2015-07-19T23:39:36.410+0000 I COMMAND [conn122] CMD: drop db10.tmp.mrs.coll10_1437349175_9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.412+0000 m31100| 2015-07-19T23:39:36.412+0000 I COMMAND [conn53] command db10.tmp.mrs.coll10_1437349176_11 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.412+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.412+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.413+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.413+0000 m31100| values...., out: "tmp.mrs.coll10_1437349176_11", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 1, W: 6 }, timeAcquiringMicros: { w: 12061, W: 25497 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 184ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.414+0000 m31200| 2015-07-19T23:39:36.413+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.414+0000 m31200| 2015-07-19T23:39:36.414+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.415+0000 m31200| 2015-07-19T23:39:36.415+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.431+0000 m31200| 2015-07-19T23:39:36.431+0000 I COMMAND [conn122] command db10.tmp.mrs.coll10_1437349175_9 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.431+0000 m31200| if (this.hasOwnProperty('key') && this.has..., 
reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.431+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.431+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.432+0000 m31200| values...., out: "tmp.mrs.coll10_1437349175_9", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 13, W: 1 }, timeAcquiringMicros: { r: 13981, w: 17216, W: 3011 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 6, w: 17, R: 11, W: 7 }, timeAcquiringMicros: { r: 15295, w: 108683, R: 67484, W: 48554 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 497ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.432+0000 m31200| 2015-07-19T23:39:36.431+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_34 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.454+0000 m31200| 2015-07-19T23:39:36.454+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_33 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.465+0000 m31100| 2015-07-19T23:39:36.465+0000 I COMMAND [conn58] CMD: drop db10.tmp.mrs.coll10_1437349176_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.468+0000 m31100| 2015-07-19T23:39:36.468+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.468+0000 m31100| 2015-07-19T23:39:36.468+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.470+0000 m31100| 2015-07-19T23:39:36.470+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.471+0000 m31100| 2015-07-19T23:39:36.470+0000 I COMMAND [conn58] command db10.tmp.mrs.coll10_1437349176_10 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.471+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.471+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.471+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.471+0000 m31100| values...., out: "tmp.mrs.coll10_1437349176_10", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 3562 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 2, R: 3 }, timeAcquiringMicros: { r: 464, w: 23043, R: 11898 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 258ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.474+0000 m31200| 2015-07-19T23:39:36.474+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_33 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.475+0000 m31200| 2015-07-19T23:39:36.475+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_33 [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:36.476+0000 m31100| 2015-07-19T23:39:36.476+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349175_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.476+0000 m31201| 2015-07-19T23:39:36.476+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_33 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.477+0000 m31202| 2015-07-19T23:39:36.477+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_33 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.478+0000 m31200| 2015-07-19T23:39:36.478+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349175_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.480+0000 m31101| 2015-07-19T23:39:36.479+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349175_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.480+0000 m31102| 2015-07-19T23:39:36.479+0000 I COMMAND [repl writer worker 13] CMD: drop db10.tmp.mrs.coll10_1437349175_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.484+0000 m31200| 2015-07-19T23:39:36.484+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_34 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.484+0000 m31200| 2015-07-19T23:39:36.484+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_34 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.485+0000 m31200| 2015-07-19T23:39:36.485+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_34 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.486+0000 m31100| 2015-07-19T23:39:36.485+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349175_9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.486+0000 m31201| 2015-07-19T23:39:36.485+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349175_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.487+0000 m31202| 2015-07-19T23:39:36.486+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349175_4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.487+0000 m31201| 2015-07-19T23:39:36.487+0000 I COMMAND [repl writer worker 3] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_34 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.488+0000 m31202| 2015-07-19T23:39:36.488+0000 I COMMAND [repl writer worker 3] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_34 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.488+0000 m31100| 2015-07-19T23:39:36.488+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.488+0000 m31200| 2015-07-19T23:39:36.488+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349175_9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.489+0000 m31102| 2015-07-19T23:39:36.488+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349175_9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.489+0000 m31101| 2015-07-19T23:39:36.489+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349175_9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.493+0000 m31200| 2015-07-19T23:39:36.492+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_35 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.494+0000 m31202| 2015-07-19T23:39:36.493+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349175_9 [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:36.494+0000 m31201| 2015-07-19T23:39:36.494+0000 I COMMAND [repl writer worker 1] CMD: drop db10.tmp.mrs.coll10_1437349175_9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.505+0000 m31100| 2015-07-19T23:39:36.504+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.512+0000 m31200| 2015-07-19T23:39:36.512+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_36 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.620+0000 m31200| 2015-07-19T23:39:36.619+0000 I COMMAND [conn103] CMD: drop db10.tmp.mrs.coll10_1437349176_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.622+0000 m31200| 2015-07-19T23:39:36.621+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_29 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.622+0000 m31200| 2015-07-19T23:39:36.621+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_29 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.622+0000 m31200| 2015-07-19T23:39:36.622+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_29 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.639+0000 m31200| 2015-07-19T23:39:36.638+0000 I COMMAND [conn103] command db10.tmp.mrs.coll10_1437349176_10 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.639+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.639+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.639+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.640+0000 m31200| values...., out: "tmp.mrs.coll10_1437349176_10", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 4, W: 1 }, timeAcquiringMicros: { r: 12504, W: 17899 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 6, w: 15, R: 11, W: 10 }, timeAcquiringMicros: { r: 25483, w: 90269, R: 34327, W: 18480 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 421ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.640+0000 m31200| 2015-07-19T23:39:36.639+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_37 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.669+0000 m31100| 2015-07-19T23:39:36.669+0000 I COMMAND [conn23] CMD: drop db10.tmp.mrs.coll10_1437349176_7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.671+0000 m31100| 2015-07-19T23:39:36.671+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.671+0000 m31100| 2015-07-19T23:39:36.671+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.674+0000 m31100| 2015-07-19T23:39:36.673+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.674+0000 m31100| 2015-07-19T23:39:36.673+0000 I COMMAND [conn23] command db10.tmp.mrs.coll10_1437349176_7 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.674+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.675+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.675+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.675+0000 m31100| values...., out: "tmp.mrs.coll10_1437349176_7", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 9878 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 1, R: 5 }, timeAcquiringMicros: { r: 1344, w: 18737, R: 15646 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 187ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.695+0000 m31100| 2015-07-19T23:39:36.695+0000 I COMMAND [conn119] CMD: drop db10.tmp.mrs.coll10_1437349176_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.696+0000 m31100| 2015-07-19T23:39:36.696+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_18
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.697+0000 m31100| 2015-07-19T23:39:36.696+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_18
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.700+0000 m31100| 2015-07-19T23:39:36.700+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_18
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.701+0000 m31100| 2015-07-19T23:39:36.701+0000 I COMMAND [conn119] command db10.tmp.mrs.coll10_1437349176_6 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.701+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.701+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.702+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.702+0000 m31100| values...., out: "tmp.mrs.coll10_1437349176_6", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 1791, w: 6474, W: 5697 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 4, R: 7, W: 1 }, timeAcquiringMicros: { r: 43, w: 37320, R: 26684, W: 295 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 312ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.702+0000 m31200| 2015-07-19T23:39:36.701+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_37
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.703+0000 m31200| 2015-07-19T23:39:36.703+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_37
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.703+0000 m31200| 2015-07-19T23:39:36.703+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_37
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.704+0000 m31100| 2015-07-19T23:39:36.704+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349176_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.705+0000 m31202| 2015-07-19T23:39:36.704+0000 I COMMAND [repl writer worker 10] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_37
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.705+0000 m31200| 2015-07-19T23:39:36.705+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349176_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.707+0000 m31101| 2015-07-19T23:39:36.707+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349176_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.707+0000 m31102| 2015-07-19T23:39:36.707+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349176_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.709+0000 m31201| 2015-07-19T23:39:36.708+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_37
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.712+0000 m31202| 2015-07-19T23:39:36.711+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349176_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.713+0000 m31201| 2015-07-19T23:39:36.712+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349176_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.716+0000 m31100| 2015-07-19T23:39:36.716+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_21
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.717+0000 m31200| 2015-07-19T23:39:36.717+0000 I COMMAND [conn22] CMD: drop db10.tmp.mrs.coll10_1437349176_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.719+0000 m31200| 2015-07-19T23:39:36.718+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_30
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.720+0000 m31200| 2015-07-19T23:39:36.719+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_30
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.721+0000 m31200| 2015-07-19T23:39:36.719+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_30
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.721+0000 m31200| 2015-07-19T23:39:36.719+0000 I COMMAND [conn22] command db10.tmp.mrs.coll10_1437349176_11 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.721+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.721+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.721+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.722+0000 m31200| values...., out: "tmp.mrs.coll10_1437349176_11", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1 }, timeAcquiringMicros: { r: 9693, w: 19855 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 2, w: 21, R: 12, W: 7 }, timeAcquiringMicros: { r: 2147, w: 165192, R: 57344, W: 8519 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 491ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.722+0000 m31200| 2015-07-19T23:39:36.720+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_39
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.735+0000 m31200| 2015-07-19T23:39:36.735+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_38
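The slow-operation logger truncates the workload's map and reduce bodies above ("this.has...", "values...."). A minimal mongo-shell sketch of a map/reduce pair with this shape follows; the emit payload, the res fields, and the coll10_out target are illustrative assumptions, not the workload's actual source:

    var mapper = function mapper() {
        // Guard consistent with the visible fragment
        // "if (this.hasOwnProperty('key') && this.has...".
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, { count: 1 }); // hypothetical emit payload
        }
    };
    var reducer = function reducer(key, values) {
        var res = { count: 0 }; // the log only shows "var res = {};"
        values.forEach(function(v) {
            res.count += v.count;
        });
        return res;
    };
    // Non-atomic merge output, matching the map_reduce_merge_nonatomic*
    // databases that appear throughout this log.
    db.coll10.mapReduce(mapper, reducer, { out: { merge: 'coll10_out', nonAtomic: true } });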
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.748+0000 m31200| 2015-07-19T23:39:36.748+0000 I COMMAND [conn117] CMD: drop db10.tmp.mrs.coll10_1437349176_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.749+0000 m31200| 2015-07-19T23:39:36.749+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_32
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.749+0000 m31200| 2015-07-19T23:39:36.749+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_32
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.751+0000 m31200| 2015-07-19T23:39:36.751+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_32
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.756+0000 m31200| 2015-07-19T23:39:36.755+0000 I COMMAND [conn117] command db10.tmp.mrs.coll10_1437349176_6 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.756+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.756+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.756+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.756+0000 m31200| values...., out: "tmp.mrs.coll10_1437349176_6", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 1130, w: 8940, W: 23963 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 8, R: 10, W: 4 }, timeAcquiringMicros: { r: 7157, w: 30623, R: 43586, W: 21721 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 367ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.757+0000 m31200| 2015-07-19T23:39:36.756+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_40
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.785+0000 m31200| 2015-07-19T23:39:36.784+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_39
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.786+0000 m31200| 2015-07-19T23:39:36.786+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_39
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.787+0000 m31200| 2015-07-19T23:39:36.786+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_39
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.787+0000 m31100| 2015-07-19T23:39:36.787+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349176_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.788+0000 m31202| 2015-07-19T23:39:36.788+0000 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_39
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.789+0000 m31201| 2015-07-19T23:39:36.788+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_39
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.794+0000 m31200| 2015-07-19T23:39:36.794+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349176_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.796+0000 m31101| 2015-07-19T23:39:36.796+0000 I COMMAND [repl writer worker 4] CMD: drop db10.tmp.mrs.coll10_1437349176_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.797+0000 m31102| 2015-07-19T23:39:36.796+0000 I COMMAND [repl writer worker 3] CMD: drop db10.tmp.mrs.coll10_1437349176_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.799+0000 m31202| 2015-07-19T23:39:36.798+0000 I COMMAND [repl writer worker 1] CMD: drop db10.tmp.mrs.coll10_1437349176_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.799+0000 m31201| 2015-07-19T23:39:36.799+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349176_11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.800+0000 m31200| 2015-07-19T23:39:36.800+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_41
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.806+0000 m31200| 2015-07-19T23:39:36.805+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_40
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.806+0000 m31200| 2015-07-19T23:39:36.806+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_40
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.807+0000 m31200| 2015-07-19T23:39:36.807+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_40
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.807+0000 m31100| 2015-07-19T23:39:36.807+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349176_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.809+0000 m31202| 2015-07-19T23:39:36.808+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_40
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.809+0000 m31201| 2015-07-19T23:39:36.808+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_40
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.815+0000 m31100| 2015-07-19T23:39:36.815+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_22
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.818+0000 m31200| 2015-07-19T23:39:36.817+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349176_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.819+0000 m31101| 2015-07-19T23:39:36.818+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349176_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.819+0000 m31102| 2015-07-19T23:39:36.819+0000 I COMMAND [repl writer worker 15] CMD: drop db10.tmp.mrs.coll10_1437349176_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.829+0000 m31202| 2015-07-19T23:39:36.829+0000 I COMMAND [repl writer worker 8] CMD: drop db10.tmp.mrs.coll10_1437349176_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.829+0000 m31201| 2015-07-19T23:39:36.829+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349176_6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.832+0000 m31100| 2015-07-19T23:39:36.832+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_23
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.835+0000 m31200| 2015-07-19T23:39:36.835+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_42
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.855+0000 m31200| 2015-07-19T23:39:36.855+0000 I COMMAND [conn123] CMD: drop db10.tmp.mrs.coll10_1437349176_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.858+0000 m31200| 2015-07-19T23:39:36.858+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_35
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.858+0000 m31200| 2015-07-19T23:39:36.858+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_35
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.858+0000 m31200| 2015-07-19T23:39:36.858+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_35
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.865+0000 m31200| 2015-07-19T23:39:36.864+0000 I COMMAND [conn123] command db10.tmp.mrs.coll10_1437349176_7 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.865+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.865+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.865+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.865+0000 m31200| values...., out: "tmp.mrs.coll10_1437349176_7", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 1753, w: 15922, W: 45 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 8, w: 23, R: 13, W: 7 }, timeAcquiringMicros: { r: 21370, w: 86062, R: 34925, W: 18674 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 375ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.866+0000 m31200| 2015-07-19T23:39:36.866+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_43
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.914+0000 m31200| 2015-07-19T23:39:36.913+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_43
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.916+0000 m31100| 2015-07-19T23:39:36.916+0000 I COMMAND [conn54] CMD: drop db10.tmp.mrs.coll10_1437349176_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.918+0000 m31100| 2015-07-19T23:39:36.918+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_20
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.918+0000 m31100| 2015-07-19T23:39:36.918+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_20
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.922+0000 m31100| 2015-07-19T23:39:36.921+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_20
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.922+0000 m31100| 2015-07-19T23:39:36.921+0000 I COMMAND [conn54] command db10.tmp.mrs.coll10_1437349176_12 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.922+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.922+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.922+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.923+0000 m31100| values...., out: "tmp.mrs.coll10_1437349176_12", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 15283, W: 27 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 19, R: 3, W: 5 }, timeAcquiringMicros: { w: 143472, R: 15207, W: 45840 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 417ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.926+0000 m31200| 2015-07-19T23:39:36.925+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_43
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.926+0000 m31200| 2015-07-19T23:39:36.926+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_43
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.926+0000 m31100| 2015-07-19T23:39:36.926+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349176_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.927+0000 m31200| 2015-07-19T23:39:36.926+0000 I COMMAND [conn122] CMD: drop db10.tmp.mrs.coll10_1437349176_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.928+0000 m31200| 2015-07-19T23:39:36.928+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_36
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.928+0000 m31200| 2015-07-19T23:39:36.928+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_36
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.929+0000 m31202| 2015-07-19T23:39:36.929+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_43
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.929+0000 m31201| 2015-07-19T23:39:36.929+0000 I COMMAND [repl writer worker 14] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_43
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.929+0000 m31200| 2015-07-19T23:39:36.929+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_36
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.930+0000 m31200| 2015-07-19T23:39:36.929+0000 I COMMAND [conn122] command db10.tmp.mrs.coll10_1437349176_12 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.930+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.932+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.933+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.933+0000 m31200| values...., out: "tmp.mrs.coll10_1437349176_12", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 10960, w: 3080, W: 20 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 6, w: 21, R: 14, W: 5 }, timeAcquiringMicros: { r: 15338, w: 106522, R: 28641, W: 23856 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 423ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.933+0000 m31200| 2015-07-19T23:39:36.930+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_44
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.944+0000 m31200| 2015-07-19T23:39:36.943+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349176_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.945+0000 m31101| 2015-07-19T23:39:36.945+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349176_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.947+0000 m31102| 2015-07-19T23:39:36.946+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349176_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.958+0000 m31201| 2015-07-19T23:39:36.957+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349176_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.958+0000 m31202| 2015-07-19T23:39:36.957+0000 I COMMAND [repl writer worker 5] CMD: drop db10.tmp.mrs.coll10_1437349176_7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.958+0000 m31100| 2015-07-19T23:39:36.957+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_24
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:36.958+0000 m31200| 2015-07-19T23:39:36.957+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_45
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.005+0000 m31200| 2015-07-19T23:39:37.005+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_44
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.015+0000 m31200| 2015-07-19T23:39:37.015+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_44
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.016+0000 m31200| 2015-07-19T23:39:37.015+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_44
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.016+0000 m31100| 2015-07-19T23:39:37.016+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349176_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.030+0000 m31200| 2015-07-19T23:39:37.029+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349176_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.030+0000 m31102| 2015-07-19T23:39:37.030+0000 I COMMAND [repl writer worker 1] CMD: drop db10.tmp.mrs.coll10_1437349176_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.031+0000 m31101| 2015-07-19T23:39:37.030+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349176_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.036+0000 m31201| 2015-07-19T23:39:37.036+0000 I COMMAND [repl writer worker 1] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_44
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.036+0000 m31202| 2015-07-19T23:39:37.036+0000 I COMMAND [repl writer worker 3] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_44
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.038+0000 m31201| 2015-07-19T23:39:37.038+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349176_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.038+0000 m31202| 2015-07-19T23:39:37.038+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349176_12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.050+0000 m31200| 2015-07-19T23:39:37.050+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_46
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.052+0000 m31100| 2015-07-19T23:39:37.051+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_25
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.101+0000 m31200| 2015-07-19T23:39:37.101+0000 I COMMAND [conn103] CMD: drop db10.tmp.mrs.coll10_1437349176_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.107+0000 m31200| 2015-07-19T23:39:37.107+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_38
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.107+0000 m31200| 2015-07-19T23:39:37.107+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_38
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.107+0000 m31200| 2015-07-19T23:39:37.107+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_38
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.119+0000 m31200| 2015-07-19T23:39:37.119+0000 I COMMAND [conn103] command db10.tmp.mrs.coll10_1437349176_13 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.119+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.120+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.120+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.120+0000 m31200| values...., out: "tmp.mrs.coll10_1437349176_13", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 5489, w: 13899, W: 300 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { w: 12, R: 10, W: 7 }, timeAcquiringMicros: { w: 62092, R: 21574, W: 20859 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 401ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.126+0000 m31100| 2015-07-19T23:39:37.126+0000 I COMMAND [conn58] CMD: drop db10.tmp.mrs.coll10_1437349176_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.128+0000 m31100| 2015-07-19T23:39:37.127+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_21
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.128+0000 m31100| 2015-07-19T23:39:37.127+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_21
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.128+0000 m31100| 2015-07-19T23:39:37.128+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_21
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.130+0000 m31100| 2015-07-19T23:39:37.130+0000 I COMMAND [conn58] command db10.tmp.mrs.coll10_1437349176_13 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.130+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.130+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.130+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.131+0000 m31100| values...., out: "tmp.mrs.coll10_1437349176_13", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 424 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 18, R: 8, W: 2 }, timeAcquiringMicros: { w: 127595, R: 49998, W: 113 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 414ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.131+0000 m31200| 2015-07-19T23:39:37.131+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_47
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.133+0000 m31100| 2015-07-19T23:39:37.133+0000 I COMMAND [conn53] CMD: drop db10.tmp.mrs.coll10_1437349176_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.135+0000 m31100| 2015-07-19T23:39:37.135+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_22
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.135+0000 m31100| 2015-07-19T23:39:37.135+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_22
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.135+0000 m31100| 2015-07-19T23:39:37.135+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_22
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.136+0000 m31100| 2015-07-19T23:39:37.136+0000 I COMMAND [conn53] command db10.tmp.mrs.coll10_1437349176_14 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.136+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.136+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.136+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.137+0000 m31100| values...., out: "tmp.mrs.coll10_1437349176_14", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 2525, W: 1564 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 3, w: 7, R: 12, W: 7 }, timeAcquiringMicros: { r: 371, w: 35200, R: 65468, W: 20625 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 336ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.137+0000 m31100| 2015-07-19T23:39:37.137+0000 I COMMAND [conn119] CMD: drop db10.tmp.mrs.coll10_1437349176_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.140+0000 m31100| 2015-07-19T23:39:37.140+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_23
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.140+0000 m31100| 2015-07-19T23:39:37.140+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_23
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.141+0000 m31100| 2015-07-19T23:39:37.141+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_23
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.142+0000 m31100| 2015-07-19T23:39:37.141+0000 I COMMAND [conn119] command db10.tmp.mrs.coll10_1437349176_8 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.142+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.142+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.142+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.142+0000 m31100| values...., out: "tmp.mrs.coll10_1437349176_8", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:229 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1 }, timeAcquiringMicros: { r: 12301, w: 2213 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 5, R: 10, W: 6 }, timeAcquiringMicros: { r: 2844, w: 9737, R: 63788, W: 7603 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 312ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.143+0000 m31200| 2015-07-19T23:39:37.142+0000 I COMMAND [conn22] CMD: drop db10.tmp.mrs.coll10_1437349176_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.147+0000 m31200| 2015-07-19T23:39:37.146+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_41
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.147+0000 m31200| 2015-07-19T23:39:37.146+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_41
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.148+0000 m31200| 2015-07-19T23:39:37.148+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_41
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.148+0000 m31200| 2015-07-19T23:39:37.148+0000 I COMMAND [conn22] command db10.tmp.mrs.coll10_1437349176_14 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.148+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.149+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.150+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.151+0000 m31200| values...., out: "tmp.mrs.coll10_1437349176_14", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:230 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 6680, W: 3889 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 2, w: 12, R: 12, W: 7 }, timeAcquiringMicros: { r: 6334, w: 79927, R: 36677, W: 1161 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 348ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.151+0000 m31200| 2015-07-19T23:39:37.148+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_48
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.184+0000 m31200| 2015-07-19T23:39:37.184+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_47
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.187+0000 m31200| 2015-07-19T23:39:37.187+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_47
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.187+0000 m31200| 2015-07-19T23:39:37.187+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_47
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.188+0000 m31202| 2015-07-19T23:39:37.188+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_47
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.188+0000 m31100| 2015-07-19T23:39:37.188+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349176_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.189+0000 m31201| 2015-07-19T23:39:37.189+0000 I COMMAND [repl writer worker 1] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_47
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.191+0000 m31200| 2015-07-19T23:39:37.191+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349176_13
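Each "command ... protocol:op_query NNNms" record above is mongod's slow-operation line: the locks document reports, per lock level (Global, Database, Collection, Metadata, oplog), how many intent (r/w) and shared/exclusive (R/W) acquisitions were made (acquireCount) and how long acquisition blocked (timeAcquiringMicros). Comparable operations can be surfaced on a live node with the database profiler; a sketch, assuming the shell's db points at db10:

    // Record operations slower than 100 ms, then inspect the worst offenders.
    db.setProfilingLevel(1, 100);
    db.system.profile.find({ millis: { $gte: 100 } }).sort({ millis: -1 }).limit(5).pretty();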
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.192+0000 m31101| 2015-07-19T23:39:37.192+0000 I COMMAND [repl writer worker 4] CMD: drop db10.tmp.mrs.coll10_1437349176_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.193+0000 m31102| 2015-07-19T23:39:37.193+0000 I COMMAND [repl writer worker 6] CMD: drop db10.tmp.mrs.coll10_1437349176_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.202+0000 m31100| 2015-07-19T23:39:37.201+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_26
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.202+0000 m31200| 2015-07-19T23:39:37.202+0000 I COMMAND [conn117] CMD: drop db10.tmp.mrs.coll10_1437349176_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.204+0000 m31200| 2015-07-19T23:39:37.203+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_42
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.204+0000 m31200| 2015-07-19T23:39:37.203+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_42
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.204+0000 m31200| 2015-07-19T23:39:37.204+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_42
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.205+0000 m31202| 2015-07-19T23:39:37.205+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349176_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.205+0000 m31201| 2015-07-19T23:39:37.205+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349176_13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.205+0000 m31200| 2015-07-19T23:39:37.205+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_49
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.216+0000 m31200| 2015-07-19T23:39:37.215+0000 I COMMAND [conn117] command db10.tmp.mrs.coll10_1437349176_8 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.216+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.216+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.216+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.217+0000 m31200| values...., out: "tmp.mrs.coll10_1437349176_8", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3033, w: 8331, W: 25 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 9, R: 10, W: 7 }, timeAcquiringMicros: { r: 1130, w: 60269, R: 36333, W: 22935 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 386ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.217+0000 m31200| 2015-07-19T23:39:37.216+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_50
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.218+0000 m31200| 2015-07-19T23:39:37.218+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_48
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.225+0000 m31200| 2015-07-19T23:39:37.224+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_48
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.227+0000 m31200| 2015-07-19T23:39:37.227+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_48
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.228+0000 m31100| 2015-07-19T23:39:37.228+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349176_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.229+0000 m31201| 2015-07-19T23:39:37.229+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_48
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.231+0000 m31202| 2015-07-19T23:39:37.231+0000 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_48
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.236+0000 m31200| 2015-07-19T23:39:37.236+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349176_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.238+0000 m31201| 2015-07-19T23:39:37.238+0000 I COMMAND [repl writer worker 5] CMD: drop db10.tmp.mrs.coll10_1437349176_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.238+0000 m31202| 2015-07-19T23:39:37.238+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349176_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.239+0000 m31102| 2015-07-19T23:39:37.239+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349176_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.239+0000 m31101| 2015-07-19T23:39:37.239+0000 I COMMAND [repl writer worker 3] CMD: drop db10.tmp.mrs.coll10_1437349176_14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.252+0000 m31100| 2015-07-19T23:39:37.252+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_27
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.254+0000 m31200| 2015-07-19T23:39:37.253+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_51
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.272+0000 m31200| 2015-07-19T23:39:37.272+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_50
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.275+0000 m31200| 2015-07-19T23:39:37.274+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_50
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.275+0000 m31200| 2015-07-19T23:39:37.275+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_50
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.276+0000 m31100| 2015-07-19T23:39:37.275+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349176_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.276+0000 m31201| 2015-07-19T23:39:37.276+0000 I COMMAND [repl writer worker 2] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_50
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.280+0000 m31202| 2015-07-19T23:39:37.280+0000 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_50
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.286+0000 m31200| 2015-07-19T23:39:37.286+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349176_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.288+0000 m31102| 2015-07-19T23:39:37.288+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349176_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.289+0000 m31101| 2015-07-19T23:39:37.288+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349176_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.303+0000 m31202| 2015-07-19T23:39:37.302+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349176_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.303+0000 m31201| 2015-07-19T23:39:37.302+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349176_8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.309+0000 m31100| 2015-07-19T23:39:37.308+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_28
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.312+0000 m31200| 2015-07-19T23:39:37.311+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_52
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.383+0000 m31100| 2015-07-19T23:39:37.382+0000 I COMMAND [conn23] CMD: drop db10.tmp.mrs.coll10_1437349176_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.388+0000 m31100| 2015-07-19T23:39:37.388+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_24
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.388+0000 m31100| 2015-07-19T23:39:37.388+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_24
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.389+0000 m31200| 2015-07-19T23:39:37.389+0000 I COMMAND [conn123] CMD: drop db10.tmp.mrs.coll10_1437349176_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.390+0000 m31100| 2015-07-19T23:39:37.390+0000 I COMMAND [conn54] CMD: drop db10.tmp.mrs.coll10_1437349177_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.393+0000 m31100| 2015-07-19T23:39:37.393+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_25
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.393+0000 m31100| 2015-07-19T23:39:37.393+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_25
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.394+0000 m31200| 2015-07-19T23:39:37.394+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_45
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.394+0000 m31200| 2015-07-19T23:39:37.394+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_45
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.395+0000 m31100| 2015-07-19T23:39:37.394+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_24
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.395+0000 m31100| 2015-07-19T23:39:37.395+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_25
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.395+0000 m31100| 2015-07-19T23:39:37.395+0000 I COMMAND [conn54] command db10.tmp.mrs.coll10_1437349177_15 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.396+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.396+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.396+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.396+0000 m31100| values...., out: "tmp.mrs.coll10_1437349177_15", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 7248 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { w: 5, R: 9, W: 5 }, timeAcquiringMicros: { w: 33109, R: 33393, W: 12279 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 358ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.397+0000 m31200| 2015-07-19T23:39:37.395+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_45
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.397+0000 m31100| 2015-07-19T23:39:37.395+0000 I COMMAND [conn23] command db10.tmp.mrs.coll10_1437349176_9 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.397+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.397+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.398+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.398+0000 m31100| values...., out: "tmp.mrs.coll10_1437349176_9", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 3103, W: 1722 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 20, R: 8, W: 5 }, timeAcquiringMicros: { w: 112848, R: 38766, W: 15932 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 437ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.398+0000 m31200| 2015-07-19T23:39:37.396+0000 I COMMAND [conn123] command db10.tmp.mrs.coll10_1437349176_9 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.398+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.398+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.399+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.404+0000 m31200| values...., out: "tmp.mrs.coll10_1437349176_9", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:229 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 15715, W: 3646 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 16, R: 9, W: 5 }, timeAcquiringMicros: { w: 123677, R: 32186, W: 23158 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 438ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.404+0000 m31200| 2015-07-19T23:39:37.397+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_53
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.404+0000 m31200| 2015-07-19T23:39:37.397+0000 I COMMAND [conn122] CMD: drop db10.tmp.mrs.coll10_1437349177_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.405+0000 m31200| 2015-07-19T23:39:37.398+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_46
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.407+0000 m31200| 2015-07-19T23:39:37.399+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_46
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.408+0000 m31200| 2015-07-19T23:39:37.404+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_46
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.435+0000 m31200| 2015-07-19T23:39:37.435+0000 I COMMAND [conn122] command db10.tmp.mrs.coll10_1437349177_15 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.435+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.436+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.436+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.436+0000 m31200| values...., out: "tmp.mrs.coll10_1437349177_15", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 12271, W: 50 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 2, w: 7, R: 14, W: 7 }, timeAcquiringMicros: { r: 1106, w: 38934, R: 56724, W: 45332 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 397ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.437+0000 m31200| 2015-07-19T23:39:37.437+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_54
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.477+0000 m31100| 2015-07-19T23:39:37.477+0000 I COMMAND [conn58] CMD: drop db10.tmp.mrs.coll10_1437349177_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.479+0000 m31100| 2015-07-19T23:39:37.479+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_26
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.479+0000 m31100| 2015-07-19T23:39:37.479+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_26
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.480+0000 m31100| 2015-07-19T23:39:37.480+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_26
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.481+0000 m31100| 2015-07-19T23:39:37.480+0000 I COMMAND [conn58] command db10.tmp.mrs.coll10_1437349177_16 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.481+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.481+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.481+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.481+0000 m31100| values...., out: "tmp.mrs.coll10_1437349177_16", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2112 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 4, R: 7, W: 2 }, timeAcquiringMicros: { w: 38413, R: 30427, W: 8331 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 279ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.482+0000 m31200| 2015-07-19T23:39:37.481+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_54
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.483+0000 m31200| 2015-07-19T23:39:37.483+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_53
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.489+0000 m31200| 2015-07-19T23:39:37.489+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_54
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.489+0000 m31200| 2015-07-19T23:39:37.489+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_54
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.490+0000 m31100| 2015-07-19T23:39:37.489+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349177_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.490+0000 m31201| 2015-07-19T23:39:37.490+0000 I COMMAND [repl writer worker 1] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_54
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.491+0000 m31202| 2015-07-19T23:39:37.490+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_54
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.493+0000 m31201| 2015-07-19T23:39:37.493+0000 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_53
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.493+0000 m31202| 2015-07-19T23:39:37.493+0000 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_53
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.495+0000 m31200| 2015-07-19T23:39:37.495+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349177_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.496+0000 m31200| 2015-07-19T23:39:37.496+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_53
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.496+0000 m31200| 2015-07-19T23:39:37.496+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_53
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.496+0000 m31100| 2015-07-19T23:39:37.496+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349176_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.499+0000 m31101| 2015-07-19T23:39:37.497+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349177_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.499+0000 m31102| 2015-07-19T23:39:37.497+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349177_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.503+0000 m31202| 2015-07-19T23:39:37.502+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349177_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.503+0000 m31201| 2015-07-19T23:39:37.503+0000 I COMMAND [repl writer worker 5] CMD: drop db10.tmp.mrs.coll10_1437349177_15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.505+0000 m31200| 2015-07-19T23:39:37.505+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_55
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.510+0000 m31200| 2015-07-19T23:39:37.510+0000 I COMMAND [conn103] CMD: drop db10.tmp.mrs.coll10_1437349177_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.511+0000 m31100| 2015-07-19T23:39:37.510+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_29
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.511+0000 m31200| 2015-07-19T23:39:37.510+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349176_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.512+0000 m31200| 2015-07-19T23:39:37.512+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_49
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.513+0000 m31102| 2015-07-19T23:39:37.512+0000 I COMMAND [repl writer worker 8] CMD: drop db10.tmp.mrs.coll10_1437349176_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.514+0000 m31101| 2015-07-19T23:39:37.513+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349176_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.516+0000 m31200| 2015-07-19T23:39:37.516+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_49
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.516+0000 m31200| 2015-07-19T23:39:37.516+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_49
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.517+0000 m31200| 2015-07-19T23:39:37.517+0000 I COMMAND [conn103] command db10.tmp.mrs.coll10_1437349177_16 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.517+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.517+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.517+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.518+0000 m31200| values...., out: "tmp.mrs.coll10_1437349177_16", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 1547, W: 6110 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 3, R: 8, W: 6 }, timeAcquiringMicros: { r: 901, w: 14390, R: 22444, W: 24933 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 314ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.518+0000 m31200| 2015-07-19T23:39:37.518+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_57
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.519+0000 m31202| 2015-07-19T23:39:37.519+0000 I COMMAND [repl writer worker 4] CMD: drop db10.tmp.mrs.coll10_1437349176_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.520+0000 m31201| 2015-07-19T23:39:37.520+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349176_9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.526+0000 m31100| 2015-07-19T23:39:37.525+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_30
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.529+0000 m31200| 2015-07-19T23:39:37.529+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_56
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.573+0000 m31200| 2015-07-19T23:39:37.573+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_57
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.576+0000 m31200| 2015-07-19T23:39:37.576+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_57
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.576+0000 m31200| 2015-07-19T23:39:37.576+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_57
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.577+0000 m31100| 2015-07-19T23:39:37.577+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349177_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.582+0000 m31200| 2015-07-19T23:39:37.582+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349177_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.588+0000 m31102| 2015-07-19T23:39:37.587+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349177_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.589+0000 m31101| 2015-07-19T23:39:37.589+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349177_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.594+0000 m31201| 2015-07-19T23:39:37.594+0000 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_57
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.594+0000 m31202| 2015-07-19T23:39:37.594+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_57
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.597+0000 m31100| 2015-07-19T23:39:37.597+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_31
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.598+0000 m31200| 2015-07-19T23:39:37.598+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_58
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.601+0000 m31201| 2015-07-19T23:39:37.601+0000 I COMMAND [repl writer worker 6] CMD: drop db10.tmp.mrs.coll10_1437349177_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.602+0000 m31202| 2015-07-19T23:39:37.602+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349177_16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.727+0000 m31100| 2015-07-19T23:39:37.727+0000 I COMMAND [conn119] CMD: drop db10.tmp.mrs.coll10_1437349177_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.729+0000 m31100| 2015-07-19T23:39:37.729+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_28
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.730+0000 m31100| 2015-07-19T23:39:37.729+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_28
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.730+0000 m31100| 2015-07-19T23:39:37.730+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_28
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.731+0000 m31100| 2015-07-19T23:39:37.731+0000 I COMMAND [conn53] CMD: drop db10.tmp.mrs.coll10_1437349177_17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.735+0000 m31100| 2015-07-19T23:39:37.735+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_27
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.735+0000 m31100| 2015-07-19T23:39:37.735+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_27
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.736+0000 m31100| 2015-07-19T23:39:37.735+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_27
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.736+0000 m31100| 2015-07-19T23:39:37.736+0000 I COMMAND [conn119] command db10.tmp.mrs.coll10_1437349177_10 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.736+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.737+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.737+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.737+0000 m31100| values...., out: "tmp.mrs.coll10_1437349177_10", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 4383, W: 43 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { w: 11, R: 12, W: 7 }, timeAcquiringMicros: { w: 89317, R: 57675, W: 9415 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 434ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.741+0000 m31100| 2015-07-19T23:39:37.740+0000 I COMMAND [conn53] command db10.tmp.mrs.coll10_1437349177_17 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.741+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.741+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.742+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.742+0000 m31100| values...., out: "tmp.mrs.coll10_1437349177_17", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 8988, w: 2916, W: 65 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 15, R: 9, W: 7 }, timeAcquiringMicros: { w: 103215, R: 45939, W: 19310 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 488ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.759+0000 m31200| 2015-07-19T23:39:37.758+0000 I COMMAND [conn117] CMD: drop db10.tmp.mrs.coll10_1437349177_10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.760+0000 m31200| 2015-07-19T23:39:37.760+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_52
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.760+0000 m31200| 2015-07-19T23:39:37.760+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_52
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.761+0000 m31200| 2015-07-19T23:39:37.761+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_52
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.762+0000 m31200| 2015-07-19T23:39:37.761+0000 I COMMAND [conn117] command db10.tmp.mrs.coll10_1437349177_10 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.762+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.762+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.762+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.762+0000 m31200| values...., out: "tmp.mrs.coll10_1437349177_10", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 15431 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 1, w: 16, R: 9, W: 7 }, timeAcquiringMicros: { r: 9549, w: 123007, R: 47663, W: 16021 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 459ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.763+0000 m31200| 2015-07-19T23:39:37.762+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_59
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.763+0000 m31200| 2015-07-19T23:39:37.763+0000 I COMMAND [conn22] CMD: drop db10.tmp.mrs.coll10_1437349177_17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.767+0000 m31200| 2015-07-19T23:39:37.766+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_51
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.767+0000 m31200| 2015-07-19T23:39:37.766+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_51
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.769+0000 m31200| 2015-07-19T23:39:37.769+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_51
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.770+0000 m31200| 2015-07-19T23:39:37.770+0000 I COMMAND [conn22] command db10.tmp.mrs.coll10_1437349177_17 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.770+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.770+0000 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.770+0000 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.771+0000 m31200| values...., out: "tmp.mrs.coll10_1437349177_17", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 6959, w: 8449, W: 1733 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 2, w: 17, R: 11, W: 7 }, timeAcquiringMicros: { r: 416, w: 112861, R: 49812, W: 43802 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 516ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.772+0000 m31200| 2015-07-19T23:39:37.771+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_60
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.774+0000 m31100| 2015-07-19T23:39:37.773+0000 I COMMAND [conn54] CMD: drop db10.tmp.mrs.coll10_1437349177_18
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.777+0000 m31100| 2015-07-19T23:39:37.777+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_29
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.778+0000 m31100| 2015-07-19T23:39:37.777+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_29
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.779+0000 m31100| 2015-07-19T23:39:37.778+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_29
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.782+0000 m31100| 2015-07-19T23:39:37.782+0000 I COMMAND [conn54] command db10.tmp.mrs.coll10_1437349177_18 command: mapReduce { mapreduce: "coll10", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.782+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.782+0000 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.783+0000 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.783+0000 m31100| values...., out: "tmp.mrs.coll10_1437349177_18", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 7341, W: 2074 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 4, w: 11, R: 9, W: 7 }, timeAcquiringMicros: { r: 12031, w: 58216, R: 25039, W: 33605 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 278ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.799+0000 m31100| 2015-07-19T23:39:37.798+0000 I COMMAND [conn23] CMD: drop db10.tmp.mrs.coll10_1437349177_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.804+0000 m31100| 2015-07-19T23:39:37.803+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_30 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.805+0000 m31100| 2015-07-19T23:39:37.805+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_30 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.809+0000 m31100| 2015-07-19T23:39:37.809+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_30 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.810+0000 m31100| 2015-07-19T23:39:37.810+0000 I COMMAND [conn23] command db10.tmp.mrs.coll10_1437349177_11 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.810+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.810+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.810+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.811+0000 m31100| values...., out: "tmp.mrs.coll10_1437349177_11", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 8860 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 5, R: 10, W: 5 }, timeAcquiringMicros: { r: 9526, w: 31163, R: 23252, W: 26070 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 294ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.835+0000 m31100| 2015-07-19T23:39:37.835+0000 I COMMAND [conn58] CMD: drop db10.tmp.mrs.coll10_1437349177_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.838+0000 m31100| 2015-07-19T23:39:37.838+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_31 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.844+0000 m31100| 2015-07-19T23:39:37.844+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_31 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.845+0000 m31100| 2015-07-19T23:39:37.845+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_31 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.845+0000 m31100| 2015-07-19T23:39:37.845+0000 I COMMAND [conn58] command db10.tmp.mrs.coll10_1437349177_19 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.846+0000 m31100| if (this.hasOwnProperty('key') && 
this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.846+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.846+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.846+0000 m31100| values...., out: "tmp.mrs.coll10_1437349177_19", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 9811 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { R: 9, W: 2 }, timeAcquiringMicros: { R: 24742, W: 10653 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 248ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.846+0000 m31200| 2015-07-19T23:39:37.846+0000 I COMMAND [conn122] CMD: drop db10.tmp.mrs.coll10_1437349177_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.848+0000 m31200| 2015-07-19T23:39:37.847+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_55 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.848+0000 m31200| 2015-07-19T23:39:37.847+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_55 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.849+0000 m31200| 2015-07-19T23:39:37.849+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_59 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.850+0000 m31200| 2015-07-19T23:39:37.850+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_55 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.850+0000 m31200| 2015-07-19T23:39:37.850+0000 I COMMAND [conn122] command db10.tmp.mrs.coll10_1437349177_18 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.850+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.850+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.850+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.851+0000 m31200| values...., out: "tmp.mrs.coll10_1437349177_18", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 26373, W: 50 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { w: 11, R: 14, W: 5 }, timeAcquiringMicros: { w: 69944, R: 23269, W: 14087 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 345ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.851+0000 m31200| 2015-07-19T23:39:37.850+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_61 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.852+0000 m31200| 2015-07-19T23:39:37.852+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_60 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.855+0000 m31200| 2015-07-19T23:39:37.855+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_60 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.855+0000 m31200| 
2015-07-19T23:39:37.855+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_60 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.855+0000 m31100| 2015-07-19T23:39:37.855+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349177_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.856+0000 m31200| 2015-07-19T23:39:37.855+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_59 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.856+0000 m31200| 2015-07-19T23:39:37.856+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_59 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.856+0000 m31100| 2015-07-19T23:39:37.856+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349177_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.859+0000 m31200| 2015-07-19T23:39:37.858+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349177_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.860+0000 m31200| 2015-07-19T23:39:37.859+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349177_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.861+0000 m31102| 2015-07-19T23:39:37.860+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349177_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.861+0000 m31101| 2015-07-19T23:39:37.860+0000 I COMMAND [repl writer worker 3] CMD: drop db10.tmp.mrs.coll10_1437349177_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.862+0000 m31102| 2015-07-19T23:39:37.861+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349177_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.862+0000 m31101| 2015-07-19T23:39:37.861+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349177_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.863+0000 m31202| 2015-07-19T23:39:37.863+0000 I COMMAND [repl writer worker 14] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_59 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.864+0000 m31201| 2015-07-19T23:39:37.864+0000 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_59 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.864+0000 m31202| 2015-07-19T23:39:37.864+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_60 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.865+0000 m31201| 2015-07-19T23:39:37.865+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_60 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.866+0000 m31202| 2015-07-19T23:39:37.866+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349177_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.868+0000 m31201| 2015-07-19T23:39:37.867+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349177_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.871+0000 m31201| 2015-07-19T23:39:37.871+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349177_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.872+0000 m31100| 2015-07-19T23:39:37.872+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_32 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.872+0000 m31100| 2015-07-19T23:39:37.872+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_33 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.873+0000 
m31202| 2015-07-19T23:39:37.873+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349177_10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.875+0000 m31200| 2015-07-19T23:39:37.875+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_62 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.876+0000 m31200| 2015-07-19T23:39:37.876+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_63 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.907+0000 m31200| 2015-07-19T23:39:37.907+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_61 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.915+0000 m31200| 2015-07-19T23:39:37.915+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_61 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.918+0000 m31201| 2015-07-19T23:39:37.917+0000 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_61 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.918+0000 m31200| 2015-07-19T23:39:37.918+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_61 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.919+0000 m31202| 2015-07-19T23:39:37.919+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_61 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.920+0000 m31100| 2015-07-19T23:39:37.920+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349177_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.921+0000 m31200| 2015-07-19T23:39:37.920+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349177_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.923+0000 m31102| 2015-07-19T23:39:37.923+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349177_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.924+0000 m31101| 2015-07-19T23:39:37.923+0000 I COMMAND [repl writer worker 8] CMD: drop db10.tmp.mrs.coll10_1437349177_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.941+0000 m31100| 2015-07-19T23:39:37.940+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_34 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.941+0000 m31201| 2015-07-19T23:39:37.941+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349177_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.942+0000 m31202| 2015-07-19T23:39:37.941+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349177_18 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.950+0000 m31200| 2015-07-19T23:39:37.950+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_64 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.961+0000 m31200| 2015-07-19T23:39:37.961+0000 I COMMAND [conn123] CMD: drop db10.tmp.mrs.coll10_1437349177_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.966+0000 m31200| 2015-07-19T23:39:37.966+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_56 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.966+0000 m31200| 2015-07-19T23:39:37.966+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_56 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.967+0000 m31200| 2015-07-19T23:39:37.967+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_56 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.974+0000 m31200| 2015-07-19T23:39:37.973+0000 I COMMAND [conn123] command db10.tmp.mrs.coll10_1437349177_11 command: mapReduce { mapreduce: 
"coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.974+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.974+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.974+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.975+0000 m31200| values...., out: "tmp.mrs.coll10_1437349177_11", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 34 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 9, w: 11, R: 11, W: 7 }, timeAcquiringMicros: { r: 37416, w: 70109, R: 52601, W: 39214 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 456ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:37.975+0000 m31200| 2015-07-19T23:39:37.975+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_65 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.034+0000 m31200| 2015-07-19T23:39:38.034+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_65 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.038+0000 m31200| 2015-07-19T23:39:38.038+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_65 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.038+0000 m31200| 2015-07-19T23:39:38.038+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_65 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.039+0000 m31100| 2015-07-19T23:39:38.039+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349177_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.053+0000 m31200| 2015-07-19T23:39:38.053+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349177_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.054+0000 m31101| 2015-07-19T23:39:38.054+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349177_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.054+0000 m31102| 2015-07-19T23:39:38.054+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349177_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.065+0000 m31201| 2015-07-19T23:39:38.065+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_65 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.066+0000 m31202| 2015-07-19T23:39:38.065+0000 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_65 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.067+0000 m31201| 2015-07-19T23:39:38.067+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349177_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.069+0000 m31202| 2015-07-19T23:39:38.068+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349177_11 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.075+0000 m31100| 2015-07-19T23:39:38.075+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_35 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.081+0000 m31200| 2015-07-19T23:39:38.081+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_66 [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:38.121+0000 m31100| 2015-07-19T23:39:38.121+0000 I COMMAND [conn53] CMD: drop db10.tmp.mrs.coll10_1437349177_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.123+0000 m31100| 2015-07-19T23:39:38.123+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_33 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.123+0000 m31100| 2015-07-19T23:39:38.123+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_33 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.124+0000 m31100| 2015-07-19T23:39:38.123+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_33 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.135+0000 m31100| 2015-07-19T23:39:38.134+0000 I COMMAND [conn53] command db10.tmp.mrs.coll10_1437349177_20 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.135+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.135+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.135+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.136+0000 m31100| values...., out: "tmp.mrs.coll10_1437349177_20", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1188 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 8, W: 7 }, timeAcquiringMicros: { r: 8320, w: 42910, W: 23015 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 262ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.142+0000 m31100| 2015-07-19T23:39:38.142+0000 I COMMAND [conn119] CMD: drop db10.tmp.mrs.coll10_1437349177_12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.144+0000 m31100| 2015-07-19T23:39:38.144+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_32 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.144+0000 m31100| 2015-07-19T23:39:38.144+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_32 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.146+0000 m31100| 2015-07-19T23:39:38.146+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_32 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.146+0000 m31100| 2015-07-19T23:39:38.146+0000 I COMMAND [conn119] command db10.tmp.mrs.coll10_1437349177_12 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.146+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.146+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.147+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.147+0000 m31100| values...., out: "tmp.mrs.coll10_1437349177_12", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 2951 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 12, R: 4, W: 2 }, timeAcquiringMicros: { r: 3907, w: 57370, R: 18641, W: 64 } }, 
Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 274ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.166+0000 m31100| 2015-07-19T23:39:38.165+0000 I COMMAND [conn54] CMD: drop db10.tmp.mrs.coll10_1437349177_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.167+0000 m31100| 2015-07-19T23:39:38.167+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_34 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.167+0000 m31100| 2015-07-19T23:39:38.167+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_34 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.170+0000 m31100| 2015-07-19T23:39:38.170+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_34 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.171+0000 m31100| 2015-07-19T23:39:38.170+0000 I COMMAND [conn54] command db10.tmp.mrs.coll10_1437349177_21 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.171+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.171+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.171+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.171+0000 m31100| values...., out: "tmp.mrs.coll10_1437349177_21", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 2, R: 5, W: 1 }, timeAcquiringMicros: { w: 4133, R: 13670, W: 2569 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 230ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.195+0000 m31100| 2015-07-19T23:39:38.195+0000 I COMMAND [conn23] CMD: drop db10.tmp.mrs.coll10_1437349178_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.199+0000 m31100| 2015-07-19T23:39:38.199+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_35 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.199+0000 m31100| 2015-07-19T23:39:38.199+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_35 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.199+0000 m31100| 2015-07-19T23:39:38.199+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_35 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.201+0000 m31100| 2015-07-19T23:39:38.201+0000 I COMMAND [conn23] command db10.tmp.mrs.coll10_1437349178_13 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.201+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.201+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.201+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.202+0000 m31100| values...., out: "tmp.mrs.coll10_1437349178_13", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1463 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, 
acquireWaitCount: { w: 2, R: 6, W: 2 }, timeAcquiringMicros: { w: 4235, R: 9320, W: 7425 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 132ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.273+0000 m31200| 2015-07-19T23:39:38.272+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 202ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.273+0000 m31200| 2015-07-19T23:39:38.272+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 202ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.293+0000 m31200| 2015-07-19T23:39:38.293+0000 I COMMAND [conn103] CMD: drop db10.tmp.mrs.coll10_1437349177_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.294+0000 m31200| 2015-07-19T23:39:38.294+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_58 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.295+0000 m31200| 2015-07-19T23:39:38.294+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_58 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.299+0000 m31200| 2015-07-19T23:39:38.299+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_58 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.300+0000 m31200| 2015-07-19T23:39:38.300+0000 I COMMAND [conn103] command db10.tmp.mrs.coll10_1437349177_19 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.300+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.300+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.301+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.301+0000 m31200| values...., out: "tmp.mrs.coll10_1437349177_19", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 3070, W: 35 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 19, R: 11, W: 8 }, timeAcquiringMicros: { r: 4750, w: 305944, R: 52101, W: 28756 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 702ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.301+0000 m31200| 2015-07-19T23:39:38.300+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_67 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.309+0000 m31200| 2015-07-19T23:39:38.308+0000 I COMMAND [conn117] CMD: drop db10.tmp.mrs.coll10_1437349177_12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.310+0000 m31200| 2015-07-19T23:39:38.310+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_63 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.311+0000 m31200| 
2015-07-19T23:39:38.311+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_63 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.324+0000 m31200| 2015-07-19T23:39:38.324+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_63 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.339+0000 m31200| 2015-07-19T23:39:38.339+0000 I COMMAND [conn117] command db10.tmp.mrs.coll10_1437349177_12 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.339+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.339+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.339+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.340+0000 m31200| values...., out: "tmp.mrs.coll10_1437349177_12", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:230 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5323, W: 7116 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 3, w: 4, R: 14, W: 9 }, timeAcquiringMicros: { r: 4748, w: 17125, R: 154055, W: 29287 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 465ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.340+0000 m31200| 2015-07-19T23:39:38.340+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_68 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.341+0000 m31200| 2015-07-19T23:39:38.341+0000 I COMMAND [conn22] CMD: drop db10.tmp.mrs.coll10_1437349177_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.342+0000 m31200| 2015-07-19T23:39:38.342+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_62 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.343+0000 m31200| 2015-07-19T23:39:38.342+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_62 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.345+0000 m31200| 2015-07-19T23:39:38.345+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_62 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.345+0000 m31200| 2015-07-19T23:39:38.345+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_67 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.347+0000 m31200| 2015-07-19T23:39:38.347+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_67 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.348+0000 m31200| 2015-07-19T23:39:38.347+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_67 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.348+0000 m31100| 2015-07-19T23:39:38.348+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349177_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.349+0000 m31100| 2015-07-19T23:39:38.348+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 146ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.350+0000 m31100| 2015-07-19T23:39:38.348+0000 I QUERY [conn10] getmore 
local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 146ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.350+0000 m31200| 2015-07-19T23:39:38.349+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349177_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.352+0000 m31200| 2015-07-19T23:39:38.350+0000 I COMMAND [conn22] command db10.tmp.mrs.coll10_1437349177_20 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.352+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.352+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.352+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.353+0000 m31200| values...., out: "tmp.mrs.coll10_1437349177_20", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 9117, w: 1743, W: 784 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 3, w: 7, R: 12, W: 7 }, timeAcquiringMicros: { r: 13397, w: 24709, R: 168627, W: 28871 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 478ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.353+0000 m31200| 2015-07-19T23:39:38.351+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_69 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.353+0000 m31101| 2015-07-19T23:39:38.351+0000 I COMMAND [repl writer worker 15] CMD: drop db10.tmp.mrs.coll10_1437349177_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.353+0000 m31102| 2015-07-19T23:39:38.351+0000 I COMMAND [repl writer worker 3] CMD: drop db10.tmp.mrs.coll10_1437349177_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.357+0000 m31100| 2015-07-19T23:39:38.355+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_36 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.358+0000 m31202| 2015-07-19T23:39:38.357+0000 I COMMAND [repl writer worker 0] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_67 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.360+0000 m31201| 2015-07-19T23:39:38.358+0000 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_67 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.361+0000 m31202| 2015-07-19T23:39:38.358+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349177_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.361+0000 m31201| 2015-07-19T23:39:38.359+0000 I COMMAND [repl writer worker 4] CMD: drop db10.tmp.mrs.coll10_1437349177_19 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.365+0000 m31200| 2015-07-19T23:39:38.364+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_70 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.401+0000 m31200| 2015-07-19T23:39:38.400+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_69 [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:38.401+0000 m31200| 2015-07-19T23:39:38.401+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_69 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.401+0000 m31200| 2015-07-19T23:39:38.401+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_69 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.402+0000 m31100| 2015-07-19T23:39:38.401+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349177_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.402+0000 m31200| 2015-07-19T23:39:38.401+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_68 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.402+0000 m31202| 2015-07-19T23:39:38.402+0000 I COMMAND [repl writer worker 1] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_69 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.403+0000 m31201| 2015-07-19T23:39:38.402+0000 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_69 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.403+0000 m31200| 2015-07-19T23:39:38.403+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_68 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.403+0000 m31200| 2015-07-19T23:39:38.403+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349177_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.404+0000 m31200| 2015-07-19T23:39:38.403+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_68 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.407+0000 m31100| 2015-07-19T23:39:38.404+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349177_12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.407+0000 m31202| 2015-07-19T23:39:38.404+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_68 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.407+0000 m31102| 2015-07-19T23:39:38.405+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349177_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.407+0000 m31201| 2015-07-19T23:39:38.405+0000 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_68 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.407+0000 m31101| 2015-07-19T23:39:38.405+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349177_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.408+0000 m31200| 2015-07-19T23:39:38.406+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349177_12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.408+0000 m31102| 2015-07-19T23:39:38.407+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349177_12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.409+0000 m31101| 2015-07-19T23:39:38.409+0000 I COMMAND [repl writer worker 5] CMD: drop db10.tmp.mrs.coll10_1437349177_12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.413+0000 m31202| 2015-07-19T23:39:38.413+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349177_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.414+0000 m31100| 2015-07-19T23:39:38.413+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_37 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.414+0000 m31202| 2015-07-19T23:39:38.414+0000 I COMMAND [repl writer worker 4] CMD: drop db10.tmp.mrs.coll10_1437349177_12 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.418+0000 m31200| 2015-07-19T23:39:38.417+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_71 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.420+0000 m31201| 2015-07-19T23:39:38.420+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349177_20 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.421+0000 m31201| 2015-07-19T23:39:38.421+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349177_12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.426+0000 m31100| 2015-07-19T23:39:38.426+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_38 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.431+0000 m31200| 2015-07-19T23:39:38.431+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_72 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.480+0000 m31200| 2015-07-19T23:39:38.479+0000 I COMMAND [conn122] CMD: drop db10.tmp.mrs.coll10_1437349177_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.484+0000 m31200| 2015-07-19T23:39:38.484+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_64 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.484+0000 m31200| 2015-07-19T23:39:38.484+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_64 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.486+0000 m31200| 2015-07-19T23:39:38.486+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_64 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.486+0000 m31200| 2015-07-19T23:39:38.486+0000 I COMMAND [conn122] command db10.tmp.mrs.coll10_1437349177_21 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.487+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.487+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.487+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.487+0000 m31200| values...., out: "tmp.mrs.coll10_1437349177_21", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 14370, w: 2759, W: 11679 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 9, w: 13, R: 11, W: 5 }, timeAcquiringMicros: { r: 39489, w: 76513, R: 203074, W: 652 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 544ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.487+0000 m31200| 2015-07-19T23:39:38.486+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_73 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.536+0000 m31200| 2015-07-19T23:39:38.536+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_73 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.540+0000 m31200| 2015-07-19T23:39:38.540+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_73 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.540+0000 m31200| 2015-07-19T23:39:38.540+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_73 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.541+0000 m31100| 
2015-07-19T23:39:38.541+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349177_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.545+0000 m31200| 2015-07-19T23:39:38.545+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349177_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.548+0000 m31101| 2015-07-19T23:39:38.548+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349177_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.548+0000 m31102| 2015-07-19T23:39:38.548+0000 I COMMAND [repl writer worker 15] CMD: drop db10.tmp.mrs.coll10_1437349177_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.557+0000 m31201| 2015-07-19T23:39:38.557+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_73 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.558+0000 m31202| 2015-07-19T23:39:38.558+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_73 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.560+0000 m31202| 2015-07-19T23:39:38.560+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349177_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.560+0000 m31201| 2015-07-19T23:39:38.560+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349177_21 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.562+0000 m31200| 2015-07-19T23:39:38.561+0000 I COMMAND [conn123] CMD: drop db10.tmp.mrs.coll10_1437349178_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.562+0000 m31200| 2015-07-19T23:39:38.562+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_74 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.563+0000 m31200| 2015-07-19T23:39:38.563+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_66 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.563+0000 m31200| 2015-07-19T23:39:38.563+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_66 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.564+0000 m31200| 2015-07-19T23:39:38.564+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_66 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.565+0000 m31200| 2015-07-19T23:39:38.565+0000 I COMMAND [conn123] command db10.tmp.mrs.coll10_1437349178_13 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.565+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.565+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.565+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.566+0000 m31200| values...., out: "tmp.mrs.coll10_1437349178_13", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2 }, timeAcquiringMicros: { r: 7655, w: 14780 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 14, R: 11, W: 5 }, timeAcquiringMicros: { w: 78229, R: 47329, W: 25166 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 497ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.567+0000 m31200| 2015-07-19T23:39:38.567+0000 I COMMAND [conn123] CMD: drop 
map_reduce_merge_nonatomic1.tmp.mr.coll10_75 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.574+0000 m31100| 2015-07-19T23:39:38.574+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_39 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.622+0000 m31200| 2015-07-19T23:39:38.621+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_75 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.626+0000 m31200| 2015-07-19T23:39:38.626+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_75 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.626+0000 m31200| 2015-07-19T23:39:38.626+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_75 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.626+0000 m31100| 2015-07-19T23:39:38.626+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349178_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.633+0000 m31200| 2015-07-19T23:39:38.633+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349178_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.636+0000 m31101| 2015-07-19T23:39:38.636+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349178_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.636+0000 m31102| 2015-07-19T23:39:38.636+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349178_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.638+0000 m31202| 2015-07-19T23:39:38.637+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_75 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.638+0000 m31201| 2015-07-19T23:39:38.638+0000 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_75 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.640+0000 m31201| 2015-07-19T23:39:38.640+0000 I COMMAND [repl writer worker 8] CMD: drop db10.tmp.mrs.coll10_1437349178_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.640+0000 m31202| 2015-07-19T23:39:38.640+0000 I COMMAND [repl writer worker 5] CMD: drop db10.tmp.mrs.coll10_1437349178_13 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.642+0000 m31200| 2015-07-19T23:39:38.641+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_76 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.660+0000 m31100| 2015-07-19T23:39:38.660+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_40 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.672+0000 m31200| 2015-07-19T23:39:38.672+0000 I COMMAND [conn103] CMD: drop db10.tmp.mrs.coll10_1437349178_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.674+0000 m31200| 2015-07-19T23:39:38.674+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_70 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.674+0000 m31200| 2015-07-19T23:39:38.674+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_70 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.682+0000 m31200| 2015-07-19T23:39:38.682+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_70 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.700+0000 m31100| 2015-07-19T23:39:38.700+0000 I COMMAND [conn58] CMD: drop db10.tmp.mrs.coll10_1437349178_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.701+0000 m31100| 2015-07-19T23:39:38.701+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_36 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.701+0000 m31100| 2015-07-19T23:39:38.701+0000 
I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_36 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.702+0000 m31200| 2015-07-19T23:39:38.701+0000 I COMMAND [conn103] command db10.tmp.mrs.coll10_1437349178_22 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.702+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.702+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.702+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.703+0000 m31200| values...., out: "tmp.mrs.coll10_1437349178_22", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 36 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 10, R: 11, W: 9 }, timeAcquiringMicros: { r: 48, w: 67072, R: 34987, W: 48310 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 339ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.703+0000 m31100| 2015-07-19T23:39:38.703+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_36 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.710+0000 m31100| 2015-07-19T23:39:38.709+0000 I COMMAND [conn58] command db10.tmp.mrs.coll10_1437349178_22 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.710+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.710+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.710+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.710+0000 m31100| values...., out: "tmp.mrs.coll10_1437349178_22", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1469 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 12, R: 6, W: 2 }, timeAcquiringMicros: { w: 128976, R: 23842, W: 5238 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 354ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.711+0000 m31200| 2015-07-19T23:39:38.711+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_77 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.738+0000 m31200| 2015-07-19T23:39:38.738+0000 I COMMAND [conn22] CMD: drop db10.tmp.mrs.coll10_1437349178_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.740+0000 m31200| 2015-07-19T23:39:38.739+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_72 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.740+0000 m31200| 2015-07-19T23:39:38.740+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_72 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.740+0000 m31200| 2015-07-19T23:39:38.740+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_72 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.741+0000 
m31200| 2015-07-19T23:39:38.741+0000 I COMMAND [conn22] command db10.tmp.mrs.coll10_1437349178_23 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.741+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.741+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.741+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.742+0000 m31200| values...., out: "tmp.mrs.coll10_1437349178_23", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 12010, W: 666 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 1, w: 5, R: 11, W: 9 }, timeAcquiringMicros: { r: 7551, w: 42839, R: 35566, W: 23795 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 322ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.744+0000 m31200| 2015-07-19T23:39:38.744+0000 I COMMAND [conn117] CMD: drop db10.tmp.mrs.coll10_1437349178_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.746+0000 m31200| 2015-07-19T23:39:38.745+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_71 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.746+0000 m31200| 2015-07-19T23:39:38.746+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_71 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.746+0000 m31200| 2015-07-19T23:39:38.746+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_71 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.746+0000 m31200| 2015-07-19T23:39:38.746+0000 I COMMAND [conn117] command db10.tmp.mrs.coll10_1437349178_14 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.746+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.747+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.747+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.747+0000 m31200| values...., out: "tmp.mrs.coll10_1437349178_14", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 15838, w: 1207, W: 2672 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 4, w: 9, R: 10, W: 5 }, timeAcquiringMicros: { r: 4465, w: 64528, R: 27831, W: 29235 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 333ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.751+0000 m31100| 2015-07-19T23:39:38.751+0000 I COMMAND [conn53] CMD: drop db10.tmp.mrs.coll10_1437349178_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.753+0000 m31100| 2015-07-19T23:39:38.753+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_38 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.755+0000 m31100| 2015-07-19T23:39:38.755+0000 I COMMAND [conn53] CMD: drop 
db10.tmp.mr.coll10_38 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.759+0000 m31100| 2015-07-19T23:39:38.759+0000 I COMMAND [conn119] CMD: drop db10.tmp.mrs.coll10_1437349178_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.761+0000 m31100| 2015-07-19T23:39:38.760+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_37 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.762+0000 m31100| 2015-07-19T23:39:38.762+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_37 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.763+0000 m31100| 2015-07-19T23:39:38.763+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_37 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.764+0000 m31100| 2015-07-19T23:39:38.764+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_38 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.779+0000 m31100| 2015-07-19T23:39:38.778+0000 I COMMAND [conn119] command db10.tmp.mrs.coll10_1437349178_14 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.779+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.779+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.779+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.780+0000 m31100| values...., out: "tmp.mrs.coll10_1437349178_14", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:230 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3132, w: 5847, W: 5029 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { w: 5, R: 9, W: 4 }, timeAcquiringMicros: { w: 45589, R: 50448, W: 21098 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 365ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.780+0000 m31200| 2015-07-19T23:39:38.778+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_77 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.780+0000 m31200| 2015-07-19T23:39:38.779+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_78 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.786+0000 m31100| 2015-07-19T23:39:38.785+0000 I COMMAND [conn53] command db10.tmp.mrs.coll10_1437349178_23 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.786+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.786+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.786+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.786+0000 m31100| values...., out: "tmp.mrs.coll10_1437349178_23", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 7815, W: 134 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 7, R: 6, W: 8 }, timeAcquiringMicros: { r: 1740, w: 55875, R: 24724, W: 39889 } }, Collection: { 
acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 367ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.787+0000 m31200| 2015-07-19T23:39:38.787+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_79 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.789+0000 m31200| 2015-07-19T23:39:38.789+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_77 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.790+0000 m31200| 2015-07-19T23:39:38.789+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_77 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.790+0000 m31100| 2015-07-19T23:39:38.790+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349178_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.791+0000 m31202| 2015-07-19T23:39:38.791+0000 I COMMAND [repl writer worker 2] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_77 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.792+0000 m31201| 2015-07-19T23:39:38.791+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_77 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.795+0000 m31200| 2015-07-19T23:39:38.794+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349178_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.796+0000 m31102| 2015-07-19T23:39:38.795+0000 I COMMAND [repl writer worker 8] CMD: drop db10.tmp.mrs.coll10_1437349178_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.796+0000 m31101| 2015-07-19T23:39:38.796+0000 I COMMAND [repl writer worker 6] CMD: drop db10.tmp.mrs.coll10_1437349178_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.797+0000 m31100| 2015-07-19T23:39:38.797+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_41 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.798+0000 m31200| 2015-07-19T23:39:38.798+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_80 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.803+0000 m31202| 2015-07-19T23:39:38.802+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349178_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.809+0000 m31201| 2015-07-19T23:39:38.808+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349178_22 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.848+0000 m31200| 2015-07-19T23:39:38.848+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_79 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.853+0000 m31200| 2015-07-19T23:39:38.853+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_79 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.853+0000 m31200| 2015-07-19T23:39:38.853+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_79 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.853+0000 m31100| 2015-07-19T23:39:38.853+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349178_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.855+0000 m31202| 2015-07-19T23:39:38.855+0000 I COMMAND [repl writer worker 1] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_79 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.855+0000 m31201| 2015-07-19T23:39:38.855+0000 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_79 [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:38.861+0000 m31200| 2015-07-19T23:39:38.861+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_78 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.864+0000 m31200| 2015-07-19T23:39:38.864+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349178_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.865+0000 m31101| 2015-07-19T23:39:38.864+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349178_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.865+0000 m31102| 2015-07-19T23:39:38.864+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349178_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.868+0000 m31200| 2015-07-19T23:39:38.867+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_78 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.868+0000 m31202| 2015-07-19T23:39:38.868+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_78 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.868+0000 m31201| 2015-07-19T23:39:38.868+0000 I COMMAND [repl writer worker 2] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_78 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.869+0000 m31200| 2015-07-19T23:39:38.868+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_78 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.869+0000 m31100| 2015-07-19T23:39:38.869+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349178_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.871+0000 m31202| 2015-07-19T23:39:38.871+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349178_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.871+0000 m31201| 2015-07-19T23:39:38.871+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349178_23 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.876+0000 m31200| 2015-07-19T23:39:38.876+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_81 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.877+0000 m31100| 2015-07-19T23:39:38.876+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_42 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.877+0000 m31200| 2015-07-19T23:39:38.877+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349178_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.879+0000 m31102| 2015-07-19T23:39:38.878+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349178_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.879+0000 m31101| 2015-07-19T23:39:38.878+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349178_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.882+0000 m31202| 2015-07-19T23:39:38.882+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349178_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.883+0000 m31201| 2015-07-19T23:39:38.882+0000 I COMMAND [repl writer worker 13] CMD: drop db10.tmp.mrs.coll10_1437349178_14 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.885+0000 m31200| 2015-07-19T23:39:38.885+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_82 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.904+0000 m31100| 2015-07-19T23:39:38.904+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_43 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.944+0000 m31200| 
2015-07-19T23:39:38.943+0000 I COMMAND [conn122] CMD: drop db10.tmp.mrs.coll10_1437349178_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.950+0000 m31200| 2015-07-19T23:39:38.949+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_74 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.950+0000 m31200| 2015-07-19T23:39:38.949+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_74 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.952+0000 m31200| 2015-07-19T23:39:38.951+0000 I COMMAND [conn122] CMD: drop db10.tmp.mr.coll10_74 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.952+0000 m31200| 2015-07-19T23:39:38.952+0000 I COMMAND [conn122] command db10.tmp.mrs.coll10_1437349178_24 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.952+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.952+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.952+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.953+0000 m31200| values...., out: "tmp.mrs.coll10_1437349178_24", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 4815, W: 19 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 7, w: 15, R: 7 }, timeAcquiringMicros: { r: 28480, w: 81815, R: 31635 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 392ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.993+0000 m31200| 2015-07-19T23:39:38.993+0000 I COMMAND [conn123] CMD: drop db10.tmp.mrs.coll10_1437349178_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.995+0000 m31200| 2015-07-19T23:39:38.995+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_76 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.997+0000 m31200| 2015-07-19T23:39:38.997+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_76 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.998+0000 m31200| 2015-07-19T23:39:38.998+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_76 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.999+0000 m31200| 2015-07-19T23:39:38.999+0000 I COMMAND [conn123] command db10.tmp.mrs.coll10_1437349178_15 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.999+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.999+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:38.999+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.000+0000 m31200| values...., out: "tmp.mrs.coll10_1437349178_15", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 1788, W: 124 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 6, R: 9, W: 3 }, timeAcquiringMicros: { r: 4305, w: 36918, R: 40314, W: 10661 } }, 
Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 357ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.018+0000 m31100| 2015-07-19T23:39:39.017+0000 I COMMAND [conn54] CMD: drop db10.tmp.mrs.coll10_1437349178_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.019+0000 m31100| 2015-07-19T23:39:39.019+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_39 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.019+0000 m31100| 2015-07-19T23:39:39.019+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_39 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.021+0000 m31100| 2015-07-19T23:39:39.020+0000 I COMMAND [conn23] CMD: drop db10.tmp.mrs.coll10_1437349178_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.022+0000 m31100| 2015-07-19T23:39:39.022+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_40 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.023+0000 m31100| 2015-07-19T23:39:39.022+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_40 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.024+0000 m31100| 2015-07-19T23:39:39.023+0000 I COMMAND [conn54] CMD: drop db10.tmp.mr.coll10_39 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.035+0000 m31100| 2015-07-19T23:39:39.035+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_40 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.041+0000 m31100| 2015-07-19T23:39:39.040+0000 I COMMAND [conn54] command db10.tmp.mrs.coll10_1437349178_24 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.041+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.041+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.041+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.042+0000 m31100| values...., out: "tmp.mrs.coll10_1437349178_24", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 15682, w: 2066, W: 54 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 11, R: 8, W: 6 }, timeAcquiringMicros: { w: 108217, R: 37141, W: 58268 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 481ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.042+0000 m31200| 2015-07-19T23:39:39.041+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_83 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.053+0000 m31100| 2015-07-19T23:39:39.053+0000 I COMMAND [conn23] command db10.tmp.mrs.coll10_1437349178_15 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.053+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.053+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.054+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.054+0000 m31100| values...., out: "tmp.mrs.coll10_1437349178_15", shardedFirstPass: true } 
planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 8149 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 10, R: 8, W: 9 }, timeAcquiringMicros: { r: 18298, w: 103188, R: 19585, W: 40230 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 412ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.066+0000 m31200| 2015-07-19T23:39:39.066+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_84 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.073+0000 m31100| 2015-07-19T23:39:39.073+0000 I COMMAND [conn58] CMD: drop db10.tmp.mrs.coll10_1437349178_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.075+0000 m31100| 2015-07-19T23:39:39.074+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_41 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.075+0000 m31100| 2015-07-19T23:39:39.075+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_41 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.080+0000 m31100| 2015-07-19T23:39:39.080+0000 I COMMAND [conn58] CMD: drop db10.tmp.mr.coll10_41 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.080+0000 m31200| 2015-07-19T23:39:39.080+0000 I COMMAND [conn103] CMD: drop db10.tmp.mrs.coll10_1437349178_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.080+0000 m31100| 2015-07-19T23:39:39.080+0000 I COMMAND [conn58] command db10.tmp.mrs.coll10_1437349178_25 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.081+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.081+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.081+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.081+0000 m31100| values...., out: "tmp.mrs.coll10_1437349178_25", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 5, R: 7 }, timeAcquiringMicros: { w: 41317, R: 22241 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 283ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.086+0000 m31200| 2015-07-19T23:39:39.086+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_80 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.087+0000 m31200| 2015-07-19T23:39:39.087+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_80 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.090+0000 m31200| 2015-07-19T23:39:39.089+0000 I COMMAND [conn103] CMD: drop db10.tmp.mr.coll10_80 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.091+0000 m31200| 2015-07-19T23:39:39.091+0000 I COMMAND [conn103] command db10.tmp.mrs.coll10_1437349178_25 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.091+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.091+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.091+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.092+0000 m31200| values...., out: "tmp.mrs.coll10_1437349178_25", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 2, R: 9 }, timeAcquiringMicros: { w: 3867, R: 41811 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 292ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.093+0000 m31200| 2015-07-19T23:39:39.093+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_85 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.126+0000 m31200| 2015-07-19T23:39:39.126+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_83 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.130+0000 m31200| 2015-07-19T23:39:39.129+0000 I COMMAND [conn22] CMD: drop db10.tmp.mrs.coll10_1437349178_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.131+0000 m31200| 2015-07-19T23:39:39.131+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_81 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.133+0000 m31200| 2015-07-19T23:39:39.133+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_81 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.134+0000 m31202| 2015-07-19T23:39:39.134+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_83 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.135+0000 m31200| 2015-07-19T23:39:39.134+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_83 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.135+0000 m31201| 2015-07-19T23:39:39.134+0000 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_83 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.135+0000 m31200| 2015-07-19T23:39:39.135+0000 I COMMAND [conn122] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll10_83 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.136+0000 m31100| 2015-07-19T23:39:39.135+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349178_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.136+0000 m31200| 2015-07-19T23:39:39.136+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349178_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.139+0000 m31102| 2015-07-19T23:39:39.138+0000 I COMMAND [repl writer worker 5] CMD: drop db10.tmp.mrs.coll10_1437349178_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.139+0000 m31101| 2015-07-19T23:39:39.138+0000 I COMMAND [repl writer worker 1] CMD: drop db10.tmp.mrs.coll10_1437349178_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.146+0000 m31200| 2015-07-19T23:39:39.146+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_84 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.147+0000 m31201| 2015-07-19T23:39:39.146+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349178_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.149+0000 m30998| 2015-07-19T23:39:39.149+0000 I NETWORK [conn61] end connection 10.139.123.131:36001 (3 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.150+0000 m31200| 2015-07-19T23:39:39.150+0000 I COMMAND [conn22] CMD: drop db10.tmp.mr.coll10_81 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.150+0000 m31200| 2015-07-19T23:39:39.150+0000 I COMMAND [conn22] command db10.tmp.mrs.coll10_1437349178_26 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.150+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.150+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.150+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.151+0000 m31200| values...., out: "tmp.mrs.coll10_1437349178_26", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 3763, W: 1362 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 1, R: 7, W: 7 }, timeAcquiringMicros: { w: 1281, R: 10521, W: 13455 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 279ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.151+0000 m31202| 2015-07-19T23:39:39.150+0000 I COMMAND [repl writer worker 3] CMD: drop db10.tmp.mrs.coll10_1437349178_24 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.153+0000 m31200| 2015-07-19T23:39:39.153+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_84 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.153+0000 m31200| 2015-07-19T23:39:39.153+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_84 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.154+0000 m31100| 2015-07-19T23:39:39.154+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349178_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.156+0000 m31202| 2015-07-19T23:39:39.155+0000 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_84 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.156+0000 m31201| 2015-07-19T23:39:39.155+0000 I COMMAND [repl writer worker 10] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_84 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.159+0000 m31200| 2015-07-19T23:39:39.159+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_85 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.159+0000 m31200| 2015-07-19T23:39:39.159+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349178_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.162+0000 m31101| 2015-07-19T23:39:39.161+0000 I COMMAND [repl writer worker 13] CMD: drop db10.tmp.mrs.coll10_1437349178_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.162+0000 m31102| 2015-07-19T23:39:39.162+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349178_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.169+0000 m31100| 2015-07-19T23:39:39.169+0000 I COMMAND [conn119] CMD: drop db10.tmp.mrs.coll10_1437349178_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.169+0000 m31200| 2015-07-19T23:39:39.169+0000 I COMMAND [conn103] CMD: drop 
map_reduce_merge_nonatomic2.tmp.mr.coll10_85 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.170+0000 m31200| 2015-07-19T23:39:39.169+0000 I COMMAND [conn103] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_85 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.170+0000 m31100| 2015-07-19T23:39:39.169+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349178_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.171+0000 m31200| 2015-07-19T23:39:39.171+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_86 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.171+0000 m31100| 2015-07-19T23:39:39.171+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_43 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.172+0000 m31201| 2015-07-19T23:39:39.171+0000 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_85 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.172+0000 m31202| 2015-07-19T23:39:39.171+0000 I COMMAND [repl writer worker 1] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll10_85 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.172+0000 m31201| 2015-07-19T23:39:39.172+0000 I COMMAND [repl writer worker 11] CMD: drop db10.tmp.mrs.coll10_1437349178_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.173+0000 m31202| 2015-07-19T23:39:39.172+0000 I COMMAND [repl writer worker 8] CMD: drop db10.tmp.mrs.coll10_1437349178_15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.175+0000 m31100| 2015-07-19T23:39:39.173+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_43 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.175+0000 m31200| 2015-07-19T23:39:39.173+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349178_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.176+0000 m31100| 2015-07-19T23:39:39.173+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_44 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.177+0000 m31100| 2015-07-19T23:39:39.177+0000 I COMMAND [conn119] CMD: drop db10.tmp.mr.coll10_43 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.178+0000 m31102| 2015-07-19T23:39:39.178+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349178_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.181+0000 m31101| 2015-07-19T23:39:39.179+0000 I COMMAND [repl writer worker 14] CMD: drop db10.tmp.mrs.coll10_1437349178_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.182+0000 m31100| 2015-07-19T23:39:39.182+0000 I COMMAND [conn119] command db10.tmp.mrs.coll10_1437349178_16 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.182+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.182+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.182+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.183+0000 m31100| values...., out: "tmp.mrs.coll10_1437349178_16", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 78 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 2, R: 11, W: 9 }, timeAcquiringMicros: { r: 14018, w: 4762, R: 47974, W: 13544 } }, Collection: 
{ acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 297ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.195+0000 m30998| 2015-07-19T23:39:39.195+0000 I NETWORK [conn60] end connection 10.139.123.131:36000 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.203+0000 m31202| 2015-07-19T23:39:39.202+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349178_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.203+0000 m31201| 2015-07-19T23:39:39.202+0000 I COMMAND [repl writer worker 8] CMD: drop db10.tmp.mrs.coll10_1437349178_25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.207+0000 m31100| 2015-07-19T23:39:39.207+0000 I COMMAND [conn53] CMD: drop db10.tmp.mrs.coll10_1437349178_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.209+0000 m31100| 2015-07-19T23:39:39.209+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_42 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.209+0000 m31100| 2015-07-19T23:39:39.209+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_42 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.211+0000 m31100| 2015-07-19T23:39:39.211+0000 I COMMAND [conn53] CMD: drop db10.tmp.mr.coll10_42 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.211+0000 m31100| 2015-07-19T23:39:39.211+0000 I COMMAND [conn53] command db10.tmp.mrs.coll10_1437349178_26 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.211+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.211+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.211+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.212+0000 m31100| values...., out: "tmp.mrs.coll10_1437349178_26", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:230 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 3029 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 5, w: 9, R: 9, W: 5 }, timeAcquiringMicros: { r: 10617, w: 22586, R: 23948, W: 30406 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 341ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.212+0000 m31200| 2015-07-19T23:39:39.211+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_87 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.222+0000 m31200| 2015-07-19T23:39:39.222+0000 I COMMAND [conn117] CMD: drop db10.tmp.mrs.coll10_1437349178_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.227+0000 m31200| 2015-07-19T23:39:39.226+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_82 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.227+0000 m31200| 2015-07-19T23:39:39.226+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_82 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.232+0000 m31200| 2015-07-19T23:39:39.231+0000 I COMMAND [conn117] CMD: drop db10.tmp.mr.coll10_82 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.232+0000 m31200| 2015-07-19T23:39:39.232+0000 I COMMAND [conn117] command db10.tmp.mrs.coll10_1437349178_16 command: mapReduce 
{ mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.232+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.232+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.232+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.233+0000 m31200| values...., out: "tmp.mrs.coll10_1437349178_16", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 15023 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 4, w: 6, R: 5, W: 5 }, timeAcquiringMicros: { r: 13139, w: 31928, R: 6742, W: 22667 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 347ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.233+0000 m31200| 2015-07-19T23:39:39.232+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_88 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.272+0000 m31200| 2015-07-19T23:39:39.271+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_87 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.273+0000 m31200| 2015-07-19T23:39:39.273+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_87 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.274+0000 m31200| 2015-07-19T23:39:39.273+0000 I COMMAND [conn22] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_87 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.274+0000 m31100| 2015-07-19T23:39:39.274+0000 I COMMAND [conn110] CMD: drop db10.tmp.mrs.coll10_1437349178_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.275+0000 m31202| 2015-07-19T23:39:39.275+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_87 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.275+0000 m31201| 2015-07-19T23:39:39.275+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll10_87 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.276+0000 m31200| 2015-07-19T23:39:39.275+0000 I COMMAND [conn87] CMD: drop db10.tmp.mrs.coll10_1437349178_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.277+0000 m31101| 2015-07-19T23:39:39.277+0000 I COMMAND [repl writer worker 0] CMD: drop db10.tmp.mrs.coll10_1437349178_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.277+0000 m31102| 2015-07-19T23:39:39.277+0000 I COMMAND [repl writer worker 4] CMD: drop db10.tmp.mrs.coll10_1437349178_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.279+0000 m31201| 2015-07-19T23:39:39.279+0000 I COMMAND [repl writer worker 13] CMD: drop db10.tmp.mrs.coll10_1437349178_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.281+0000 m31202| 2015-07-19T23:39:39.281+0000 I COMMAND [repl writer worker 15] CMD: drop db10.tmp.mrs.coll10_1437349178_26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.283+0000 m30998| 2015-07-19T23:39:39.282+0000 I NETWORK [conn59] end connection 10.139.123.131:35999 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.288+0000 m31200| 2015-07-19T23:39:39.288+0000 I COMMAND [conn117] CMD: drop 
map_reduce_merge_nonatomic3.tmp.mr.coll10_88 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.289+0000 m31200| 2015-07-19T23:39:39.289+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_88 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.289+0000 m31200| 2015-07-19T23:39:39.289+0000 I COMMAND [conn117] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_88 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.290+0000 m31100| 2015-07-19T23:39:39.290+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349178_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.290+0000 m31202| 2015-07-19T23:39:39.290+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_88 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.291+0000 m31201| 2015-07-19T23:39:39.290+0000 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll10_88 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.295+0000 m31200| 2015-07-19T23:39:39.295+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349178_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.297+0000 m31101| 2015-07-19T23:39:39.297+0000 I COMMAND [repl writer worker 10] CMD: drop db10.tmp.mrs.coll10_1437349178_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.297+0000 m31102| 2015-07-19T23:39:39.297+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349178_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.300+0000 m31201| 2015-07-19T23:39:39.300+0000 I COMMAND [repl writer worker 7] CMD: drop db10.tmp.mrs.coll10_1437349178_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.300+0000 m31202| 2015-07-19T23:39:39.300+0000 I COMMAND [repl writer worker 2] CMD: drop db10.tmp.mrs.coll10_1437349178_16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.302+0000 m30999| 2015-07-19T23:39:39.302+0000 I NETWORK [conn61] end connection 10.139.123.131:57361 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.325+0000 m31100| 2015-07-19T23:39:39.325+0000 I COMMAND [conn23] CMD: drop db10.tmp.mrs.coll10_1437349179_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.329+0000 m31100| 2015-07-19T23:39:39.329+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_44 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.334+0000 m31100| 2015-07-19T23:39:39.333+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_44 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.335+0000 m31100| 2015-07-19T23:39:39.335+0000 I COMMAND [conn23] CMD: drop db10.tmp.mr.coll10_44 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.340+0000 m31100| 2015-07-19T23:39:39.340+0000 I COMMAND [conn23] command db10.tmp.mrs.coll10_1437349179_17 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.340+0000 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.340+0000 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.340+0000 m31100| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.340+0000 m31100| values...., out: "tmp.mrs.coll10_1437349179_17", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:230 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, 
timeAcquiringMicros: { r: 2676 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 3, R: 4, W: 5 }, timeAcquiringMicros: { w: 553, R: 1348, W: 1947 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 169ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.352+0000 m31200| 2015-07-19T23:39:39.352+0000 I COMMAND [conn123] CMD: drop db10.tmp.mrs.coll10_1437349179_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.353+0000 m31200| 2015-07-19T23:39:39.353+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_86 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.355+0000 m31200| 2015-07-19T23:39:39.355+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_86 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.357+0000 m31200| 2015-07-19T23:39:39.356+0000 I COMMAND [conn123] CMD: drop db10.tmp.mr.coll10_86 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.357+0000 m31200| 2015-07-19T23:39:39.357+0000 I COMMAND [conn123] command db10.tmp.mrs.coll10_1437349179_17 command: mapReduce { mapreduce: "coll10", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.357+0000 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.357+0000 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.357+0000 m31200| [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.358+0000 m31200| values...., out: "tmp.mrs.coll10_1437349179_17", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:230 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 10246 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { R: 2, W: 5 }, timeAcquiringMicros: { R: 4349, W: 24063 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 186ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.358+0000 m31200| 2015-07-19T23:39:39.358+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_89 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.392+0000 m31200| 2015-07-19T23:39:39.392+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_89 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.393+0000 m31200| 2015-07-19T23:39:39.393+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_89 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.393+0000 m31200| 2015-07-19T23:39:39.393+0000 I COMMAND [conn123] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_89 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.393+0000 m31100| 2015-07-19T23:39:39.393+0000 I COMMAND [conn43] CMD: drop db10.tmp.mrs.coll10_1437349179_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.394+0000 m31200| 2015-07-19T23:39:39.394+0000 I COMMAND [conn133] CMD: drop db10.tmp.mrs.coll10_1437349179_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.395+0000 m31201| 2015-07-19T23:39:39.395+0000 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_89 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.396+0000 m31101| 2015-07-19T23:39:39.396+0000 I COMMAND [repl writer 
worker 0] CMD: drop db10.tmp.mrs.coll10_1437349179_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.398+0000 m31202| 2015-07-19T23:39:39.397+0000 I COMMAND [repl writer worker 0] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll10_89 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.398+0000 m31201| 2015-07-19T23:39:39.398+0000 I COMMAND [repl writer worker 9] CMD: drop db10.tmp.mrs.coll10_1437349179_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.398+0000 m31102| 2015-07-19T23:39:39.396+0000 I COMMAND [repl writer worker 12] CMD: drop db10.tmp.mrs.coll10_1437349179_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.399+0000 m30999| 2015-07-19T23:39:39.399+0000 I NETWORK [conn60] end connection 10.139.123.131:57357 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.399+0000 m31202| 2015-07-19T23:39:39.399+0000 I COMMAND [repl writer worker 4] CMD: drop db10.tmp.mrs.coll10_1437349179_17 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.419+0000 m30999| 2015-07-19T23:39:39.419+0000 I COMMAND [conn1] DROP DATABASE: map_reduce_merge_nonatomic0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.420+0000 m30999| 2015-07-19T23:39:39.419+0000 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.420+0000 m30999| 2015-07-19T23:39:39.419+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:39.419+0000-55ac353bd2c1f750d15483bb", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349179419), what: "dropDatabase.start", ns: "map_reduce_merge_nonatomic0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.521+0000 m30999| 2015-07-19T23:39:39.521+0000 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic0 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.521+0000 m31200| 2015-07-19T23:39:39.521+0000 I COMMAND [conn111] dropDatabase map_reduce_merge_nonatomic0 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.522+0000 m31200| 2015-07-19T23:39:39.522+0000 I COMMAND [conn111] dropDatabase map_reduce_merge_nonatomic0 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.522+0000 m31200| 2015-07-19T23:39:39.522+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:132 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 54 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 124ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.523+0000 m30999| 2015-07-19T23:39:39.522+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:39.522+0000-55ac353bd2c1f750d15483bc", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349179522), what: "dropDatabase", ns: "map_reduce_merge_nonatomic0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.523+0000 m31200| 2015-07-19T23:39:39.522+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:132 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 65 } }, Database: { 
acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 125ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.523+0000 m31202| 2015-07-19T23:39:39.523+0000 I COMMAND [repl writer worker 6] dropDatabase map_reduce_merge_nonatomic0 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.523+0000 m31201| 2015-07-19T23:39:39.522+0000 I COMMAND [repl writer worker 2] dropDatabase map_reduce_merge_nonatomic0 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.524+0000 m31201| 2015-07-19T23:39:39.523+0000 I COMMAND [repl writer worker 2] dropDatabase map_reduce_merge_nonatomic0 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.524+0000 m31202| 2015-07-19T23:39:39.523+0000 I COMMAND [repl writer worker 6] dropDatabase map_reduce_merge_nonatomic0 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.574+0000 m30999| 2015-07-19T23:39:39.574+0000 I COMMAND [conn1] DROP DATABASE: map_reduce_merge_nonatomic1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.574+0000 m30999| 2015-07-19T23:39:39.574+0000 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.574+0000 m30999| 2015-07-19T23:39:39.574+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:39.574+0000-55ac353bd2c1f750d15483bd", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349179574), what: "dropDatabase.start", ns: "map_reduce_merge_nonatomic1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.676+0000 m30999| 2015-07-19T23:39:39.675+0000 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic1 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.676+0000 m31200| 2015-07-19T23:39:39.675+0000 I COMMAND [conn111] dropDatabase map_reduce_merge_nonatomic1 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.677+0000 m31200| 2015-07-19T23:39:39.676+0000 I COMMAND [conn111] dropDatabase map_reduce_merge_nonatomic1 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.677+0000 m31200| 2015-07-19T23:39:39.676+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:132 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 41 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 151ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.678+0000 m30999| 2015-07-19T23:39:39.676+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:39.676+0000-55ac353bd2c1f750d15483be", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349179676), what: "dropDatabase", ns: "map_reduce_merge_nonatomic1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.678+0000 m31200| 2015-07-19T23:39:39.677+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:132 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 259 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 152ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.678+0000 m31201| 
2015-07-19T23:39:39.677+0000 I COMMAND [repl writer worker 10] dropDatabase map_reduce_merge_nonatomic1 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.678+0000 m31202| 2015-07-19T23:39:39.677+0000 I COMMAND [repl writer worker 15] dropDatabase map_reduce_merge_nonatomic1 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.678+0000 m31201| 2015-07-19T23:39:39.677+0000 I COMMAND [repl writer worker 10] dropDatabase map_reduce_merge_nonatomic1 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.679+0000 m31202| 2015-07-19T23:39:39.678+0000 I COMMAND [repl writer worker 15] dropDatabase map_reduce_merge_nonatomic1 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.728+0000 m30999| 2015-07-19T23:39:39.728+0000 I COMMAND [conn1] DROP DATABASE: map_reduce_merge_nonatomic2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.729+0000 m30999| 2015-07-19T23:39:39.728+0000 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.729+0000 m30999| 2015-07-19T23:39:39.728+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:39.728+0000-55ac353bd2c1f750d15483bf", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349179728), what: "dropDatabase.start", ns: "map_reduce_merge_nonatomic2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.830+0000 m30999| 2015-07-19T23:39:39.830+0000 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic2 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.830+0000 m31200| 2015-07-19T23:39:39.830+0000 I COMMAND [conn111] dropDatabase map_reduce_merge_nonatomic2 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.831+0000 m31200| 2015-07-19T23:39:39.831+0000 I COMMAND [conn111] dropDatabase map_reduce_merge_nonatomic2 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.831+0000 m30999| 2015-07-19T23:39:39.831+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:39.831+0000-55ac353bd2c1f750d15483c0", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349179831), what: "dropDatabase", ns: "map_reduce_merge_nonatomic2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.832+0000 m31200| 2015-07-19T23:39:39.831+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:132 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 152ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.832+0000 m31200| 2015-07-19T23:39:39.831+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:132 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 151ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.832+0000 m31201| 2015-07-19T23:39:39.831+0000 I COMMAND [repl writer worker 8] dropDatabase map_reduce_merge_nonatomic2 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.832+0000 m31201| 2015-07-19T23:39:39.832+0000 I COMMAND [repl writer worker 8] dropDatabase 
map_reduce_merge_nonatomic2 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.833+0000 m31202| 2015-07-19T23:39:39.832+0000 I COMMAND [repl writer worker 9] dropDatabase map_reduce_merge_nonatomic2 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.833+0000 m31202| 2015-07-19T23:39:39.833+0000 I COMMAND [repl writer worker 9] dropDatabase map_reduce_merge_nonatomic2 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.885+0000 m30999| 2015-07-19T23:39:39.885+0000 I COMMAND [conn1] DROP DATABASE: map_reduce_merge_nonatomic3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.885+0000 m30999| 2015-07-19T23:39:39.885+0000 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic3 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.885+0000 m30999| 2015-07-19T23:39:39.885+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:39.885+0000-55ac353bd2c1f750d15483c1", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349179885), what: "dropDatabase.start", ns: "map_reduce_merge_nonatomic3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.987+0000 m30999| 2015-07-19T23:39:39.986+0000 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic3 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.987+0000 m31200| 2015-07-19T23:39:39.986+0000 I COMMAND [conn111] dropDatabase map_reduce_merge_nonatomic3 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.987+0000 m31200| 2015-07-19T23:39:39.987+0000 I COMMAND [conn111] dropDatabase map_reduce_merge_nonatomic3 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.989+0000 m31200| 2015-07-19T23:39:39.987+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:132 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 154ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.989+0000 m31200| 2015-07-19T23:39:39.987+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:132 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 154ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.989+0000 m31201| 2015-07-19T23:39:39.988+0000 I COMMAND [repl writer worker 1] dropDatabase map_reduce_merge_nonatomic3 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.990+0000 m31202| 2015-07-19T23:39:39.988+0000 I COMMAND [repl writer worker 12] dropDatabase map_reduce_merge_nonatomic3 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.990+0000 m31202| 2015-07-19T23:39:39.988+0000 I COMMAND [repl writer worker 12] dropDatabase map_reduce_merge_nonatomic3 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.990+0000 m31201| 2015-07-19T23:39:39.988+0000 I COMMAND [repl writer worker 1] dropDatabase map_reduce_merge_nonatomic3 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:39.990+0000 m30999| 2015-07-19T23:39:39.989+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:39.989+0000-55ac353bd2c1f750d15483c2", server: "ip-10-139-123-131", 
clientAddr: "127.0.0.1:47275", time: new Date(1437349179989), what: "dropDatabase", ns: "map_reduce_merge_nonatomic3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.040+0000 m30999| 2015-07-19T23:39:40.040+0000 I COMMAND [conn1] DROP DATABASE: map_reduce_merge_nonatomic4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.041+0000 m30999| 2015-07-19T23:39:40.040+0000 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic4 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.041+0000 m30999| 2015-07-19T23:39:40.040+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:40.040+0000-55ac353cd2c1f750d15483c3", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349180040), what: "dropDatabase.start", ns: "map_reduce_merge_nonatomic4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.142+0000 m30999| 2015-07-19T23:39:40.142+0000 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic4 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.142+0000 m31200| 2015-07-19T23:39:40.142+0000 I COMMAND [conn111] dropDatabase map_reduce_merge_nonatomic4 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.143+0000 m31200| 2015-07-19T23:39:40.143+0000 I COMMAND [conn111] dropDatabase map_reduce_merge_nonatomic4 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.144+0000 m30999| 2015-07-19T23:39:40.143+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:40.143+0000-55ac353cd2c1f750d15483c4", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349180143), what: "dropDatabase", ns: "map_reduce_merge_nonatomic4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.144+0000 m31200| 2015-07-19T23:39:40.143+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:132 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 52 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 153ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.145+0000 m31200| 2015-07-19T23:39:40.143+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:132 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 166 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 153ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.145+0000 m31202| 2015-07-19T23:39:40.143+0000 I COMMAND [repl writer worker 7] dropDatabase map_reduce_merge_nonatomic4 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.145+0000 m31201| 2015-07-19T23:39:40.143+0000 I COMMAND [repl writer worker 11] dropDatabase map_reduce_merge_nonatomic4 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.145+0000 m31202| 2015-07-19T23:39:40.144+0000 I COMMAND [repl writer worker 7] dropDatabase map_reduce_merge_nonatomic4 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.146+0000 m31201| 2015-07-19T23:39:40.144+0000 I COMMAND [repl writer worker 11] dropDatabase map_reduce_merge_nonatomic4 finished 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.194+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.194+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.194+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.194+0000 jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js: Workload completed in 5008 ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.194+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.194+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.195+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.195+0000 m30999| 2015-07-19T23:39:40.194+0000 I COMMAND [conn1] DROP: db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.195+0000 m30999| 2015-07-19T23:39:40.194+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:40.194+0000-55ac353cd2c1f750d15483c5", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349180194), what: "dropCollection.start", ns: "db10.coll10", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.246+0000 m30999| 2015-07-19T23:39:40.246+0000 I SHARDING [conn1] distributed lock 'db10.coll10/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac353cd2c1f750d15483c6 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.246+0000 m31100| 2015-07-19T23:39:40.246+0000 I COMMAND [conn127] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.247+0000 m31100| 2015-07-19T23:39:40.247+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 848ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.247+0000 m31100| 2015-07-19T23:39:40.247+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 848ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.248+0000 m31200| 2015-07-19T23:39:40.248+0000 I COMMAND [conn14] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.248+0000 m31200| 2015-07-19T23:39:40.248+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.249+0000 m31200| 2015-07-19T23:39:40.248+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.252+0000 m31102| 2015-07-19T23:39:40.249+0000 I COMMAND [repl writer worker 6] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.252+0000 m31101| 
2015-07-19T23:39:40.249+0000 I COMMAND [repl writer worker 10] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.253+0000 m31201| 2015-07-19T23:39:40.250+0000 I COMMAND [repl writer worker 5] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.253+0000 m31202| 2015-07-19T23:39:40.251+0000 I COMMAND [repl writer worker 14] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.301+0000 m31100| 2015-07-19T23:39:40.300+0000 I SHARDING [conn127] remotely refreshing metadata for db10.coll10 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac3535d2c1f750d15483b7, current metadata version is 2|3||55ac3535d2c1f750d15483b7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.301+0000 m31100| 2015-07-19T23:39:40.301+0000 W SHARDING [conn127] no chunks found when reloading db10.coll10, previous version was 0|0||55ac3535d2c1f750d15483b7, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.301+0000 m31100| 2015-07-19T23:39:40.301+0000 I SHARDING [conn127] dropping metadata for db10.coll10 at shard version 2|3||55ac3535d2c1f750d15483b7, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.302+0000 m31200| 2015-07-19T23:39:40.301+0000 I SHARDING [conn14] remotely refreshing metadata for db10.coll10 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac3535d2c1f750d15483b7, current metadata version is 2|5||55ac3535d2c1f750d15483b7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.302+0000 m31200| 2015-07-19T23:39:40.302+0000 W SHARDING [conn14] no chunks found when reloading db10.coll10, previous version was 0|0||55ac3535d2c1f750d15483b7, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.302+0000 m31200| 2015-07-19T23:39:40.302+0000 I SHARDING [conn14] dropping metadata for db10.coll10 at shard version 2|5||55ac3535d2c1f750d15483b7, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.302+0000 m30999| 2015-07-19T23:39:40.302+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:40.302+0000-55ac353cd2c1f750d15483c7", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349180302), what: "dropCollection", ns: "db10.coll10", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.354+0000 m30999| 2015-07-19T23:39:40.353+0000 I SHARDING [conn1] distributed lock 'db10.coll10/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.405+0000 m30999| 2015-07-19T23:39:40.405+0000 I COMMAND [conn1] DROP DATABASE: db10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.405+0000 m30999| 2015-07-19T23:39:40.405+0000 I SHARDING [conn1] DBConfig::dropDatabase: db10 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.406+0000 m30999| 2015-07-19T23:39:40.405+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:40.405+0000-55ac353cd2c1f750d15483c8", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349180405), what: "dropDatabase.start", ns: "db10", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.495+0000 m30999| 2015-07-19T23:39:40.495+0000 I SHARDING [conn1] DBConfig::dropDatabase: db10 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.496+0000 m31200| 2015-07-19T23:39:40.495+0000 I COMMAND [conn111] dropDatabase db10 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.496+0000 m31200| 2015-07-19T23:39:40.495+0000 I COMMAND [conn111] dropDatabase db10 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.497+0000 m30999| 2015-07-19T23:39:40.495+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:40.495+0000-55ac353cd2c1f750d15483c9", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349180495), what: "dropDatabase", ns: "db10", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.497+0000 m31200| 2015-07-19T23:39:40.495+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 35 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 242ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.497+0000 m31200| 2015-07-19T23:39:40.495+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 242ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.497+0000 m31201| 2015-07-19T23:39:40.496+0000 I COMMAND [repl writer worker 14] dropDatabase db10 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.497+0000 m31201| 2015-07-19T23:39:40.496+0000 I COMMAND [repl writer worker 14] dropDatabase db10 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.498+0000 m31202| 2015-07-19T23:39:40.496+0000 I COMMAND [repl writer worker 13] dropDatabase db10 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.498+0000 m31202| 2015-07-19T23:39:40.496+0000 I COMMAND [repl writer worker 13] dropDatabase db10 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.551+0000 m31100| 2015-07-19T23:39:40.550+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 299ms [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:40.553+0000 m31100| 2015-07-19T23:39:40.550+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 298ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.557+0000 m31100| 2015-07-19T23:39:40.556+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.559+0000 m31102| 2015-07-19T23:39:40.559+0000 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.561+0000 m31101| 2015-07-19T23:39:40.559+0000 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.570+0000 m31200| 2015-07-19T23:39:40.570+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.571+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.571+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.571+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.572+0000 jstests/concurrency/fsm_workloads/remove_where.js [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.572+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.572+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.572+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.573+0000 m31201| 2015-07-19T23:39:40.573+0000 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.573+0000 m31202| 2015-07-19T23:39:40.573+0000 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.575+0000 m30999| 2015-07-19T23:39:40.574+0000 I SHARDING [conn1] distributed lock 'db11/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac353cd2c1f750d15483ca [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.576+0000 m30999| 2015-07-19T23:39:40.576+0000 I SHARDING [conn1] Placing [db11] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.576+0000 m30999| 2015-07-19T23:39:40.576+0000 I SHARDING [conn1] Enabling sharding for database [db11] in config db [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.628+0000 m30999| 2015-07-19T23:39:40.627+0000 I SHARDING [conn1] distributed lock 'db11/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.635+0000 m31200| 2015-07-19T23:39:40.635+0000 I INDEX [conn28] build index on: db11.coll11 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db11.coll11" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.635+0000 m31200| 2015-07-19T23:39:40.635+0000 I INDEX [conn28] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.637+0000 m31200| 2015-07-19T23:39:40.637+0000 I INDEX [conn28] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.638+0000 m30999| 2015-07-19T23:39:40.637+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db11.coll11", key: { tid: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.639+0000 m30999| 2015-07-19T23:39:40.639+0000 I SHARDING [conn1] distributed lock 'db11.coll11/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac353cd2c1f750d15483cb [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.639+0000 m30999| 2015-07-19T23:39:40.639+0000 I SHARDING [conn1] enable sharding on: db11.coll11 with shard key: { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.639+0000 m30999| 2015-07-19T23:39:40.639+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:40.639+0000-55ac353cd2c1f750d15483cc", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349180639), what: "shardCollection.start", ns: "db11.coll11", details: { shardKey: { tid: 1.0 }, collection: "db11.coll11", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.642+0000 m31202| 2015-07-19T23:39:40.642+0000 I INDEX [repl writer worker 8] build index on: db11.coll11 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db11.coll11" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.642+0000 m31202| 2015-07-19T23:39:40.642+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.643+0000 m31202| 2015-07-19T23:39:40.643+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.647+0000 m31201| 2015-07-19T23:39:40.647+0000 I INDEX [repl writer worker 7] build index on: db11.coll11 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db11.coll11" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.647+0000 m31201| 2015-07-19T23:39:40.647+0000 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.648+0000 m31201| 2015-07-19T23:39:40.648+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.690+0000 m30999| 2015-07-19T23:39:40.690+0000 I SHARDING [conn1] going to create 1 chunk(s) for: db11.coll11 using new epoch 55ac353cd2c1f750d15483cd [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.741+0000 m30999| 2015-07-19T23:39:40.741+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db11.coll11: 0ms sequenceNumber: 55 version: 1|0||55ac353cd2c1f750d15483cd based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.793+0000 m30999| 2015-07-19T23:39:40.793+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db11.coll11: 0ms sequenceNumber: 56 version: 1|0||55ac353cd2c1f750d15483cd based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.793+0000 m31200| 2015-07-19T23:39:40.793+0000 I SHARDING [conn123] remotely refreshing metadata for db11.coll11 with requested shard version 1|0||55ac353cd2c1f750d15483cd, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.794+0000 m31200| 2015-07-19T23:39:40.794+0000 I SHARDING [conn123] collection db11.coll11 was previously unsharded, new metadata loaded with shard version 1|0||55ac353cd2c1f750d15483cd [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.794+0000 m31200| 2015-07-19T23:39:40.794+0000 I SHARDING [conn123] collection version was loaded at version 1|0||55ac353cd2c1f750d15483cd, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.794+0000 m30999| 2015-07-19T23:39:40.794+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:40.794+0000-55ac353cd2c1f750d15483ce", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349180794), what: "shardCollection", ns: "db11.coll11", details: { version: "1|0||55ac353cd2c1f750d15483cd" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.845+0000 m30999| 2015-07-19T23:39:40.845+0000 I SHARDING [conn1] distributed lock 'db11.coll11/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.845+0000 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.981+0000 m30998| 2015-07-19T23:39:40.981+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36022 #62 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:40.992+0000 m30999| 2015-07-19T23:39:40.992+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57382 #62 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.000+0000 m30999| 2015-07-19T23:39:40.999+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57383 #63 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.007+0000 m30998| 2015-07-19T23:39:41.007+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36025 #63 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.010+0000 m30999| 2015-07-19T23:39:41.010+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57385 #64 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.013+0000 m30998| 2015-07-19T23:39:41.013+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36027 #64 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.015+0000 m30998| 2015-07-19T23:39:41.014+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36028 #65 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.030+0000 m30999| 2015-07-19T23:39:41.030+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57388 #65 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.031+0000 m30999| 2015-07-19T23:39:41.031+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57389 #66 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.506+0000 m30998| 2015-07-19T23:39:41.037+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36031 #66 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.506+0000 setting random seed: 9852260458283 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.506+0000 setting random seed: 6079952241852 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.506+0000 setting random seed: 7608031374402 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.507+0000 setting random seed: 2253127330914 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.507+0000 setting random seed: 2472638152 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.507+0000 setting random seed: 1837472398765 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.507+0000 setting random seed: 7510820743627 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.507+0000 setting random seed: 2812188058160 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.507+0000 m30998| 2015-07-19T23:39:41.056+0000 I SHARDING [conn66] ChunkManager: time to load chunks for db11.coll11: 0ms sequenceNumber: 14 version: 1|0||55ac353cd2c1f750d15483cd based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.508+0000 m31200| 2015-07-19T23:39:41.057+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:134 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 
2 } }, oplog: { acquireCount: { r: 2 } } } 415ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.508+0000 m31200| 2015-07-19T23:39:41.057+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:134 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 415ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.508+0000 setting random seed: 5934528815560 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.508+0000 setting random seed: 2140755811706 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.508+0000 m31200| 2015-07-19T23:39:41.097+0000 I SHARDING [conn136] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.509+0000 m31200| 2015-07-19T23:39:41.097+0000 W SHARDING [conn136] possible low cardinality key detected in db11.coll11 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.509+0000 m31200| 2015-07-19T23:39:41.097+0000 W SHARDING [conn136] possible low cardinality key detected in db11.coll11 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.509+0000 m31200| 2015-07-19T23:39:41.098+0000 I SHARDING [conn87] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.509+0000 m31200| 2015-07-19T23:39:41.099+0000 I SHARDING [conn87] distributed lock 'db11.coll11/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac353dd9a63f6196b17280 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.509+0000 m31200| 2015-07-19T23:39:41.099+0000 I SHARDING [conn87] remotely refreshing metadata for db11.coll11 based on current shard version 1|0||55ac353cd2c1f750d15483cd, current metadata version is 1|0||55ac353cd2c1f750d15483cd [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.510+0000 m31200| 2015-07-19T23:39:41.114+0000 I SHARDING [conn87] metadata of collection db11.coll11 already up to date (shard version : 1|0||55ac353cd2c1f750d15483cd, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.510+0000 m31200| 2015-07-19T23:39:41.114+0000 I SHARDING [conn87] splitChunk accepted at version 1|0||55ac353cd2c1f750d15483cd [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.510+0000 m31200| 2015-07-19T23:39:41.129+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39793 #142 (70 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.510+0000 m31200| 2015-07-19T23:39:41.129+0000 I SHARDING [conn87] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:41.129+0000-55ac353dd9a63f6196b17281", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39658", time: new Date(1437349181129), what: "multi-split", ns: "db11.coll11", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 3, chunk: { min: { tid: MinKey }, max: { tid: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('55ac353cd2c1f750d15483cd') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.510+0000 m31200| 2015-07-19T23:39:41.150+0000 I SHARDING 
[conn136] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.511+0000 m31200| 2015-07-19T23:39:41.152+0000 I SHARDING [conn132] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.511+0000 m31200| 2015-07-19T23:39:41.153+0000 W SHARDING [conn132] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.511+0000 m30998| 2015-07-19T23:39:41.153+0000 W SHARDING [conn65] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.512+0000 m31200| 2015-07-19T23:39:41.171+0000 I SHARDING [conn136] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.512+0000 m31200| 2015-07-19T23:39:41.172+0000 W SHARDING [conn136] possible low cardinality key detected in db11.coll11 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.512+0000 m31200| 2015-07-19T23:39:41.172+0000 W SHARDING [conn136] possible low cardinality key detected in db11.coll11 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.512+0000 m31200| 2015-07-19T23:39:41.172+0000 W SHARDING [conn136] possible low cardinality key detected in db11.coll11 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.512+0000 m31200| 2015-07-19T23:39:41.172+0000 W SHARDING [conn136] possible low cardinality key detected in db11.coll11 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.512+0000 m31200| 2015-07-19T23:39:41.172+0000 W SHARDING [conn136] possible low cardinality key detected in db11.coll11 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.513+0000 m31200| 2015-07-19T23:39:41.172+0000 W SHARDING [conn136] possible low cardinality key detected in db11.coll11 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.513+0000 m31200| 2015-07-19T23:39:41.172+0000 W SHARDING [conn136] possible low cardinality key detected in db11.coll11 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.513+0000 m31200| 2015-07-19T23:39:41.172+0000 W SHARDING [conn136] possible low cardinality key detected in db11.coll11 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.513+0000 m31200| 2015-07-19T23:39:41.173+0000 I SHARDING [conn132] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { 
tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.513+0000 m31200| 2015-07-19T23:39:41.174+0000 W SHARDING [conn132] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.514+0000 m30998| 2015-07-19T23:39:41.174+0000 W SHARDING [conn62] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.515+0000 m31200| 2015-07-19T23:39:41.174+0000 I SHARDING [conn136] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.515+0000 m31200| 2015-07-19T23:39:41.175+0000 I SHARDING [conn132] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.515+0000 m31200| 2015-07-19T23:39:41.177+0000 W SHARDING [conn132] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.516+0000 m30998| 2015-07-19T23:39:41.177+0000 W SHARDING [conn64] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.516+0000 m31200| 2015-07-19T23:39:41.180+0000 I SHARDING [conn87] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:41.180+0000-55ac353dd9a63f6196b17282", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39658", time: new Date(1437349181180), what: "multi-split", ns: "db11.coll11", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 3, chunk: { min: { tid: 0.0 }, max: { tid: 9.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('55ac353cd2c1f750d15483cd') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.516+0000 m30998| 2015-07-19T23:39:41.180+0000 I SHARDING [conn64] ChunkManager: time to load chunks for db11.coll11: 0ms sequenceNumber: 15 version: 1|3||55ac353cd2c1f750d15483cd based on: 1|0||55ac353cd2c1f750d15483cd [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.516+0000 m31200| 2015-07-19T23:39:41.195+0000 I SHARDING [conn142] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.517+0000 m31200| 2015-07-19T23:39:41.197+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.517+0000 m31200| 2015-07-19T23:39:41.197+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.521+0000 m31200| 2015-07-19T23:39:41.197+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.521+0000 m31200| 2015-07-19T23:39:41.197+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.521+0000 m31200| 2015-07-19T23:39:41.197+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.524+0000 m31200| 2015-07-19T23:39:41.197+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.524+0000 m31200| 2015-07-19T23:39:41.197+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.525+0000 m31200| 2015-07-19T23:39:41.197+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.525+0000 m31200| 2015-07-19T23:39:41.198+0000 I SHARDING [conn133] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, 
min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.525+0000 m31200| 2015-07-19T23:39:41.199+0000 W SHARDING [conn133] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.526+0000 m30999| 2015-07-19T23:39:41.199+0000 W SHARDING [conn64] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.526+0000 m31200| 2015-07-19T23:39:41.202+0000 I SHARDING [conn142] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.526+0000 m31200| 2015-07-19T23:39:41.204+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.527+0000 m31200| 2015-07-19T23:39:41.204+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.527+0000 m31200| 2015-07-19T23:39:41.204+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.527+0000 m31200| 2015-07-19T23:39:41.204+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.527+0000 m31200| 2015-07-19T23:39:41.204+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.527+0000 m31200| 2015-07-19T23:39:41.205+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.528+0000 m31200| 2015-07-19T23:39:41.205+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.528+0000 m31200| 2015-07-19T23:39:41.205+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.528+0000 m31200| 2015-07-19T23:39:41.205+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.528+0000 m31200| 2015-07-19T23:39:41.207+0000 I SHARDING [conn142] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:41.528+0000 m31200| 2015-07-19T23:39:41.208+0000 W SHARDING [conn142] possible low cardinality key detected in db11.coll11 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.529+0000 m30999| 2015-07-19T23:39:41.210+0000 W SHARDING [conn65] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.529+0000 m31200| 2015-07-19T23:39:41.208+0000 I SHARDING [conn133] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.529+0000 m31200| 2015-07-19T23:39:41.209+0000 I SHARDING [conn97] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.530+0000 m31200| 2015-07-19T23:39:41.210+0000 W SHARDING [conn133] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.530+0000 m29000| 2015-07-19T23:39:41.212+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55692 #39 (39 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.530+0000 m31200| 2015-07-19T23:39:41.231+0000 I SHARDING [conn87] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:41.231+0000-55ac353dd9a63f6196b17283", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39658", time: new Date(1437349181231), what: "multi-split", ns: "db11.coll11", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 3, chunk: { min: { tid: 9.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('55ac353cd2c1f750d15483cd') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.530+0000 m31200| 2015-07-19T23:39:41.245+0000 W SHARDING [conn97] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.531+0000 m30999| 2015-07-19T23:39:41.245+0000 W SHARDING [conn63] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.531+0000 m30999| 2015-07-19T23:39:41.247+0000 I SHARDING [conn63] ChunkManager: time to load chunks for db11.coll11: 0ms sequenceNumber: 57 version: 1|3||55ac353cd2c1f750d15483cd based on: 1|0||55ac353cd2c1f750d15483cd [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.531+0000 m31200| 2015-07-19T23:39:41.282+0000 I SHARDING [conn87] distributed lock 'db11.coll11/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.531+0000 m31200| 2015-07-19T23:39:41.282+0000 I COMMAND [conn87] command db11.coll11 command: splitChunk { splitChunk: "db11.coll11", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac353cd2c1f750d15483cd') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 28678 } } } protocol:op_command 184ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.532+0000 m30998| 2015-07-19T23:39:41.282+0000 I SHARDING [conn66] autosplitted db11.coll11 shard: ns: db11.coll11, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.532+0000 m31200| 2015-07-19T23:39:41.469+0000 I QUERY [conn22] query db11.coll11 query: { $where: "this.tid === 7" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1300 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 306ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.532+0000 m31200| 2015-07-19T23:39:41.488+0000 I QUERY [conn103] query db11.coll11 query: { $where: "this.tid === 3" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1300 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 26 } }, Database: { acquireCount: { r: 13 } }, Collection: { acquireCount: { r: 13 } } } 239ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.533+0000 m31200| 2015-07-19T23:39:41.526+0000 I WRITE [conn28] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } ndeleted:9 keyUpdates:0 writeConflicts:0 numYields:11 locks:{ Global: { acquireCount: { r: 25, w: 21 } }, Database: { acquireCount: { r: 2, w: 21 } }, Collection: { acquireCount: { r: 2, w: 12 } 
}, Metadata: { acquireCount: { w: 9 } }, oplog: { acquireCount: { w: 9 } } } 393ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.533+0000 m31200| 2015-07-19T23:39:41.526+0000 I COMMAND [conn28] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 25, w: 21 } }, Database: { acquireCount: { r: 2, w: 21 } }, Collection: { acquireCount: { r: 2, w: 12 } }, Metadata: { acquireCount: { w: 9 } }, oplog: { acquireCount: { w: 9 } } } protocol:op_command 393ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.560+0000 m31200| 2015-07-19T23:39:41.560+0000 I QUERY [conn117] query db11.coll11 query: { $where: "this.tid === 0" } planSummary: COLLSCAN cursorid:941382945608 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:771 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 236ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.562+0000 m31100| 2015-07-19T23:39:41.561+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.562+0000 m31100| 2015-07-19T23:39:41.562+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.605+0000 m31200| 2015-07-19T23:39:41.605+0000 I QUERY [conn122] query db11.coll11 query: { $where: "this.tid === 1" } planSummary: COLLSCAN cursorid:942264456489 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:988 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 26 } }, Database: { acquireCount: { r: 13 } }, Collection: { acquireCount: { r: 13 } } } 270ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.639+0000 m31200| 2015-07-19T23:39:41.638+0000 I WRITE [conn115] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" } ndeleted:4 keyUpdates:0 writeConflicts:0 numYields:15 locks:{ Global: { acquireCount: { r: 24, w: 20 } }, Database: { acquireCount: { r: 2, w: 20 } }, Collection: { acquireCount: { r: 2, w: 16 } }, Metadata: { acquireCount: { w: 4 } }, oplog: { acquireCount: { w: 4 } } } 482ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.639+0000 m31200| 2015-07-19T23:39:41.639+0000 I COMMAND [conn115] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 24, w: 20 } }, Database: { acquireCount: { r: 2, w: 20 } }, Collection: { acquireCount: { r: 2, w: 16 } }, Metadata: { acquireCount: { w: 4 } }, oplog: { acquireCount: { w: 4 } } } protocol:op_command 482ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.656+0000 m31200| 2015-07-19T23:39:41.656+0000 I QUERY [conn123] query db11.coll11 query: { $where: "this.tid === 4" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1590 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 357ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.726+0000 m31200| 2015-07-19T23:39:41.725+0000 I WRITE [conn116] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" } ndeleted:7 keyUpdates:0 writeConflicts:0 numYields:16 locks:{ Global: { acquireCount: { r: 28, w: 24 } }, Database: { acquireCount: { r: 2, w: 24 } }, Collection: { acquireCount: { r: 2, w: 17 } }, Metadata: { acquireCount: { w: 7 } }, oplog: { acquireCount: { w: 7 } } } 440ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.727+0000 m31200| 2015-07-19T23:39:41.726+0000 I COMMAND [conn116] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 28, w: 24 } }, Database: { acquireCount: { r: 2, w: 24 } }, Collection: { acquireCount: { r: 2, w: 17 } }, Metadata: { acquireCount: { w: 7 } }, oplog: { acquireCount: { w: 7 } } } protocol:op_command 441ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.788+0000 m31200| 2015-07-19T23:39:41.788+0000 I QUERY [conn103] query db11.coll11 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:941626015283 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1248 keyUpdates:0 writeConflicts:0 numYields:12 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 26 } }, Database: { acquireCount: { r: 13 } }, Collection: { acquireCount: { r: 13 } } } 216ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.796+0000 m31200| 2015-07-19T23:39:41.795+0000 I WRITE [conn19] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" } ndeleted:17 keyUpdates:0 writeConflicts:0 numYields:19 locks:{ Global: { acquireCount: { r: 41, w: 37 } }, Database: { acquireCount: { r: 2, w: 37 } }, Collection: { acquireCount: { r: 2, w: 20 } }, Metadata: { acquireCount: { w: 17 } }, oplog: { acquireCount: { w: 17 } } } 498ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.796+0000 m31200| 2015-07-19T23:39:41.796+0000 I COMMAND [conn19] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 41, w: 37 } }, Database: { acquireCount: { r: 2, w: 37 } }, Collection: { acquireCount: { r: 2, w: 20 } }, Metadata: { acquireCount: { w: 17 } }, oplog: { acquireCount: { w: 17 } } } protocol:op_command 498ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.806+0000 m31200| 2015-07-19T23:39:41.806+0000 I WRITE [conn29] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" } ndeleted:6 keyUpdates:0 writeConflicts:0 numYields:19 locks:{ Global: { acquireCount: { r: 30, w: 26 } }, Database: { acquireCount: { r: 2, w: 26 } }, Collection: { acquireCount: { r: 2, w: 20 } }, Metadata: { acquireCount: { w: 6 } }, oplog: { acquireCount: { w: 6 } } } 520ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.807+0000 m31200| 2015-07-19T23:39:41.807+0000 I COMMAND [conn29] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 30, w: 26 } }, Database: { acquireCount: { r: 2, w: 26 } }, Collection: { acquireCount: { r: 2, w: 20 } }, Metadata: { acquireCount: { w: 6 } }, oplog: { acquireCount: { w: 6 } } } protocol:op_command 520ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.835+0000 m31200| 2015-07-19T23:39:41.835+0000 I QUERY [conn123] query db11.coll11 query: { $where: "this.tid === 0" } planSummary: COLLSCAN cursorid:941506802529 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:736 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 141ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.859+0000 m31200| 2015-07-19T23:39:41.859+0000 I WRITE [conn28] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } ndeleted:26 keyUpdates:0 writeConflicts:0 numYields:17 locks:{ Global: { acquireCount: { r: 44, w: 44 } }, Database: { acquireCount: { w: 44 } }, Collection: { acquireCount: { w: 18 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } 262ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.859+0000 m31200| 2015-07-19T23:39:41.859+0000 I COMMAND [conn28] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 44, w: 44 } }, Database: { acquireCount: { w: 44 } }, Collection: { acquireCount: { w: 18 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } protocol:op_command 263ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.886+0000 m31200| 2015-07-19T23:39:41.886+0000 I WRITE [conn114] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" } ndeleted:10 keyUpdates:0 writeConflicts:0 numYields:21 locks:{ Global: { acquireCount: { r: 32, w: 32 } }, Database: { acquireCount: { w: 32 } }, Collection: { acquireCount: { w: 22 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } 413ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.887+0000 m31200| 2015-07-19T23:39:41.886+0000 I COMMAND [conn114] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 32, w: 32 } }, Database: { acquireCount: { w: 32 } }, Collection: { acquireCount: { w: 22 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } protocol:op_command 413ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.934+0000 m31200| 2015-07-19T23:39:41.933+0000 I QUERY [conn118] query db11.coll11 query: { $where: "this.tid === 8" } planSummary: COLLSCAN cursorid:941785657191 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1124 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 135ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.947+0000 m31200| 2015-07-19T23:39:41.947+0000 I QUERY [conn122] query db11.coll11 query: { $where: "this.tid === 5" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1881 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:18 nreturned:96 reslen:4436 locks:{ Global: { acquireCount: { r: 38 } }, Database: { acquireCount: { r: 19 } }, Collection: { acquireCount: { r: 19 } } } 306ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.976+0000 m31200| 2015-07-19T23:39:41.976+0000 I QUERY [conn30] getmore db11.coll11 query: { $where: "this.tid === 0" } cursorid:941506802529 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:199 reslen:9174 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 139ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.998+0000 m31200| 2015-07-19T23:39:41.998+0000 I WRITE [conn115] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" } ndeleted:15 keyUpdates:0 writeConflicts:0 numYields:17 locks:{ Global: { acquireCount: { r: 33, w: 33 } }, Database: { acquireCount: { w: 33 } }, Collection: { acquireCount: { w: 18 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } 298ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:41.999+0000 m31200| 2015-07-19T23:39:41.998+0000 I COMMAND [conn115] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 33, w: 33 } }, Database: { acquireCount: { w: 33 } }, Collection: { acquireCount: { w: 18 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 298ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.023+0000 m31200| 2015-07-19T23:39:42.023+0000 I QUERY [conn117] query db11.coll11 query: { $where: "this.tid === 6" } planSummary: COLLSCAN cursorid:942458307914 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1695 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 251ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.046+0000 m31200| 2015-07-19T23:39:42.045+0000 I QUERY [conn22] query db11.coll11 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:940969367427 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1210 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 173ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.087+0000 m31200| 2015-07-19T23:39:42.087+0000 I QUERY [conn123] query db11.coll11 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:941902796957 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1380 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 226ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.131+0000 m31200| 2015-07-19T23:39:42.129+0000 I QUERY [conn30] getmore db11.coll11 query: { $where: "this.tid === 6" } cursorid:942458307914 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:92 reslen:4252 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 102ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.144+0000 m31200| 2015-07-19T23:39:42.144+0000 I QUERY [conn130] getmore db11.coll11 query: { $where: "this.tid === 8" } cursorid:941785657191 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:12 nreturned:82 reslen:3792 locks:{ Global: { acquireCount: { r: 26 } }, Database: { acquireCount: { r: 13 } }, Collection: { acquireCount: { r: 13 } } } 208ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.243+0000 m31200| 2015-07-19T23:39:42.242+0000 I WRITE [conn112] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" } ndeleted:20 keyUpdates:0 writeConflicts:0 numYields:29 locks:{ Global: { acquireCount: { r: 50, w: 50 } }, Database: { acquireCount: { w: 50 } }, Collection: { acquireCount: { w: 30 } }, Metadata: { acquireCount: { w: 20 } }, oplog: { acquireCount: { w: 20 } } } 552ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.245+0000 m31200| 2015-07-19T23:39:42.244+0000 I COMMAND [conn112] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 50, w: 50 } }, Database: { acquireCount: { w: 50 } }, Collection: { acquireCount: { w: 30 } }, Metadata: { acquireCount: { w: 20 } }, oplog: { acquireCount: { w: 20 } } } protocol:op_command 554ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.308+0000 m31200| 2015-07-19T23:39:42.308+0000 I QUERY [conn32] getmore db11.coll11 query: { $where: "this.tid === 3" } cursorid:940969367427 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:199 reslen:9174 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 259ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.314+0000 m31200| 2015-07-19T23:39:42.314+0000 I QUERY [conn33] getmore db11.coll11 query: { $where: "this.tid === 2" } cursorid:941902796957 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:64 reslen:2964 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 225ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.387+0000 m31200| 2015-07-19T23:39:42.387+0000 I QUERY [conn103] query db11.coll11 query: { $where: "this.tid === 9" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:3086 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:31 nreturned:94 reslen:4344 locks:{ Global: { acquireCount: { r: 64 } }, Database: { acquireCount: { r: 32 } }, Collection: { acquireCount: { r: 32 } } } 577ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.453+0000 m31200| 2015-07-19T23:39:42.452+0000 I QUERY [conn117] query db11.coll11 query: { $where: "this.tid === 6" } planSummary: COLLSCAN cursorid:941215983488 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1628 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 238ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.458+0000 m31200| 2015-07-19T23:39:42.457+0000 I WRITE [conn29] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" } ndeleted:7 keyUpdates:0 writeConflicts:0 numYields:29 locks:{ Global: { acquireCount: { r: 37, w: 37 } }, Database: { acquireCount: { w: 37 } }, Collection: { acquireCount: { w: 30 } }, Metadata: { acquireCount: { w: 7 } }, oplog: { acquireCount: { w: 7 } } } 504ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.458+0000 m31200| 2015-07-19T23:39:42.458+0000 I COMMAND [conn29] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 37, w: 37 } }, Database: { acquireCount: { w: 37 } }, Collection: { acquireCount: { w: 30 } }, Metadata: { acquireCount: { w: 7 } }, oplog: { acquireCount: { w: 7 } } } protocol:op_command 505ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.464+0000 m31200| 2015-07-19T23:39:42.464+0000 I QUERY [conn22] query db11.coll11 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:941042414999 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1143 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 130ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.489+0000 m31200| 2015-07-19T23:39:42.489+0000 I QUERY [conn118] query db11.coll11 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:940991990453 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1313 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 156ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.525+0000 m31200| 2015-07-19T23:39:42.525+0000 I QUERY [conn123] query db11.coll11 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:942328187679 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1511 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 244ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.535+0000 m31200| 2015-07-19T23:39:42.534+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39796 #143 (71 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.565+0000 m31100| 2015-07-19T23:39:42.564+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.566+0000 m31100| 2015-07-19T23:39:42.565+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.572+0000 m31200| 2015-07-19T23:39:42.572+0000 I WRITE [conn114] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" } ndeleted:27 keyUpdates:0 writeConflicts:0 numYields:37 locks:{ Global: { acquireCount: { r: 65, w: 65 } }, Database: { acquireCount: { w: 65 } }, Collection: { acquireCount: { w: 38 } }, Metadata: { acquireCount: { w: 27 } }, oplog: { acquireCount: { w: 27 } } } 621ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.573+0000 m31200| 2015-07-19T23:39:42.573+0000 I COMMAND [conn114] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 65, w: 65 } }, Database: { acquireCount: { w: 65 } }, Collection: { acquireCount: { w: 38 } }, Metadata: { acquireCount: { w: 27 } }, oplog: { acquireCount: { w: 27 } } } protocol:op_command 621ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.609+0000 m31200| 2015-07-19T23:39:42.609+0000 I QUERY [conn130] getmore db11.coll11 query: { $where: "this.tid === 0" } cursorid:942449373652 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:24 nreturned:599 reslen:27574 locks:{ Global: { acquireCount: { r: 50 } }, Database: { acquireCount: { r: 25 } }, Collection: { acquireCount: { r: 25 } } } 340ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.622+0000 m30999| 2015-07-19T23:39:42.622+0000 I NETWORK [conn63] end connection 10.139.123.131:57383 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.630+0000 m31200| 2015-07-19T23:39:42.630+0000 I WRITE [conn116] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" } ndeleted:26 keyUpdates:0 writeConflicts:0 numYields:32 locks:{ Global: { acquireCount: { r: 59, w: 59 } }, Database: { acquireCount: { w: 59 } }, Collection: { acquireCount: { w: 33 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } 482ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.631+0000 m31200| 2015-07-19T23:39:42.630+0000 I COMMAND [conn116] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 59, w: 59 } }, Database: { acquireCount: { w: 59 } }, Collection: { acquireCount: { w: 33 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } protocol:op_command 483ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.663+0000 m31200| 2015-07-19T23:39:42.662+0000 I QUERY [conn33] getmore db11.coll11 query: { $where: "this.tid === 6" } cursorid:941215983488 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:292 reslen:13452 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 207ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.678+0000 m31200| 2015-07-19T23:39:42.678+0000 I WRITE [conn115] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" } ndeleted:57 keyUpdates:0 writeConflicts:0 numYields:30 locks:{ Global: { acquireCount: { r: 88, w: 88 } }, Database: { acquireCount: { w: 88 } }, Collection: { acquireCount: { w: 31 } }, Metadata: { acquireCount: { w: 57 } }, oplog: { acquireCount: { w: 57 } } } 488ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.679+0000 m31200| 2015-07-19T23:39:42.678+0000 I COMMAND [conn115] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 88, w: 88 } }, Database: { acquireCount: { w: 88 } }, Collection: { acquireCount: { w: 31 } }, Metadata: { acquireCount: { w: 57 } }, oplog: { acquireCount: { w: 57 } } } protocol:op_command 488ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.736+0000 m31200| 2015-07-19T23:39:42.735+0000 I QUERY [conn30] getmore db11.coll11 query: { $where: "this.tid === 2" } cursorid:940991990453 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:164 reslen:7564 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 244ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.767+0000 m31200| 2015-07-19T23:39:42.767+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 146ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.768+0000 m31200| 2015-07-19T23:39:42.767+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 146ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.772+0000 m31200| 2015-07-19T23:39:42.771+0000 I QUERY [conn103] query db11.coll11 query: { $where: "this.tid === 7" } planSummary: COLLSCAN cursorid:942742753431 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1737 keyUpdates:0 writeConflicts:0 numYields:14 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 30 } }, Database: { acquireCount: { r: 15 } }, Collection: { acquireCount: { r: 15 } } } 197ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.773+0000 m31200| 2015-07-19T23:39:42.773+0000 I QUERY [conn143] getmore db11.coll11 query: { $where: "this.tid === 4" } cursorid:942328187679 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:14 nreturned:179 reslen:8254 locks:{ Global: { acquireCount: { r: 30 } }, Database: { acquireCount: { r: 15 } }, Collection: { acquireCount: { r: 15 } } } 236ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.779+0000 m31200| 2015-07-19T23:39:42.779+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39797 #144 (72 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.834+0000 m31200| 2015-07-19T23:39:42.834+0000 I QUERY [conn32] getmore db11.coll11 query: { $where: "this.tid === 3" } cursorid:941042414999 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:299 reslen:13774 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 368ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.835+0000 m31200| 2015-07-19T23:39:42.835+0000 I WRITE [conn25] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" } ndeleted:6 keyUpdates:0 writeConflicts:0 numYields:29 locks:{ Global: { acquireCount: { r: 36, w: 36 } }, Database: { acquireCount: { w: 36 } }, Collection: { acquireCount: { w: 30 } }, Metadata: { acquireCount: { w: 6 } }, oplog: { acquireCount: { w: 6 } } } 444ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.836+0000 m31200| 2015-07-19T23:39:42.835+0000 I COMMAND [conn25] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 36, w: 36 } }, Database: { acquireCount: { w: 36 } }, Collection: { acquireCount: { w: 30 } }, Metadata: { acquireCount: { w: 6 } }, oplog: { acquireCount: { w: 6 } } } protocol:op_command 444ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.963+0000 m31200| 2015-07-19T23:39:42.963+0000 I QUERY [conn22] query db11.coll11 query: { $where: "this.tid === 5" } planSummary: COLLSCAN cursorid:940612278144 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2975 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 397ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:42.995+0000 m31200| 2015-07-19T23:39:42.995+0000 I QUERY [conn123] query db11.coll11 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:942405022691 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1300 keyUpdates:0 writeConflicts:0 numYields:14 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 30 } }, Database: { acquireCount: { r: 15 } }, Collection: { acquireCount: { r: 15 } } } 256ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.055+0000 m31200| 2015-07-19T23:39:43.055+0000 I QUERY [conn33] getmore db11.coll11 query: { $where: "this.tid === 8" } cursorid:942697268316 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:20 nreturned:56 reslen:2596 locks:{ Global: { acquireCount: { r: 42 } }, Database: { acquireCount: { r: 21 } }, Collection: { acquireCount: { r: 21 } } } 330ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.101+0000 m31200| 2015-07-19T23:39:43.100+0000 I QUERY [conn144] getmore db11.coll11 query: { $where: "this.tid === 7" } cursorid:942742753431 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:62 reslen:2872 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 312ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.182+0000 m31200| 2015-07-19T23:39:43.181+0000 I QUERY [conn31] getmore db11.coll11 query: { $where: "this.tid === 1" } cursorid:942293783906 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:327 reslen:15062 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 417ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.252+0000 m31200| 2015-07-19T23:39:43.251+0000 I QUERY [conn123] query db11.coll11 query: { $where: "this.tid === 8" } planSummary: COLLSCAN cursorid:941703022322 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1042 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 176ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.305+0000 m31200| 2015-07-19T23:39:43.305+0000 I WRITE [conn116] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" } ndeleted:37 keyUpdates:0 writeConflicts:0 numYields:38 locks:{ Global: { acquireCount: { r: 76, w: 76 } }, Database: { acquireCount: { w: 76 } }, Collection: { acquireCount: { w: 39 } }, Metadata: { acquireCount: { w: 37 } }, oplog: { acquireCount: { w: 37 } } } 638ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.306+0000 m31200| 2015-07-19T23:39:43.305+0000 I COMMAND [conn116] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 76, w: 76 } }, Database: { acquireCount: { w: 76 } }, Collection: { acquireCount: { w: 39 } }, Metadata: { acquireCount: { w: 37 } }, oplog: { acquireCount: { w: 37 } } } protocol:op_command 638ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.386+0000 m31200| 2015-07-19T23:39:43.386+0000 I QUERY [conn143] getmore db11.coll11 query: { $where: "this.tid === 2" } cursorid:942405022691 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:164 reslen:7564 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 389ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.530+0000 m31200| 2015-07-19T23:39:43.529+0000 I QUERY [conn122] query db11.coll11 query: { $where: "this.tid === 7" } planSummary: COLLSCAN cursorid:940719143478 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1666 keyUpdates:0 writeConflicts:0 numYields:20 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 42 } }, Database: { acquireCount: { r: 21 } }, Collection: { acquireCount: { r: 21 } } } 409ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.565+0000 m31200| 2015-07-19T23:39:43.564+0000 I QUERY [conn22] query db11.coll11 query: { $where: "this.tid === 5" } planSummary: COLLSCAN cursorid:941046156550 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2871 keyUpdates:0 writeConflicts:0 numYields:29 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 60 } }, Database: { acquireCount: { r: 30 } }, Collection: { acquireCount: { r: 30 } } } 549ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.567+0000 m31100| 2015-07-19T23:39:43.567+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.568+0000 m31100| 2015-07-19T23:39:43.568+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.571+0000 m31200| 2015-07-19T23:39:43.570+0000 I WRITE [conn25] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" } ndeleted:44 keyUpdates:0 writeConflicts:0 numYields:37 locks:{ Global: { acquireCount: { r: 82, w: 82 } }, Database: { acquireCount: { w: 82 } }, Collection: { acquireCount: { w: 38 } }, Metadata: { acquireCount: { w: 44 } }, oplog: { acquireCount: { w: 44 } } } 713ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.571+0000 m31200| 2015-07-19T23:39:43.571+0000 I COMMAND [conn25] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 82, w: 82 } }, Database: { acquireCount: { w: 82 } }, Collection: { acquireCount: { w: 38 } }, Metadata: { acquireCount: { w: 44 } }, oplog: { acquireCount: { w: 44 } } } protocol:op_command 717ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.685+0000 m30998| 2015-07-19T23:39:43.684+0000 I NETWORK [conn65] end connection 10.139.123.131:36028 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.706+0000 m31200| 2015-07-19T23:39:43.706+0000 I WRITE [conn112] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" } ndeleted:28 keyUpdates:0 writeConflicts:0 numYields:47 locks:{ Global: { acquireCount: { r: 76, w: 76 } }, Database: { acquireCount: { w: 76 } }, Collection: { acquireCount: { w: 48 } }, Metadata: { acquireCount: { w: 28 } }, oplog: { acquireCount: { w: 28 } } } 929ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.707+0000 m31200| 2015-07-19T23:39:43.706+0000 I COMMAND [conn112] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 76, w: 76 } }, Database: { acquireCount: { w: 76 } }, Collection: { acquireCount: { w: 48 } }, Metadata: { acquireCount: { w: 28 } }, oplog: { acquireCount: { w: 28 } } } protocol:op_command 930ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.715+0000 m31200| 2015-07-19T23:39:43.715+0000 I QUERY [conn103] query db11.coll11 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:941491260889 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:3069 keyUpdates:0 writeConflicts:0 numYields:41 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 84 } }, Database: { acquireCount: { r: 42 } }, Collection: { acquireCount: { r: 42 } } } 846ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.739+0000 m31200| 2015-07-19T23:39:43.738+0000 I QUERY [conn33] getmore db11.coll11 query: { $where: "this.tid === 8" } cursorid:941703022322 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:156 reslen:7196 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 484ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.741+0000 m31200| 2015-07-19T23:39:43.741+0000 I QUERY [conn22] query db11.coll11 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:942562293205 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1095 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 167ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.820+0000 m31200| 2015-07-19T23:39:43.819+0000 I QUERY [conn123] query db11.coll11 query: { $where: "this.tid === 6" } planSummary: COLLSCAN cursorid:942110710283 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1547 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 513ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.878+0000 m31200| 2015-07-19T23:39:43.878+0000 I QUERY [conn117] query db11.coll11 query: { $where: "this.tid === 8" } planSummary: COLLSCAN cursorid:942630026421 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1020 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 116ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.906+0000 m31200| 2015-07-19T23:39:43.905+0000 I QUERY [conn118] query db11.coll11 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:941809981453 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1441 keyUpdates:0 writeConflicts:0 numYields:12 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 26 } }, Database: { acquireCount: { r: 13 } }, Collection: { acquireCount: { r: 13 } } } 197ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:43.928+0000 m31200| 2015-07-19T23:39:43.928+0000 I QUERY [conn31] getmore db11.coll11 query: { $where: "this.tid === 7" } cursorid:940719143478 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:162 reslen:7472 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 396ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.058+0000 m31200| 2015-07-19T23:39:44.057+0000 I WRITE [conn116] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } ndeleted:24 keyUpdates:0 writeConflicts:0 numYields:37 locks:{ Global: { acquireCount: { r: 62, w: 62 } }, Database: { acquireCount: { w: 62 } }, Collection: { acquireCount: { w: 38 } }, Metadata: { acquireCount: { w: 24 } }, oplog: { acquireCount: { w: 24 } } } 668ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.059+0000 m31200| 2015-07-19T23:39:44.058+0000 I COMMAND [conn116] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 62, w: 62 } }, Database: { acquireCount: { w: 62 } }, Collection: { acquireCount: { w: 38 } }, Metadata: { acquireCount: { w: 24 } }, oplog: { acquireCount: { w: 24 } } } protocol:op_command 669ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.098+0000 m31200| 2015-07-19T23:39:44.098+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 121ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.099+0000 m31200| 2015-07-19T23:39:44.098+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 121ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.102+0000 m31200| 2015-07-19T23:39:44.102+0000 I WRITE [conn115] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" } ndeleted:39 keyUpdates:0 writeConflicts:0 numYields:45 locks:{ Global: { acquireCount: { r: 85, w: 85 } }, Database: { acquireCount: { w: 85 } }, Collection: { acquireCount: { w: 46 } }, Metadata: { acquireCount: { w: 39 } }, oplog: { acquireCount: { w: 39 } } } 916ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.103+0000 m31200| 2015-07-19T23:39:44.102+0000 I COMMAND [conn115] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 85, w: 85 } }, Database: { acquireCount: { w: 85 } }, Collection: { acquireCount: { w: 46 } }, Metadata: { acquireCount: { w: 39 } }, oplog: { acquireCount: { w: 39 } } } protocol:op_command 916ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.107+0000 m30998| 2015-07-19T23:39:44.107+0000 I NETWORK [conn62] end connection 10.139.123.131:36022 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.145+0000 m31200| 2015-07-19T23:39:44.145+0000 I QUERY [conn32] getmore db11.coll11 query: { $where: "this.tid === 3" } cursorid:942562293205 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:255 reslen:11750 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 402ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.153+0000 m30998| 2015-07-19T23:39:44.153+0000 I NETWORK [conn64] end connection 10.139.123.131:36027 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.205+0000 m31200| 2015-07-19T23:39:44.205+0000 I QUERY [conn33] getmore db11.coll11 query: { $where: "this.tid === 6" } cursorid:942110710283 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:255 reslen:11750 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 384ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.237+0000 m30999| 2015-07-19T23:39:44.237+0000 I NETWORK [conn66] end connection 10.139.123.131:57389 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.268+0000 m31200| 2015-07-19T23:39:44.267+0000 I QUERY [conn143] getmore db11.coll11 query: { $where: "this.tid === 8" } cursorid:942630026421 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:256 reslen:11796 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 387ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.275+0000 m30999| 2015-07-19T23:39:44.275+0000 I NETWORK [conn65] end connection 10.139.123.131:57388 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.348+0000 m31200| 2015-07-19T23:39:44.348+0000 I QUERY [conn22] query db11.coll11 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:942666347842 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:3004 keyUpdates:0 writeConflicts:0 numYields:30 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 62 } }, Database: { acquireCount: { r: 31 } }, Collection: { acquireCount: { r: 31 } } } 537ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.366+0000 m31200| 2015-07-19T23:39:44.366+0000 I WRITE [conn25] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" } ndeleted:33 keyUpdates:0 writeConflicts:0 numYields:33 locks:{ Global: { acquireCount: { r: 67, w: 67 } }, Database: { acquireCount: { w: 67 } }, Collection: { acquireCount: { w: 34 } }, Metadata: { acquireCount: { w: 33 } }, oplog: { acquireCount: { w: 33 } } } 411ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.367+0000 m31200| 2015-07-19T23:39:44.366+0000 I COMMAND [conn25] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 67, w: 67 } }, Database: { acquireCount: { w: 67 } }, Collection: { acquireCount: { w: 34 } }, Metadata: { acquireCount: { w: 33 } }, oplog: { acquireCount: { w: 33 } } } protocol:op_command 411ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.371+0000 m30998| 2015-07-19T23:39:44.370+0000 I NETWORK [conn63] end connection 10.139.123.131:36025 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.379+0000 m31200| 2015-07-19T23:39:44.379+0000 I QUERY [conn30] getmore db11.coll11 query: { $where: "this.tid === 4" } cursorid:941809981453 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:24 nreturned:151 reslen:6966 locks:{ Global: { acquireCount: { r: 50 } }, Database: { acquireCount: { r: 25 } }, Collection: { acquireCount: { r: 25 } } } 471ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.464+0000 m31200| 2015-07-19T23:39:44.464+0000 I WRITE [conn116] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } ndeleted:29 keyUpdates:0 writeConflicts:0 numYields:31 locks:{ Global: { acquireCount: { r: 61, w: 61 } }, Database: { acquireCount: { w: 61 } }, Collection: { acquireCount: { w: 32 } }, Metadata: { acquireCount: { w: 29 } }, oplog: { acquireCount: { w: 29 } } } 403ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.464+0000 m31200| 2015-07-19T23:39:44.464+0000 I COMMAND [conn116] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 61, w: 61 } }, Database: { acquireCount: { w: 61 } }, Collection: { acquireCount: { w: 32 } }, Metadata: { acquireCount: { w: 29 } }, oplog: { acquireCount: { w: 29 } } } protocol:op_command 403ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.469+0000 m30999| 2015-07-19T23:39:44.469+0000 I NETWORK [conn62] end connection 10.139.123.131:57382 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.540+0000 m31200| 2015-07-19T23:39:44.539+0000 I QUERY [conn22] query db11.coll11 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:941764429026 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2945 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 126ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.570+0000 m31100| 2015-07-19T23:39:44.570+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.570+0000 m31100| 2015-07-19T23:39:44.570+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.591+0000 m31200| 2015-07-19T23:39:44.591+0000 I WRITE [conn112] remove db11.coll11 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" } ndeleted:25 keyUpdates:0 writeConflicts:0 numYields:30 locks:{ Global: { acquireCount: { r: 56, w: 56 } }, Database: { acquireCount: { w: 56 } }, Collection: { acquireCount: { w: 31 } }, Metadata: { acquireCount: { w: 25 } }, oplog: { acquireCount: { w: 25 } } } 207ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.591+0000 m31200| 2015-07-19T23:39:44.591+0000 I COMMAND [conn112] command db11.$cmd command: delete { delete: "coll11", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|3, ObjectId('55ac353cd2c1f750d15483cd') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 56, w: 56 } }, Database: { acquireCount: { w: 56 } }, Collection: { acquireCount: { w: 31 } }, Metadata: { acquireCount: { w: 25 } }, oplog: { acquireCount: { w: 25 } } } protocol:op_command 207ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.608+0000 m30998| 2015-07-19T23:39:44.608+0000 I NETWORK [conn66] end connection 10.139.123.131:36031 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.690+0000 m30999| 2015-07-19T23:39:44.690+0000 I NETWORK [conn64] end connection 10.139.123.131:57385 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.707+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.707+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.707+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.708+0000 jstests/concurrency/fsm_workloads/remove_where.js: Workload completed in 3862 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.708+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.708+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.708+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.708+0000 m30999| 2015-07-19T23:39:44.707+0000 I COMMAND [conn1] DROP: db11.coll11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.708+0000 m30999| 2015-07-19T23:39:44.707+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:44.707+0000-55ac3540d2c1f750d15483cf", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349184707), what: "dropCollection.start", ns: "db11.coll11", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.760+0000 m30999| 2015-07-19T23:39:44.759+0000 I SHARDING [conn1] distributed lock 'db11.coll11/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3540d2c1f750d15483d0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.760+0000 m31100| 2015-07-19T23:39:44.760+0000 I COMMAND [conn127] CMD: drop db11.coll11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.760+0000 m31200| 2015-07-19T23:39:44.760+0000 I COMMAND [conn142] CMD: drop db11.coll11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.761+0000 m31200| 2015-07-19T23:39:44.760+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 152ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.761+0000 m31200| 2015-07-19T23:39:44.760+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 154ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.763+0000 m31201| 2015-07-19T23:39:44.763+0000 I COMMAND [repl writer worker 6] CMD: drop db11.coll11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.763+0000 m31202| 2015-07-19T23:39:44.763+0000 I COMMAND [repl writer worker 13] CMD: drop db11.coll11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.813+0000 m31200| 2015-07-19T23:39:44.813+0000 I SHARDING [conn142] remotely refreshing metadata for db11.coll11 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||55ac353cd2c1f750d15483cd, current metadata version is 1|3||55ac353cd2c1f750d15483cd
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.813+0000 m31200| 2015-07-19T23:39:44.813+0000 W SHARDING [conn142] no chunks found when reloading db11.coll11, previous version was 0|0||55ac353cd2c1f750d15483cd, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.813+0000 m31200| 2015-07-19T23:39:44.813+0000 I SHARDING [conn142] dropping metadata for db11.coll11 at shard version 1|3||55ac353cd2c1f750d15483cd, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.814+0000 m30999| 2015-07-19T23:39:44.813+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:44.813+0000-55ac3540d2c1f750d15483d1", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349184813), what: "dropCollection", ns: "db11.coll11", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.865+0000 m30999| 2015-07-19T23:39:44.865+0000 I SHARDING [conn1] distributed lock 'db11.coll11/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.917+0000 m30999| 2015-07-19T23:39:44.916+0000 I COMMAND [conn1] DROP DATABASE: db11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.917+0000 m30999| 2015-07-19T23:39:44.916+0000 I SHARDING [conn1] DBConfig::dropDatabase: db11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:44.917+0000 m30999| 2015-07-19T23:39:44.916+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:44.916+0000-55ac3540d2c1f750d15483d2", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349184916), what: "dropDatabase.start", ns: "db11", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.018+0000 m30999| 2015-07-19T23:39:45.018+0000 I SHARDING [conn1] DBConfig::dropDatabase: db11 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.019+0000 m31200| 2015-07-19T23:39:45.018+0000 I COMMAND [conn111] dropDatabase db11 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.019+0000 m31200| 2015-07-19T23:39:45.018+0000 I COMMAND [conn111] dropDatabase db11 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.019+0000 m30999| 2015-07-19T23:39:45.019+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:45.019+0000-55ac3541d2c1f750d15483d3", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349185019), what: "dropDatabase", ns: "db11", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.020+0000 m31200| 2015-07-19T23:39:45.019+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 253ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.020+0000 m31200| 2015-07-19T23:39:45.019+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 31 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 253ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.020+0000 m31202| 2015-07-19T23:39:45.019+0000 I COMMAND [repl writer worker 15] dropDatabase db11 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.020+0000 m31202| 2015-07-19T23:39:45.019+0000 I COMMAND [repl writer worker 15] dropDatabase db11 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.020+0000 m31201| 2015-07-19T23:39:45.019+0000 I COMMAND [repl writer worker 2] dropDatabase db11 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.021+0000 m31201| 2015-07-19T23:39:45.019+0000 I COMMAND [repl writer worker 2] dropDatabase db11 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.074+0000 m31100| 2015-07-19T23:39:45.074+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 501ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.075+0000 m31100| 2015-07-19T23:39:45.074+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 501ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.082+0000 m31100| 2015-07-19T23:39:45.082+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.085+0000 m31102| 2015-07-19T23:39:45.085+0000 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.085+0000 m31101| 2015-07-19T23:39:45.085+0000 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.092+0000 m31200| 2015-07-19T23:39:45.092+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.093+0000 m31202| 2015-07-19T23:39:45.093+0000 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.093+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.093+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.094+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.094+0000 jstests/concurrency/fsm_workloads/map_reduce_drop.js
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.094+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.094+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.094+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.096+0000 m30999| 2015-07-19T23:39:45.096+0000 I SHARDING [conn1] distributed lock 'db12/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3541d2c1f750d15483d4
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.097+0000 m30999| 2015-07-19T23:39:45.097+0000 I SHARDING [conn1] Placing [db12] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.097+0000 m30999| 2015-07-19T23:39:45.097+0000 I SHARDING [conn1] Enabling sharding for database [db12] in config db
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.149+0000 m30999| 2015-07-19T23:39:45.148+0000 I SHARDING [conn1] distributed lock 'db12/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.153+0000 m31201| 2015-07-19T23:39:45.153+0000 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.154+0000 m31200| 2015-07-19T23:39:45.154+0000 I INDEX [conn112] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.155+0000 m31200| 2015-07-19T23:39:45.155+0000 I INDEX [conn112] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.156+0000 m31200| 2015-07-19T23:39:45.156+0000 I INDEX [conn112] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.157+0000 m30999| 2015-07-19T23:39:45.157+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db12.coll12", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.158+0000 m30999| 2015-07-19T23:39:45.158+0000 I SHARDING [conn1] distributed lock 'db12.coll12/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3541d2c1f750d15483d5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.158+0000 m30999| 2015-07-19T23:39:45.158+0000 I SHARDING [conn1] enable sharding on: db12.coll12 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.159+0000 m30999| 2015-07-19T23:39:45.158+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:45.158+0000-55ac3541d2c1f750d15483d6", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349185158), what: "shardCollection.start", ns: "db12.coll12", details: { shardKey: { _id: "hashed" }, collection: "db12.coll12", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.159+0000 m31202| 2015-07-19T23:39:45.159+0000 I INDEX [repl writer worker 6] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.160+0000 m31202| 2015-07-19T23:39:45.159+0000 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.160+0000 m31201| 2015-07-19T23:39:45.160+0000 I INDEX [repl writer worker 13] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.160+0000 m31201| 2015-07-19T23:39:45.160+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.164+0000 m31202| 2015-07-19T23:39:45.164+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.165+0000 m31201| 2015-07-19T23:39:45.165+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.209+0000 m30999| 2015-07-19T23:39:45.209+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db12.coll12 using new epoch 55ac3541d2c1f750d15483d7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.311+0000 m30999| 2015-07-19T23:39:45.311+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 58 version: 1|1||55ac3541d2c1f750d15483d7 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.363+0000 m30999| 2015-07-19T23:39:45.363+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 59 version: 1|1||55ac3541d2c1f750d15483d7 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.363+0000 m31200| 2015-07-19T23:39:45.363+0000 I SHARDING [conn118] remotely refreshing metadata for db12.coll12 with requested shard version 1|1||55ac3541d2c1f750d15483d7, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.364+0000 m31200| 2015-07-19T23:39:45.364+0000 I SHARDING [conn118] collection db12.coll12 was previously unsharded, new metadata loaded with shard version 1|1||55ac3541d2c1f750d15483d7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.364+0000 m31200| 2015-07-19T23:39:45.364+0000 I SHARDING [conn118] collection version was loaded at version 1|1||55ac3541d2c1f750d15483d7, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.364+0000 m30999| 2015-07-19T23:39:45.364+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:45.364+0000-55ac3541d2c1f750d15483d8", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349185364), what: "shardCollection", ns: "db12.coll12", details: { version: "1|1||55ac3541d2c1f750d15483d7" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.415+0000 m30999| 2015-07-19T23:39:45.415+0000 I SHARDING [conn1] distributed lock 'db12.coll12/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.416+0000 m30999| 2015-07-19T23:39:45.415+0000 I SHARDING [conn1] moving chunk ns: db12.coll12 moving ( ns: db12.coll12, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.416+0000 m31200| 2015-07-19T23:39:45.416+0000 I SHARDING [conn97] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.416+0000 m31200| 2015-07-19T23:39:45.416+0000 I SHARDING [conn97] received moveChunk request: { moveChunk: "db12.coll12", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3541d2c1f750d15483d7') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.417+0000 m31200| 2015-07-19T23:39:45.417+0000 I SHARDING [conn97] distributed lock 'db12.coll12/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3541d9a63f6196b17285 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.417+0000 m31200| 2015-07-19T23:39:45.417+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:45.417+0000-55ac3541d9a63f6196b17286", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349185417), what: "moveChunk.start", ns: "db12.coll12", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.468+0000 m31200| 2015-07-19T23:39:45.468+0000 I SHARDING [conn97] remotely refreshing metadata for db12.coll12 based on current shard version 1|1||55ac3541d2c1f750d15483d7, current metadata version is 1|1||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.469+0000 m31200| 2015-07-19T23:39:45.468+0000 I SHARDING [conn97] metadata of collection db12.coll12 already up to date (shard version : 1|1||55ac3541d2c1f750d15483d7, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.469+0000 m31200| 2015-07-19T23:39:45.469+0000 I SHARDING [conn97] moveChunk request accepted at version 1|1||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.469+0000 m31200| 2015-07-19T23:39:45.469+0000 I SHARDING [conn97] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.469+0000 m31100| 2015-07-19T23:39:45.469+0000 I SHARDING [conn19] remotely refreshing metadata for db12.coll12, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.470+0000 m31100| 2015-07-19T23:39:45.470+0000 I SHARDING [conn19] collection db12.coll12 was previously unsharded, new metadata loaded with shard version 0|0||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.470+0000 m31100| 2015-07-19T23:39:45.470+0000 I SHARDING [conn19] collection version was loaded at version 1|1||55ac3541d2c1f750d15483d7, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.470+0000 m31100| 2015-07-19T23:39:45.470+0000 I SHARDING [migrateThread] starting receiving-end 
of migration of chunk { _id: MinKey } -> { _id: 0 } for collection db12.coll12 from test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 at epoch 55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.471+0000 m31200| 2015-07-19T23:39:45.471+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.473+0000 m31200| 2015-07-19T23:39:45.473+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.478+0000 m31200| 2015-07-19T23:39:45.478+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.486+0000 m31200| 2015-07-19T23:39:45.486+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.502+0000 m31200| 2015-07-19T23:39:45.502+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.525+0000 m31100| 2015-07-19T23:39:45.524+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 437ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.525+0000 m31100| 2015-07-19T23:39:45.524+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 437ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.527+0000 m31100| 2015-07-19T23:39:45.527+0000 I INDEX [migrateThread] build index on: db12.coll12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: 
"db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.527+0000 m31100| 2015-07-19T23:39:45.527+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.530+0000 m31100| 2015-07-19T23:39:45.529+0000 I INDEX [migrateThread] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.530+0000 m31100| 2015-07-19T23:39:45.529+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.533+0000 m31100| 2015-07-19T23:39:45.532+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.533+0000 m31100| 2015-07-19T23:39:45.533+0000 I SHARDING [migrateThread] Deleter starting delete for: db12.coll12 from { _id: MinKey } -> { _id: 0 }, with opId: 37414 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.533+0000 m31100| 2015-07-19T23:39:45.533+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db12.coll12 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.535+0000 m31200| 2015-07-19T23:39:45.534+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.538+0000 m31101| 2015-07-19T23:39:45.537+0000 I INDEX [repl writer worker 11] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.538+0000 m31101| 2015-07-19T23:39:45.537+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.538+0000 m31102| 2015-07-19T23:39:45.537+0000 I INDEX [repl writer worker 11] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.538+0000 m31102| 2015-07-19T23:39:45.537+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.539+0000 m31101| 2015-07-19T23:39:45.539+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.540+0000 m31102| 2015-07-19T23:39:45.540+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.540+0000 m31100| 2015-07-19T23:39:45.540+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.540+0000 m31100| 2015-07-19T23:39:45.540+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db12.coll12' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.599+0000 m31200| 2015-07-19T23:39:45.599+0000 I SHARDING [conn97] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.599+0000 m31200| 2015-07-19T23:39:45.599+0000 I SHARDING [conn97] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.599+0000 m31200| 2015-07-19T23:39:45.599+0000 I SHARDING [conn97] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.599+0000 m31200| 2015-07-19T23:39:45.599+0000 I SHARDING [conn97] moveChunk setting version to: 2|0||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.602+0000 m31100| 2015-07-19T23:39:45.602+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db12.coll12' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.602+0000 m31100| 2015-07-19T23:39:45.602+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:45.602+0000-55ac354168c42881b59cba46", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349185602), what: "moveChunk.to", ns: "db12.coll12", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 62, step 2 of 5: 7, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 61, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.654+0000 m31200| 2015-07-19T23:39:45.653+0000 I SHARDING [conn97] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db12.coll12", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.654+0000 m31200| 2015-07-19T23:39:45.653+0000 I SHARDING [conn97] moveChunk updating self version to: 2|1||55ac3541d2c1f750d15483d7 through { _id: 0 } -> { _id: MaxKey } for collection 'db12.coll12' [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.654+0000 m31200| 2015-07-19T23:39:45.653+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:45.653+0000-55ac3541d9a63f6196b17287", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349185653), what: "moveChunk.commit", ns: "db12.coll12", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.704+0000 m31200| 2015-07-19T23:39:45.704+0000 I SHARDING [conn97] MigrateFromStatus::done About to 
acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.705+0000 m31200| 2015-07-19T23:39:45.704+0000 I SHARDING [conn97] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.705+0000 m31200| 2015-07-19T23:39:45.704+0000 I SHARDING [conn97] Deleter starting delete for: db12.coll12 from { _id: MinKey } -> { _id: 0 }, with opId: 39015 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.705+0000 m31200| 2015-07-19T23:39:45.704+0000 I SHARDING [conn97] rangeDeleter deleted 0 documents for db12.coll12 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.705+0000 m31200| 2015-07-19T23:39:45.704+0000 I SHARDING [conn97] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.705+0000 m31200| 2015-07-19T23:39:45.705+0000 I SHARDING [conn97] distributed lock 'db12.coll12/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.705+0000 m31200| 2015-07-19T23:39:45.705+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:45.705+0000-55ac3541d9a63f6196b17288", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349185705), what: "moveChunk.from", ns: "db12.coll12", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 129, step 5 of 6: 105, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.756+0000 m31200| 2015-07-19T23:39:45.756+0000 I COMMAND [conn97] command db12.coll12 command: moveChunk { moveChunk: "db12.coll12", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac3541d2c1f750d15483d7') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 340ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.757+0000 m30999| 2015-07-19T23:39:45.757+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 60 version: 2|1||55ac3541d2c1f750d15483d7 based on: 1|1||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.757+0000 m31100| 2015-07-19T23:39:45.757+0000 I SHARDING [conn43] received splitChunk request: { splitChunk: "db12.coll12", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3541d2c1f750d15483d7') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.758+0000 m31100| 2015-07-19T23:39:45.758+0000 I SHARDING [conn43] distributed lock 'db12.coll12/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac354168c42881b59cba47 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.758+0000 m31100| 
2015-07-19T23:39:45.758+0000 I SHARDING [conn43] remotely refreshing metadata for db12.coll12 based on current shard version 0|0||55ac3541d2c1f750d15483d7, current metadata version is 1|1||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.759+0000 m31100| 2015-07-19T23:39:45.759+0000 I SHARDING [conn43] updating metadata for db12.coll12 from shard version 0|0||55ac3541d2c1f750d15483d7 to shard version 2|0||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.759+0000 m31100| 2015-07-19T23:39:45.759+0000 I SHARDING [conn43] collection version was loaded at version 2|1||55ac3541d2c1f750d15483d7, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.759+0000 m31100| 2015-07-19T23:39:45.759+0000 I SHARDING [conn43] splitChunk accepted at version 2|0||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.760+0000 m31100| 2015-07-19T23:39:45.759+0000 I SHARDING [conn43] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:45.759+0000-55ac354168c42881b59cba48", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47680", time: new Date(1437349185759), what: "split", ns: "db12.coll12", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac3541d2c1f750d15483d7') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac3541d2c1f750d15483d7') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.811+0000 m31100| 2015-07-19T23:39:45.810+0000 I SHARDING [conn43] distributed lock 'db12.coll12/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.811+0000 m30999| 2015-07-19T23:39:45.811+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 61 version: 2|3||55ac3541d2c1f750d15483d7 based on: 2|1||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.812+0000 m31200| 2015-07-19T23:39:45.811+0000 I SHARDING [conn97] received splitChunk request: { splitChunk: "db12.coll12", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3541d2c1f750d15483d7') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.813+0000 m31200| 2015-07-19T23:39:45.812+0000 I SHARDING [conn97] distributed lock 'db12.coll12/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3541d9a63f6196b17289 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.813+0000 m31200| 2015-07-19T23:39:45.812+0000 I SHARDING [conn97] remotely refreshing metadata for db12.coll12 based on current shard version 2|0||55ac3541d2c1f750d15483d7, current metadata version is 2|0||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.813+0000 m31200| 2015-07-19T23:39:45.813+0000 I SHARDING [conn97] updating metadata for db12.coll12 from shard version 2|0||55ac3541d2c1f750d15483d7 to shard version 2|1||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.813+0000 m31200| 2015-07-19T23:39:45.813+0000 I SHARDING [conn97] collection version was loaded at version 2|3||55ac3541d2c1f750d15483d7, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.814+0000 m31200| 2015-07-19T23:39:45.813+0000 I SHARDING [conn97] splitChunk accepted at version 2|1||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.814+0000 m31200| 2015-07-19T23:39:45.814+0000 I SHARDING [conn97] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:45.814+0000-55ac3541d9a63f6196b1728a", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39668", time: new Date(1437349185814), what: "split", ns: "db12.coll12", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac3541d2c1f750d15483d7') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac3541d2c1f750d15483d7') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.865+0000 m31200| 2015-07-19T23:39:45.865+0000 I SHARDING [conn97] distributed lock 'db12.coll12/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.866+0000 m30999| 2015-07-19T23:39:45.865+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 62 version: 2|5||55ac3541d2c1f750d15483d7 based on: 2|3||55ac3541d2c1f750d15483d7 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.866+0000 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.923+0000 m30998| 2015-07-19T23:39:45.923+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36037 #67 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.924+0000 m30999| 2015-07-19T23:39:45.924+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57397 #67 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.933+0000 m30999| 2015-07-19T23:39:45.933+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57398 #68 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.941+0000 m30998| 2015-07-19T23:39:45.940+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36040 #68 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.948+0000 m30999| 2015-07-19T23:39:45.948+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57400 #69 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.952+0000 setting random seed: 8994022482074 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.952+0000 setting random seed: 5959718963131 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.952+0000 setting random seed: 1257293266244 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.954+0000 setting random seed: 595693527720 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.958+0000 setting random seed: 2858003927394 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.969+0000 m30999| 2015-07-19T23:39:45.969+0000 I SHARDING [conn68] distributed lock 'map_reduce_drop/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3541d2c1f750d15483d9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.969+0000 m30998| 2015-07-19T23:39:45.969+0000 I SHARDING [conn68] could not acquire lock 'map_reduce_drop/ip-10-139-123-131:30998:1437349129:1804289383' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.969+0000 m30998| 2015-07-19T23:39:45.969+0000 I SHARDING [conn68] distributed lock 'map_reduce_drop/ip-10-139-123-131:30998:1437349129:1804289383' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.970+0000 m30999| 2015-07-19T23:39:45.970+0000 I SHARDING [conn68] Placing [map_reduce_drop] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:45.974+0000 m29000| 2015-07-19T23:39:45.974+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55701 #40 (40 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.022+0000 m30999| 2015-07-19T23:39:46.021+0000 I SHARDING [conn68] distributed lock 'map_reduce_drop/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.026+0000 m31200| 2015-07-19T23:39:46.026+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:121 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 866ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.026+0000 m31200| 2015-07-19T23:39:46.026+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:121 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 866ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.081+0000 m31200| 2015-07-19T23:39:46.081+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_90 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.133+0000 m31200| 2015-07-19T23:39:46.133+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.135+0000 m31200| 2015-07-19T23:39:46.135+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_90 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.135+0000 m31200| 2015-07-19T23:39:46.135+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_90 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.136+0000 m31200| 2015-07-19T23:39:46.135+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_90 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.178+0000 m31200| 2015-07-19T23:39:46.178+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_91 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.302+0000 m31200| 2015-07-19T23:39:46.302+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.305+0000 m31200| 2015-07-19T23:39:46.305+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_91 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.306+0000 m31200| 2015-07-19T23:39:46.305+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_91 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.306+0000 m31202| 2015-07-19T23:39:46.306+0000 I COMMAND [repl writer worker 14] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.306+0000 m31201| 2015-07-19T23:39:46.306+0000 I COMMAND [repl writer worker 14] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.308+0000 m31200| 2015-07-19T23:39:46.308+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_91 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.308+0000 m31200| 2015-07-19T23:39:46.308+0000 I COMMAND [conn118] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.309+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.309+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.309+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.309+0000 m31200| return redu..., out: "coll12_out" } 
planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 2469, w: 1470, W: 3 } }, Database: { acquireCount: { r: 491, w: 1462, R: 7, W: 10 } }, Collection: { acquireCount: { r: 491, w: 976 } }, Metadata: { acquireCount: { w: 488 } }, oplog: { acquireCount: { w: 488 } } } protocol:op_command 131ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.310+0000 m30999| 2015-07-19T23:39:46.310+0000 I COMMAND [conn68] DROP: map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.310+0000 m30999| 2015-07-19T23:39:46.310+0000 I COMMAND [conn68] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.311+0000 m31200| 2015-07-19T23:39:46.310+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.313+0000 m31202| 2015-07-19T23:39:46.313+0000 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.313+0000 m31201| 2015-07-19T23:39:46.313+0000 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.349+0000 m31200| 2015-07-19T23:39:46.349+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_92 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.414+0000 m31200| 2015-07-19T23:39:46.414+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.420+0000 m31200| 2015-07-19T23:39:46.420+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_92 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.420+0000 m31200| 2015-07-19T23:39:46.420+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_92 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.420+0000 m31200| 2015-07-19T23:39:46.420+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_92 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.421+0000 m31202| 2015-07-19T23:39:46.420+0000 I COMMAND [repl writer worker 1] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.421+0000 m30999| 2015-07-19T23:39:46.421+0000 I COMMAND [conn68] DROP: map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.421+0000 m30999| 2015-07-19T23:39:46.421+0000 I COMMAND [conn68] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.422+0000 m31200| 2015-07-19T23:39:46.421+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.423+0000 m31201| 2015-07-19T23:39:46.423+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.423+0000 m31202| 2015-07-19T23:39:46.423+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.430+0000 m31201| 2015-07-19T23:39:46.430+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.445+0000 m31200| 2015-07-19T23:39:46.445+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_93 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.470+0000 m30998| 2015-07-19T23:39:46.470+0000 I SHARDING [conn68] distributed lock 'map_reduce_drop/ip-10-139-123-131:30998:1437349129:1804289383' acquired, ts : 
55ac3542230355f00547ef25 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.471+0000 m30998| 2015-07-19T23:39:46.471+0000 I SHARDING [conn68] distributed lock 'map_reduce_drop/ip-10-139-123-131:30998:1437349129:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.475+0000 m30999| 2015-07-19T23:39:46.475+0000 I SHARDING [conn67] could not acquire lock 'map_reduce_drop/ip-10-139-123-131:30999:1437349128:1804289383' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.475+0000 m30999| 2015-07-19T23:39:46.475+0000 I SHARDING [conn67] distributed lock 'map_reduce_drop/ip-10-139-123-131:30999:1437349128:1804289383' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.475+0000 m30998| 2015-07-19T23:39:46.475+0000 I SHARDING [conn67] distributed lock 'map_reduce_drop/ip-10-139-123-131:30998:1437349129:1804289383' acquired, ts : 55ac3542230355f00547ef26 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.476+0000 m30998| 2015-07-19T23:39:46.476+0000 I SHARDING [conn67] distributed lock 'map_reduce_drop/ip-10-139-123-131:30998:1437349129:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.538+0000 m31100| 2015-07-19T23:39:46.538+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.538+0000 m31100| 2015-07-19T23:39:46.538+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.562+0000 m31200| 2015-07-19T23:39:46.561+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_94 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.579+0000 m31200| 2015-07-19T23:39:46.579+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_95 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.624+0000 m31200| 2015-07-19T23:39:46.624+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.627+0000 m31200| 2015-07-19T23:39:46.627+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_93 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.628+0000 m31200| 2015-07-19T23:39:46.628+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_93 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.629+0000 m31202| 2015-07-19T23:39:46.629+0000 I COMMAND [repl writer worker 3] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.629+0000 m31201| 2015-07-19T23:39:46.629+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.629+0000 m31200| 2015-07-19T23:39:46.629+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_93 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.630+0000 m31200| 2015-07-19T23:39:46.630+0000 I COMMAND [conn118] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() 
{ [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.630+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.630+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.631+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.631+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 1278, w: 759, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 554 } }, Database: { acquireCount: { r: 254, w: 751, R: 4, W: 10 }, acquireWaitCount: { r: 4, w: 16, W: 4 }, timeAcquiringMicros: { r: 10275, w: 40234, W: 646 } }, Collection: { acquireCount: { r: 254, w: 502 } }, Metadata: { acquireCount: { w: 251 } }, oplog: { acquireCount: { w: 251 } } } protocol:op_command 185ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.631+0000 m30999| 2015-07-19T23:39:46.630+0000 I COMMAND [conn68] DROP: map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.631+0000 m30999| 2015-07-19T23:39:46.630+0000 I COMMAND [conn68] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.631+0000 m31200| 2015-07-19T23:39:46.631+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.637+0000 m31202| 2015-07-19T23:39:46.637+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.638+0000 m31201| 2015-07-19T23:39:46.638+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.681+0000 m31200| 2015-07-19T23:39:46.681+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_96 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.800+0000 m31200| 2015-07-19T23:39:46.800+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.804+0000 m31200| 2015-07-19T23:39:46.804+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_96 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.804+0000 m31200| 2015-07-19T23:39:46.804+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_96 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.806+0000 m31202| 2015-07-19T23:39:46.805+0000 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.806+0000 m31201| 2015-07-19T23:39:46.806+0000 I COMMAND [repl writer worker 2] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.806+0000 m31200| 2015-07-19T23:39:46.806+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_96 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.807+0000 m31200| 2015-07-19T23:39:46.807+0000 I COMMAND [conn118] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.807+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.807+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.807+0000 m31200| // This dummy reducer is present to e..., finalize: function 
finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.808+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 1268, w: 753, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 202 } }, Database: { acquireCount: { r: 252, w: 745, R: 4, W: 10 }, acquireWaitCount: { R: 4, W: 8 }, timeAcquiringMicros: { R: 577, W: 2131 } }, Collection: { acquireCount: { r: 252, w: 498 } }, Metadata: { acquireCount: { w: 249 } }, oplog: { acquireCount: { w: 249 } } } protocol:op_command 126ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.808+0000 m30999| 2015-07-19T23:39:46.807+0000 I COMMAND [conn68] DROP: map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.808+0000 m30999| 2015-07-19T23:39:46.807+0000 I COMMAND [conn68] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.808+0000 m31200| 2015-07-19T23:39:46.808+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.815+0000 m31201| 2015-07-19T23:39:46.815+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.819+0000 m31202| 2015-07-19T23:39:46.819+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.888+0000 m31200| 2015-07-19T23:39:46.888+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_97 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.977+0000 m30999| 2015-07-19T23:39:46.976+0000 I SHARDING [conn67] distributed lock 'map_reduce_drop/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3542d2c1f750d15483db [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:46.977+0000 m30999| 2015-07-19T23:39:46.977+0000 I SHARDING [conn67] distributed lock 'map_reduce_drop/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.075+0000 m31200| 2015-07-19T23:39:47.075+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.080+0000 m31200| 2015-07-19T23:39:47.080+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_97 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.080+0000 m31200| 2015-07-19T23:39:47.080+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_97 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.081+0000 m31200| 2015-07-19T23:39:47.081+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_97 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.082+0000 m31200| 2015-07-19T23:39:47.082+0000 I COMMAND [conn118] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.082+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.082+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.082+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.083+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 1263, w: 750, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 7926 } }, Database: { acquireCount: { r: 251, w: 742, R: 4, W: 10 }, acquireWaitCount: { R: 2, W: 5 }, timeAcquiringMicros: { R: 327, W: 688 } }, Collection: { acquireCount: { r: 251, w: 496 } }, Metadata: { acquireCount: { w: 248 } }, oplog: { acquireCount: { w: 248 } } } protocol:op_command 194ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.085+0000 m31201| 2015-07-19T23:39:47.085+0000 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.086+0000 m31202| 2015-07-19T23:39:47.086+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.087+0000 m30999| 2015-07-19T23:39:47.087+0000 I NETWORK [conn68] end connection 10.139.123.131:57398 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.107+0000 m31200| 2015-07-19T23:39:47.107+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_98 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.116+0000 m31200| 2015-07-19T23:39:47.116+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.118+0000 m31200| 2015-07-19T23:39:47.118+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_94 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.118+0000 m31200| 2015-07-19T23:39:47.118+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_94 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.119+0000 m31200| 2015-07-19T23:39:47.119+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_94 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.123+0000 m31202| 2015-07-19T23:39:47.123+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.124+0000 m31201| 2015-07-19T23:39:47.124+0000 I COMMAND [repl writer worker 7] CMD: drop 
map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.126+0000 m31200| 2015-07-19T23:39:47.126+0000 I COMMAND [conn22] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.126+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.126+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.126+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.127+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 3680, w: 2193, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 16926, w: 3725, W: 4158 } }, Database: { acquireCount: { r: 733, w: 2185, R: 9, W: 10 }, acquireWaitCount: { r: 8, w: 24, R: 9, W: 8 }, timeAcquiringMicros: { r: 18135, w: 42028, R: 11818, W: 7503 } }, Collection: { acquireCount: { r: 733, w: 1458 } }, Metadata: { acquireCount: { w: 729 } }, oplog: { acquireCount: { w: 729 } } } protocol:op_command 599ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.177+0000 m31200| 2015-07-19T23:39:47.176+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.181+0000 m31200| 2015-07-19T23:39:47.181+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_95 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.182+0000 m31200| 2015-07-19T23:39:47.182+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_95 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.186+0000 m31200| 2015-07-19T23:39:47.186+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_95 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.187+0000 m31200| 2015-07-19T23:39:47.186+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_99 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.187+0000 m31200| 2015-07-19T23:39:47.187+0000 I COMMAND [conn103] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.188+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.188+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.188+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.188+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 3680, w: 2193, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 16772, w: 10251, W: 2059 } }, Database: { acquireCount: { r: 733, w: 2185, R: 9, W: 10 }, acquireWaitCount: { r: 11, w: 31, R: 8, W: 8 }, timeAcquiringMicros: { r: 25151, w: 82560, R: 5201, W: 6548 } }, Collection: { acquireCount: { r: 733, w: 1458 } }, Metadata: { acquireCount: { w: 729 } }, oplog: { acquireCount: { w: 729 } } } protocol:op_command 642ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.189+0000 m31202| 2015-07-19T23:39:47.187+0000 I COMMAND [repl writer worker 3] 
CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.196+0000 m31201| 2015-07-19T23:39:47.195+0000 I COMMAND [repl writer worker 1] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.248+0000 m31200| 2015-07-19T23:39:47.248+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.479+0000 m30999| 2015-07-19T23:39:47.479+0000 I SHARDING [conn69] distributed lock 'map_reduce_drop/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3543d2c1f750d15483dc [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.480+0000 m30999| 2015-07-19T23:39:47.480+0000 I SHARDING [conn69] distributed lock 'map_reduce_drop/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.541+0000 m31100| 2015-07-19T23:39:47.540+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.541+0000 m31100| 2015-07-19T23:39:47.541+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.559+0000 m31200| 2015-07-19T23:39:47.559+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.579+0000 m31200| 2015-07-19T23:39:47.579+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.587+0000 m31200| 2015-07-19T23:39:47.587+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_98 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.588+0000 m31200| 2015-07-19T23:39:47.587+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_98 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.588+0000 m31201| 2015-07-19T23:39:47.588+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.589+0000 m31202| 2015-07-19T23:39:47.588+0000 I COMMAND [repl writer worker 8] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.589+0000 m31200| 2015-07-19T23:39:47.589+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_98 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.593+0000 m31200| 2015-07-19T23:39:47.593+0000 I COMMAND [conn117] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.593+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.593+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.594+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.594+0000 m31200| return redu..., out: "coll12_out" } 
planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 2983, w: 1776, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 5974, w: 7100, W: 4171 } }, Database: { acquireCount: { r: 594, w: 1768, R: 8, W: 10 }, acquireWaitCount: { r: 2, w: 41, R: 7, W: 8 }, timeAcquiringMicros: { r: 3481, w: 95538, R: 19717, W: 4656 } }, Collection: { acquireCount: { r: 594, w: 1180 } }, Metadata: { acquireCount: { w: 590 } }, oplog: { acquireCount: { w: 590 } } } protocol:op_command 515ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.596+0000 m30999| 2015-07-19T23:39:47.596+0000 I COMMAND [conn67] DROP: map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.596+0000 m30999| 2015-07-19T23:39:47.596+0000 I COMMAND [conn67] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.596+0000 m31200| 2015-07-19T23:39:47.596+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.602+0000 m31202| 2015-07-19T23:39:47.602+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.607+0000 m31201| 2015-07-19T23:39:47.607+0000 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.672+0000 m31200| 2015-07-19T23:39:47.672+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.837+0000 m31200| 2015-07-19T23:39:47.836+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.841+0000 m31200| 2015-07-19T23:39:47.841+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.841+0000 m31200| 2015-07-19T23:39:47.841+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.842+0000 m31200| 2015-07-19T23:39:47.842+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_102 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.843+0000 m31200| 2015-07-19T23:39:47.843+0000 I COMMAND [conn117] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.843+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.843+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.843+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.844+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 1273, w: 756, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 171 } }, Database: { acquireCount: { r: 253, w: 748, R: 4, W: 10 }, acquireWaitCount: { R: 4, W: 8 }, timeAcquiringMicros: { R: 1752, W: 1320 } }, Collection: { acquireCount: { r: 253, w: 500 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 171ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.844+0000 m31201| 2015-07-19T23:39:47.843+0000 I COMMAND [repl writer worker 
13] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.845+0000 m30999| 2015-07-19T23:39:47.845+0000 I COMMAND [conn67] DROP: map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.845+0000 m30999| 2015-07-19T23:39:47.845+0000 I COMMAND [conn67] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.845+0000 m31200| 2015-07-19T23:39:47.845+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.847+0000 m31202| 2015-07-19T23:39:47.847+0000 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.851+0000 m31201| 2015-07-19T23:39:47.850+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.862+0000 m31202| 2015-07-19T23:39:47.861+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.898+0000 m31200| 2015-07-19T23:39:47.898+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_103 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.914+0000 m31200| 2015-07-19T23:39:47.914+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.921+0000 m31200| 2015-07-19T23:39:47.920+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.927+0000 m31200| 2015-07-19T23:39:47.927+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.928+0000 m31200| 2015-07-19T23:39:47.928+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.930+0000 m31201| 2015-07-19T23:39:47.930+0000 I COMMAND [repl writer worker 10] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.932+0000 m31200| 2015-07-19T23:39:47.932+0000 I COMMAND [conn103] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.932+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.933+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.933+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.933+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 4804, w: 2865, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 5516, w: 10473, W: 239 } }, Database: { acquireCount: { r: 956, w: 2857, R: 12, W: 10 }, acquireWaitCount: { r: 13, w: 39, R: 12, W: 8 }, timeAcquiringMicros: { r: 52712, w: 34626, R: 1521, W: 3643 } }, Collection: { acquireCount: { r: 956, w: 1906 } }, Metadata: { acquireCount: { w: 953 } }, oplog: { acquireCount: { w: 953 } } } protocol:op_command 685ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.942+0000 m31202| 2015-07-19T23:39:47.941+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.956+0000 
m31200| 2015-07-19T23:39:47.956+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.958+0000 m31200| 2015-07-19T23:39:47.958+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_99 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.963+0000 m31200| 2015-07-19T23:39:47.962+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_99 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.963+0000 m31201| 2015-07-19T23:39:47.963+0000 I COMMAND [repl writer worker 4] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.971+0000 m31202| 2015-07-19T23:39:47.971+0000 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.982+0000 m31200| 2015-07-19T23:39:47.982+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_99 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.983+0000 m31200| 2015-07-19T23:39:47.983+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_104 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.984+0000 m31200| 2015-07-19T23:39:47.984+0000 I COMMAND [conn22] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.984+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.985+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.985+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.985+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 4804, w: 2865, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 23590, w: 2303, W: 5115 } }, Database: { acquireCount: { r: 956, w: 2857, R: 12, W: 10 }, acquireWaitCount: { r: 15, w: 56, R: 12, W: 6 }, timeAcquiringMicros: { r: 64531, w: 61104, R: 35937, W: 13903 } }, Collection: { acquireCount: { r: 956, w: 1906 } }, Metadata: { acquireCount: { w: 953 } }, oplog: { acquireCount: { w: 953 } } } protocol:op_command 798ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.988+0000 m30998| 2015-07-19T23:39:47.987+0000 I COMMAND [conn67] DROP DATABASE: map_reduce_drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.988+0000 m30998| 2015-07-19T23:39:47.987+0000 I SHARDING [conn67] DBConfig::dropDatabase: map_reduce_drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:47.988+0000 m30998| 2015-07-19T23:39:47.988+0000 I SHARDING [conn67] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:47.988+0000-55ac3543230355f00547ef27", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:36037", time: new Date(1437349187988), what: "dropDatabase.start", ns: "map_reduce_drop", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.070+0000 m31200| 2015-07-19T23:39:48.070+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.078+0000 m31200| 2015-07-19T23:39:48.078+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.087+0000 m31200| 2015-07-19T23:39:48.086+0000 I COMMAND 
[conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.089+0000 m30998| 2015-07-19T23:39:48.089+0000 I SHARDING [conn67] DBConfig::dropDatabase: map_reduce_drop dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.090+0000 m31201| 2015-07-19T23:39:48.090+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.090+0000 m31200| 2015-07-19T23:39:48.087+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_101 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.090+0000 m31200| 2015-07-19T23:39:48.090+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39804 #145 (73 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.090+0000 m31200| 2015-07-19T23:39:48.090+0000 I COMMAND [conn118] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.091+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.091+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.091+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.091+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 3418, w: 2037, W: 3 }, acquireWaitCount: { r: 5, W: 1 }, timeAcquiringMicros: { r: 31585, W: 120 } }, Database: { acquireCount: { r: 680, w: 2029, R: 9, W: 10 }, acquireWaitCount: { r: 12, w: 32, R: 8, W: 5 }, timeAcquiringMicros: { r: 25415, w: 60306, R: 6878, W: 917 } }, Collection: { acquireCount: { r: 680, w: 1354 } }, Metadata: { acquireCount: { w: 677 } }, oplog: { acquireCount: { w: 677 } } } protocol:op_command 532ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.092+0000 m30999| 2015-07-19T23:39:48.092+0000 I COMMAND [conn69] DROP: map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.092+0000 m30999| 2015-07-19T23:39:48.092+0000 I COMMAND [conn69] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.093+0000 m31200| 2015-07-19T23:39:48.092+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.095+0000 m31202| 2015-07-19T23:39:48.095+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.097+0000 m31200| 2015-07-19T23:39:48.097+0000 I COMMAND [conn145] dropDatabase map_reduce_drop starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.099+0000 m31200| 2015-07-19T23:39:48.099+0000 I COMMAND [conn145] dropDatabase map_reduce_drop finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.100+0000 m30998| 2015-07-19T23:39:48.100+0000 I SHARDING [conn67] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:48.100+0000-55ac3544230355f00547ef28", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:36037", time: new Date(1437349188100), what: "dropDatabase", ns: "map_reduce_drop", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.100+0000 m31200| 2015-07-19T23:39:48.100+0000 I COMMAND [conn117] CMD: drop 
map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.101+0000 m31200| 2015-07-19T23:39:48.101+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_104 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.102+0000 m31200| 2015-07-19T23:39:48.101+0000 I COMMAND [conn103] mr failed, removing collection :: caused by :: 18697 Collection unexpectedly disappeared: map_reduce_drop.tmp.mr.coll12_104 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.102+0000 m31200| 2015-07-19T23:39:48.102+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_104 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.105+0000 m31202| 2015-07-19T23:39:48.104+0000 I COMMAND [repl writer worker 10] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.106+0000 m31202| 2015-07-19T23:39:48.106+0000 I COMMAND [repl writer worker 4] dropDatabase map_reduce_drop starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.107+0000 m31200| 2015-07-19T23:39:48.107+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_103 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.107+0000 m31200| 2015-07-19T23:39:48.107+0000 I COMMAND [conn117] mr failed, removing collection :: caused by :: 10076 rename failed: { ok: 0.0, errmsg: "source namespace does not exist", code: 26 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.108+0000 m31200| 2015-07-19T23:39:48.107+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_103 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.108+0000 m31202| 2015-07-19T23:39:48.108+0000 I COMMAND [repl writer worker 4] dropDatabase map_reduce_drop finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.111+0000 m31201| 2015-07-19T23:39:48.110+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.111+0000 m31201| 2015-07-19T23:39:48.111+0000 I COMMAND [repl writer worker 11] dropDatabase map_reduce_drop starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.112+0000 m31201| 2015-07-19T23:39:48.112+0000 I COMMAND [repl writer worker 11] dropDatabase map_reduce_drop finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.115+0000 m31200| 2015-07-19T23:39:48.114+0000 I COMMAND [conn117] command map_reduce_drop.tmp.mr.coll12_103_inc command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.115+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.120+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.120+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.121+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: rename failed: { ok: 0.0, errmsg: "source namespace does not exist", code: 26 } code:10076 numYields:0 reslen:124 locks:{ Global: { acquireCount: { r: 1278, w: 759, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 15832, w: 7082, W: 7195 } }, Database: { acquireCount: { r: 254, w: 752, R: 4, W: 9 }, acquireWaitCount: { r: 3, w: 20, R: 3, W: 4 }, timeAcquiringMicros: { r: 659, w: 35831, R: 5017, W: 873 } }, Collection: { acquireCount: { r: 254, w: 504 } }, Metadata: { 
acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 217ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.121+0000 m31200| 2015-07-19T23:39:48.115+0000 I COMMAND [conn103] command map_reduce_drop.tmp.mr.coll12_104 command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.121+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.121+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.126+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.126+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: Collection unexpectedly disappeared: map_reduce_drop.tmp.mr.coll12_104 code:18697 numYields:0 reslen:115 locks:{ Global: { acquireCount: { r: 844, w: 660 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 8846, w: 3932 } }, Database: { acquireCount: { r: 85, w: 654, R: 7, W: 9 }, acquireWaitCount: { r: 3, w: 3, R: 7, W: 4 }, timeAcquiringMicros: { r: 4829, w: 2982, R: 1005, W: 2905 } }, Collection: { acquireCount: { r: 85, w: 576 } }, Metadata: { acquireCount: { w: 81 } }, oplog: { acquireCount: { w: 81 } } } protocol:op_command 132ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.146+0000 m30998| 2015-07-19T23:39:48.142+0000 I SHARDING [conn68] distributed lock 'map_reduce_drop/ip-10-139-123-131:30998:1437349129:1804289383' acquired, ts : 55ac3544230355f00547ef29 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.146+0000 m30998| 2015-07-19T23:39:48.145+0000 I SHARDING [conn68] Placing [map_reduce_drop] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.152+0000 m31200| 2015-07-19T23:39:48.151+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_105 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.165+0000 m31200| 2015-07-19T23:39:48.164+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_106 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.196+0000 m30998| 2015-07-19T23:39:48.196+0000 I SHARDING [conn68] distributed lock 'map_reduce_drop/ip-10-139-123-131:30998:1437349129:1804289383' unlocked. 
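The mapReduce entries above from m31200 all record the same map_reduce_drop workload command against coll12. The shell-side call they correspond to looks roughly like the sketch below; the log truncates the reducer and finalizer bodies ("// This dummy reducer is present to e...", "return redu..."), so those bodies, and the trailing printjson, are assumptions for illustration, not the workload's actual code.

    // Minimal reconstruction of the logged mapReduce command, run against the
    // map_reduce_drop database that these entries reference.
    var res = db.getSiblingDB('map_reduce_drop').runCommand({
        mapreduce: 'coll12',
        map: function mapper() {
            emit(this.key, 1); // this line is visible verbatim in the log
        },
        reduce: function reducer(key, values) {
            return 1; // assumed dummy body; the log truncates the real one
        },
        finalize: function finalize(key, reducedValue) {
            return reducedValue; // assumed completion of the truncated "return redu..."
        },
        out: 'coll12_out'
    });
    printjson(res); // on success, ok: 1 with the "coll12_out" result collection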
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.255+0000 m31200| 2015-07-19T23:39:48.255+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_107 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.397+0000 m31200| 2015-07-19T23:39:48.397+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.410+0000 m31200| 2015-07-19T23:39:48.410+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_106 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.411+0000 m31200| 2015-07-19T23:39:48.410+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_106 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.413+0000 m31200| 2015-07-19T23:39:48.413+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_106 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.414+0000 m31200| 2015-07-19T23:39:48.413+0000 I COMMAND [conn118] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.414+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.414+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.414+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.414+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 2463, w: 1466, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 189 } }, Database: { acquireCount: { r: 490, w: 1458, R: 7, W: 11 }, acquireWaitCount: { r: 2, w: 11, R: 2, W: 6 }, timeAcquiringMicros: { r: 1664, w: 25099, R: 191, W: 3857 } }, Collection: { acquireCount: { r: 490, w: 975 } }, Metadata: { acquireCount: { w: 486 } }, oplog: { acquireCount: { w: 486 } } } protocol:op_command 250ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.472+0000 m31200| 2015-07-19T23:39:48.472+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.475+0000 m31200| 2015-07-19T23:39:48.475+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_105 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.475+0000 m31200| 2015-07-19T23:39:48.475+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_105 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.482+0000 m31201| 2015-07-19T23:39:48.482+0000 I COMMAND [repl writer worker 3] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.483+0000 m31202| 2015-07-19T23:39:48.482+0000 I COMMAND [repl writer worker 3] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.487+0000 m31200| 2015-07-19T23:39:48.487+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_105 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.491+0000 m31200| 2015-07-19T23:39:48.490+0000 I COMMAND [conn117] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.491+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.491+0000 m31200| }, reduce: function reducer() { 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.491+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.492+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 2464, w: 1467, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 13659, W: 5391 } }, Database: { acquireCount: { r: 490, w: 1459, R: 7, W: 11 }, acquireWaitCount: { r: 6, w: 13, R: 4, W: 3 }, timeAcquiringMicros: { r: 11471, w: 14688, R: 3164, W: 7869 } }, Collection: { acquireCount: { r: 490, w: 975 } }, Metadata: { acquireCount: { w: 487 } }, oplog: { acquireCount: { w: 487 } } } protocol:op_command 339ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.498+0000 m31200| 2015-07-19T23:39:48.498+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_108 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.542+0000 m31200| 2015-07-19T23:39:48.541+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_109 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.543+0000 m31100| 2015-07-19T23:39:48.543+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.544+0000 m31100| 2015-07-19T23:39:48.543+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.659+0000 m30998| 2015-07-19T23:39:48.658+0000 I SHARDING [conn67] distributed lock 'map_reduce_drop/ip-10-139-123-131:30998:1437349129:1804289383' acquired, ts : 55ac3544230355f00547ef2a [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.660+0000 m30998| 2015-07-19T23:39:48.659+0000 I SHARDING [conn67] distributed lock 'map_reduce_drop/ip-10-139-123-131:30998:1437349129:1804289383' unlocked. 
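The m31100 entries reading local.oplog.rs with "nreturned:0 ... 1000ms" just above are replica-set members tailing the primary's oplog: with awaitData set, an idle getmore blocks for roughly a second before returning an empty batch, which is why these show up as 1000ms slow-query lines despite returning nothing. A minimal shell sketch of the same kind of read, assuming the standard DBQuery cursor flags, with the Timestamp value copied from the log:

    // Tail the oplog from a known timestamp, as the logged getmores do.
    var oplog = db.getSiblingDB('local').oplog.rs;
    var cur = oplog.find({ ts: { $gte: Timestamp(1437349108, 1) } })
                   .addOption(DBQuery.Option.tailable)   // keep cursor open at end of data
                   .addOption(DBQuery.Option.awaitData); // block briefly for new entries
    // With no new writes, each empty server round trip costs about a second,
    // matching the 1000ms getmore lines in the log.
    while (cur.hasNext()) {
        printjson(cur.next());
    }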
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.710+0000 m31200| 2015-07-19T23:39:48.710+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_110 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.782+0000 m30999| 2015-07-19T23:39:48.781+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:39:48.780+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:30999:1437349128:1804289383', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.848+0000 m31200| 2015-07-19T23:39:48.848+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.860+0000 m31200| 2015-07-19T23:39:48.859+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_107 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.860+0000 m31200| 2015-07-19T23:39:48.860+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_107 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.864+0000 m31202| 2015-07-19T23:39:48.863+0000 I COMMAND [repl writer worker 4] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.864+0000 m31201| 2015-07-19T23:39:48.864+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.864+0000 m31200| 2015-07-19T23:39:48.864+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_107 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.866+0000 m31200| 2015-07-19T23:39:48.866+0000 I COMMAND [conn103] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.866+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.866+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.866+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:48.867+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 3643, w: 2172, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 21433, W: 90 } }, Database: { acquireCount: { r: 725, w: 2164, R: 9, W: 11 }, acquireWaitCount: { r: 9, w: 44, R: 9, W: 7 }, timeAcquiringMicros: { r: 40559, w: 99668, R: 1879, W: 1666 } }, Collection: { acquireCount: { r: 725, w: 1445 } }, Metadata: { acquireCount: { w: 722 } }, oplog: { acquireCount: { w: 722 } } } protocol:op_command 611ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.012+0000 m31200| 2015-07-19T23:39:49.011+0000 I COMMAND [conn25] command map_reduce_drop.$cmd command: insert { insert: "coll12", documents: 250, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 257, w: 257 } }, Database: { acquireCount: { w: 257 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 135ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.015+0000 m31200| 
2015-07-19T23:39:49.015+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_111 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.323+0000 m31200| 2015-07-19T23:39:49.323+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.335+0000 m31200| 2015-07-19T23:39:49.335+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_109 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.335+0000 m31200| 2015-07-19T23:39:49.335+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_109 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.336+0000 m31202| 2015-07-19T23:39:49.336+0000 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.337+0000 m31201| 2015-07-19T23:39:49.336+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.339+0000 m31200| 2015-07-19T23:39:49.339+0000 I COMMAND [conn118] CMD: drop map_reduce_drop.tmp.mr.coll12_109 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.340+0000 m31200| 2015-07-19T23:39:49.340+0000 I COMMAND [conn118] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.340+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.341+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.341+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.341+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 5863, w: 3498, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 12891, W: 80 } }, Database: { acquireCount: { r: 1167, w: 3490, R: 14, W: 10 }, acquireWaitCount: { r: 4, w: 48, R: 12, W: 8 }, timeAcquiringMicros: { r: 4387, w: 86134, R: 7179, W: 10223 } }, Collection: { acquireCount: { r: 1167, w: 2328 } }, Metadata: { acquireCount: { w: 1164 } }, oplog: { acquireCount: { w: 1164 } } } protocol:op_command 801ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.379+0000 m31200| 2015-07-19T23:39:49.379+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.383+0000 m31200| 2015-07-19T23:39:49.383+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_108 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.384+0000 m31200| 2015-07-19T23:39:49.383+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_108 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.388+0000 m31202| 2015-07-19T23:39:49.388+0000 I COMMAND [repl writer worker 2] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.388+0000 m31201| 2015-07-19T23:39:49.388+0000 I COMMAND [repl writer worker 11] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.414+0000 m31200| 2015-07-19T23:39:49.413+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_108 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.416+0000 m31200| 2015-07-19T23:39:49.416+0000 I COMMAND [conn117] command map_reduce_drop.coll12_out 
command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.416+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.417+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.417+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.417+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 5863, w: 3498, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 26984, W: 19908 } }, Database: { acquireCount: { r: 1167, w: 3490, R: 14, W: 10 }, acquireWaitCount: { r: 7, w: 58, R: 12, W: 7 }, timeAcquiringMicros: { r: 4160, w: 94641, R: 25807, W: 22193 } }, Collection: { acquireCount: { r: 1167, w: 2328 } }, Metadata: { acquireCount: { w: 1164 } }, oplog: { acquireCount: { w: 1164 } } } protocol:op_command 918ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.417+0000 m30999| 2015-07-19T23:39:49.417+0000 I COMMAND [conn67] DROP: map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.417+0000 m30999| 2015-07-19T23:39:49.417+0000 I COMMAND [conn67] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.418+0000 m31200| 2015-07-19T23:39:49.417+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.421+0000 m31201| 2015-07-19T23:39:49.421+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.425+0000 m31202| 2015-07-19T23:39:49.425+0000 I COMMAND [repl writer worker 2] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.430+0000 m31200| 2015-07-19T23:39:49.430+0000 I COMMAND [conn117] CMD: drop map_reduce_drop.tmp.mr.coll12_112 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.486+0000 m31200| 2015-07-19T23:39:49.485+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_113 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.506+0000 m31200| 2015-07-19T23:39:49.506+0000 I COMMAND [conn94] CMD: drop map_reduce_drop.tmp.mr.coll12_114 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.546+0000 m31100| 2015-07-19T23:39:49.546+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.547+0000 m31100| 2015-07-19T23:39:49.546+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.743+0000 m31200| 2015-07-19T23:39:49.742+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.751+0000 m31200| 2015-07-19T23:39:49.751+0000 I COMMAND [conn22] CMD: drop 
map_reduce_drop.tmp.mr.coll12_110 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.752+0000 m31200| 2015-07-19T23:39:49.752+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_110 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.753+0000 m31202| 2015-07-19T23:39:49.753+0000 I COMMAND [repl writer worker 9] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.753+0000 m31201| 2015-07-19T23:39:49.753+0000 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.754+0000 m31200| 2015-07-19T23:39:49.754+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_110 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.755+0000 m31200| 2015-07-19T23:39:49.755+0000 I COMMAND [conn22] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.755+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.755+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.756+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.756+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 6964, w: 4155, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 11420, w: 36237, W: 134 } }, Database: { acquireCount: { r: 1386, w: 4147, R: 17, W: 10 }, acquireWaitCount: { r: 12, w: 53, R: 17, W: 8 }, timeAcquiringMicros: { r: 34398, w: 78430, R: 8788, W: 2609 } }, Collection: { acquireCount: { r: 1386, w: 2766 } }, Metadata: { acquireCount: { w: 1383 } }, oplog: { acquireCount: { w: 1383 } } } protocol:op_command 1046ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.871+0000 m31200| 2015-07-19T23:39:49.870+0000 I COMMAND [conn25] command map_reduce_drop.$cmd command: insert { insert: "coll12", documents: 250, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 256, w: 256 } }, Database: { acquireCount: { w: 256 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 104ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.873+0000 m31200| 2015-07-19T23:39:49.872+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_115 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:49.989+0000 m31200| 2015-07-19T23:39:49.988+0000 I COMMAND [conn94] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.001+0000 m31200| 2015-07-19T23:39:50.001+0000 I COMMAND [conn94] CMD: drop map_reduce_drop.tmp.mr.coll12_114 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.001+0000 m31200| 2015-07-19T23:39:50.001+0000 I COMMAND [conn94] CMD: drop map_reduce_drop.tmp.mr.coll12_114 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.003+0000 m31201| 2015-07-19T23:39:50.002+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:50.004+0000 m31202| 2015-07-19T23:39:50.003+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.006+0000 m31200| 2015-07-19T23:39:50.006+0000 I COMMAND [conn94] CMD: drop map_reduce_drop.tmp.mr.coll12_114 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.007+0000 m31200| 2015-07-19T23:39:50.007+0000 I COMMAND [conn94] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.007+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.007+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.007+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.008+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 2499, w: 1488, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 9162, W: 325 } }, Database: { acquireCount: { r: 497, w: 1480, R: 7, W: 10 }, acquireWaitCount: { r: 3, w: 19, R: 7, W: 8 }, timeAcquiringMicros: { r: 4814, w: 20880, R: 1238, W: 6957 } }, Collection: { acquireCount: { r: 497, w: 988 } }, Metadata: { acquireCount: { w: 494 } }, oplog: { acquireCount: { w: 494 } } } protocol:op_command 501ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.009+0000 m30999| 2015-07-19T23:39:50.008+0000 I COMMAND [conn67] DROP: map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.009+0000 m30999| 2015-07-19T23:39:50.008+0000 I COMMAND [conn67] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.009+0000 m31200| 2015-07-19T23:39:50.009+0000 I COMMAND [conn94] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.013+0000 m30999| 2015-07-19T23:39:50.013+0000 I NETWORK [conn67] end connection 10.139.123.131:57397 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.015+0000 m31200| 2015-07-19T23:39:50.014+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.023+0000 m30998| 2015-07-19T23:39:50.023+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:39:50.009+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:30998:1437349129:1804289383', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.025+0000 m31201| 2015-07-19T23:39:50.024+0000 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.027+0000 m31200| 2015-07-19T23:39:50.026+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_113 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.029+0000 m31201| 2015-07-19T23:39:50.029+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.031+0000 m31202| 2015-07-19T23:39:50.031+0000 I COMMAND [repl writer worker 1] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.036+0000 m31200| 2015-07-19T23:39:50.036+0000 I COMMAND [conn126] CMD: drop 
map_reduce_drop.tmp.mr.coll12_113 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.036+0000 m31202| 2015-07-19T23:39:50.036+0000 I COMMAND [repl writer worker 14] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.037+0000 m31200| 2015-07-19T23:39:50.037+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_113 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.038+0000 m31200| 2015-07-19T23:39:50.038+0000 I COMMAND [conn126] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.038+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.038+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.038+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.039+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 2499, w: 1488, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 22013, W: 40 } }, Database: { acquireCount: { r: 497, w: 1480, R: 7, W: 10 }, acquireWaitCount: { r: 6, w: 19, R: 7, W: 8 }, timeAcquiringMicros: { r: 5968, w: 27127, R: 10255, W: 10841 } }, Collection: { acquireCount: { r: 497, w: 988 } }, Metadata: { acquireCount: { w: 494 } }, oplog: { acquireCount: { w: 494 } } } protocol:op_command 552ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.121+0000 m31200| 2015-07-19T23:39:50.121+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_116 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.269+0000 m31200| 2015-07-19T23:39:50.269+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.273+0000 m31200| 2015-07-19T23:39:50.273+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_116 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.273+0000 m31200| 2015-07-19T23:39:50.273+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_116 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.275+0000 m31201| 2015-07-19T23:39:50.275+0000 I COMMAND [repl writer worker 2] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.276+0000 m31200| 2015-07-19T23:39:50.276+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_116 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.276+0000 m31202| 2015-07-19T23:39:50.276+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.277+0000 m31200| 2015-07-19T23:39:50.277+0000 I COMMAND [conn126] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.277+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.278+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.278+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.278+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT 
ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 1268, w: 753, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 76 } }, Database: { acquireCount: { r: 252, w: 745, R: 4, W: 10 }, acquireWaitCount: { R: 4, W: 5 }, timeAcquiringMicros: { R: 1178, W: 840 } }, Collection: { acquireCount: { r: 252, w: 498 } }, Metadata: { acquireCount: { w: 249 } }, oplog: { acquireCount: { w: 249 } } } protocol:op_command 156ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.322+0000 m31200| 2015-07-19T23:39:50.322+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.325+0000 m31200| 2015-07-19T23:39:50.325+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_111 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.325+0000 m31200| 2015-07-19T23:39:50.325+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_111 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.332+0000 m31201| 2015-07-19T23:39:50.331+0000 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.332+0000 m31202| 2015-07-19T23:39:50.332+0000 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.335+0000 m31200| 2015-07-19T23:39:50.335+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_111 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.336+0000 m31200| 2015-07-19T23:39:50.336+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_117 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.340+0000 m31200| 2015-07-19T23:39:50.340+0000 I COMMAND [conn103] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.340+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.341+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.341+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.341+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 8063, w: 4812, W: 3 }, acquireWaitCount: { r: 4, w: 2, W: 1 }, timeAcquiringMicros: { r: 38801, w: 35459, W: 3351 } }, Database: { acquireCount: { r: 1605, w: 4804, R: 19, W: 10 }, acquireWaitCount: { r: 10, w: 55, R: 19, W: 8 }, timeAcquiringMicros: { r: 21312, w: 98982, R: 6880, W: 14812 } }, Collection: { acquireCount: { r: 1605, w: 3204 } }, Metadata: { acquireCount: { w: 1602 } }, oplog: { acquireCount: { w: 1602 } } } protocol:op_command 1327ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.343+0000 m30998| 2015-07-19T23:39:50.342+0000 I COMMAND [conn68] DROP DATABASE: map_reduce_drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.343+0000 m30998| 2015-07-19T23:39:50.343+0000 I SHARDING [conn68] DBConfig::dropDatabase: map_reduce_drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.343+0000 m30998| 2015-07-19T23:39:50.343+0000 I SHARDING [conn68] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:50.343+0000-55ac3546230355f00547ef2b", server: "ip-10-139-123-131", clientAddr: 
"10.139.123.131:36040", time: new Date(1437349190343), what: "dropDatabase.start", ns: "map_reduce_drop", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.400+0000 m31200| 2015-07-19T23:39:50.400+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.403+0000 m31200| 2015-07-19T23:39:50.403+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_115 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.403+0000 m31200| 2015-07-19T23:39:50.403+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_115 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.404+0000 m31200| 2015-07-19T23:39:50.404+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_115 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.404+0000 m31201| 2015-07-19T23:39:50.404+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.404+0000 m31200| 2015-07-19T23:39:50.404+0000 I COMMAND [conn22] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.405+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.405+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.405+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.405+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 3653, w: 2178, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 11376, w: 25054, W: 44 } }, Database: { acquireCount: { r: 727, w: 2170, R: 9, W: 10 }, acquireWaitCount: { r: 4, w: 24, R: 9, W: 3 }, timeAcquiringMicros: { r: 10874, w: 41900, R: 4319, W: 599 } }, Collection: { acquireCount: { r: 727, w: 1448 } }, Metadata: { acquireCount: { w: 724 } }, oplog: { acquireCount: { w: 724 } } } protocol:op_command 532ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.406+0000 m31202| 2015-07-19T23:39:50.405+0000 I COMMAND [repl writer worker 14] CMD: drop map_reduce_drop.coll12_out [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.407+0000 m30998| 2015-07-19T23:39:50.407+0000 I COMMAND [conn67] DROP: map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.408+0000 m30998| 2015-07-19T23:39:50.407+0000 I COMMAND [conn67] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.408+0000 m31200| 2015-07-19T23:39:50.408+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.414+0000 m31202| 2015-07-19T23:39:50.414+0000 I COMMAND [repl writer worker 10] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.415+0000 m31201| 2015-07-19T23:39:50.414+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll12 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.445+0000 m30998| 2015-07-19T23:39:50.444+0000 I SHARDING [conn68] DBConfig::dropDatabase: map_reduce_drop dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.450+0000 m31200| 2015-07-19T23:39:50.449+0000 I COMMAND [conn145] dropDatabase 
map_reduce_drop starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.450+0000 m31200| 2015-07-19T23:39:50.450+0000 I COMMAND [conn145] dropDatabase map_reduce_drop finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.451+0000 m30998| 2015-07-19T23:39:50.450+0000 I SHARDING [conn68] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:50.450+0000-55ac3546230355f00547ef2c", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:36040", time: new Date(1437349190450), what: "dropDatabase", ns: "map_reduce_drop", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.451+0000 m31200| 2015-07-19T23:39:50.451+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_117 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.451+0000 m31200| 2015-07-19T23:39:50.451+0000 I COMMAND [conn126] mr failed, removing collection :: caused by :: 18697 Collection unexpectedly disappeared: map_reduce_drop.tmp.mr.coll12_117 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.455+0000 m31200| 2015-07-19T23:39:50.451+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_118 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.457+0000 m31201| 2015-07-19T23:39:50.457+0000 I COMMAND [repl writer worker 11] dropDatabase map_reduce_drop starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.458+0000 m31201| 2015-07-19T23:39:50.458+0000 I COMMAND [repl writer worker 11] dropDatabase map_reduce_drop finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.460+0000 m31202| 2015-07-19T23:39:50.460+0000 I COMMAND [repl writer worker 2] dropDatabase map_reduce_drop starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.460+0000 m31200| 2015-07-19T23:39:50.460+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_117 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.461+0000 m31200| 2015-07-19T23:39:50.461+0000 I COMMAND [conn126] command map_reduce_drop.tmp.mr.coll12_117 command: mapReduce { mapreduce: "coll12", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.461+0000 m31200| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.461+0000 m31200| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.461+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.462+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: Collection unexpectedly disappeared: map_reduce_drop.tmp.mr.coll12_117 code:18697 numYields:0 reslen:115 locks:{ Global: { acquireCount: { r: 937, w: 707 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 3462, w: 5534 } }, Database: { acquireCount: { r: 108, w: 701, R: 7, W: 9 }, acquireWaitCount: { r: 5, w: 1, R: 7, W: 3 }, timeAcquiringMicros: { r: 17229, w: 29, R: 1092, W: 1657 } }, Collection: { acquireCount: { r: 108, w: 599 } }, Metadata: { acquireCount: { w: 104 } }, oplog: { acquireCount: { w: 104 } } } protocol:op_command 126ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.462+0000 m31202| 2015-07-19T23:39:50.461+0000 I COMMAND [repl writer worker 2] dropDatabase map_reduce_drop finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.463+0000 m30999| 2015-07-19T23:39:50.463+0000 I COMMAND [conn69] DROP: map_reduce_drop.coll12 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.463+0000 m30999| 2015-07-19T23:39:50.463+0000 I COMMAND [conn69] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.463+0000 m31200| 2015-07-19T23:39:50.463+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.549+0000 m31100| 2015-07-19T23:39:50.548+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.549+0000 m31200| 2015-07-19T23:39:50.548+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_119
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.549+0000 m31100| 2015-07-19T23:39:50.549+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.582+0000 m31200| 2015-07-19T23:39:50.582+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_120
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.582+0000 m31200| 2015-07-19T23:39:50.582+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_121
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.851+0000 m31100| 2015-07-19T23:39:50.851+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:39:50.850+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:31100:1437349130:1993228155', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.975+0000 m31200| 2015-07-19T23:39:50.975+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.986+0000 m31200| 2015-07-19T23:39:50.986+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_120
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.986+0000 m31200| 2015-07-19T23:39:50.986+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_120
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.988+0000 m31200| 2015-07-19T23:39:50.988+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_120
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.989+0000 m31200| 2015-07-19T23:39:50.989+0000 I COMMAND [conn22] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.989+0000 m31200| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.989+0000 m31200| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.989+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.990+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 3662, w: 2183, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 33 } }, Database: { acquireCount: { r: 729, w: 2175, R: 9, W: 11 }, acquireWaitCount: { w: 5, R: 5, W: 9 }, timeAcquiringMicros: { w: 10387, R: 31887, W: 11033 } }, Collection: { acquireCount: { r: 729, w: 1453 } }, Metadata: { acquireCount: { w: 725 } }, oplog: { acquireCount: { w: 725 } } } protocol:op_command 438ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.990+0000 m30998| 2015-07-19T23:39:50.990+0000 I COMMAND [conn67] DROP: map_reduce_drop.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.990+0000 m30998| 2015-07-19T23:39:50.990+0000 I COMMAND [conn67] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:50.990+0000 m31200| 2015-07-19T23:39:50.990+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.014+0000 m31202| 2015-07-19T23:39:51.013+0000 I COMMAND [repl writer worker 2] CMD: drop map_reduce_drop.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.014+0000 m31201| 2015-07-19T23:39:51.014+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.031+0000 m31200| 2015-07-19T23:39:51.031+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.033+0000 m31200| 2015-07-19T23:39:51.033+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_121
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.034+0000 m31200| 2015-07-19T23:39:51.033+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_121
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.039+0000 m31201| 2015-07-19T23:39:51.038+0000 I COMMAND [repl writer worker 14] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.039+0000 m31202| 2015-07-19T23:39:51.039+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.048+0000 m31200| 2015-07-19T23:39:51.048+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_121
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.049+0000 m31200| 2015-07-19T23:39:51.049+0000 I COMMAND [conn103] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.049+0000 m31200| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.049+0000 m31200| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.049+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.050+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 3663, w: 2184, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 11402, W: 6555 } }, Database: { acquireCount: { r: 729, w: 2176, R: 9, W: 11 }, acquireWaitCount: { r: 6, w: 5, R: 6, W: 9 }, timeAcquiringMicros: { r: 56321, w: 6965, R: 958, W: 22614 } }, Collection: { acquireCount: { r: 729, w: 1453 } }, Metadata: { acquireCount: { w: 726 } }, oplog: { acquireCount: { w: 726 } } } protocol:op_command 496ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.060+0000 m31200| 2015-07-19T23:39:51.060+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_122
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.093+0000 m31200| 2015-07-19T23:39:51.092+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_123
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.123+0000 m31200| 2015-07-19T23:39:51.123+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.125+0000 m31200| 2015-07-19T23:39:51.125+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_119
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.126+0000 m31200| 2015-07-19T23:39:51.125+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_119
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.126+0000 m31200| 2015-07-19T23:39:51.126+0000 I COMMAND [conn126] CMD: drop map_reduce_drop.tmp.mr.coll12_119
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.126+0000 m31200| 2015-07-19T23:39:51.126+0000 I COMMAND [conn126] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.126+0000 m31200| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.127+0000 m31200| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.127+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.127+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 3663, w: 2184, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 16976, W: 94 } }, Database: { acquireCount: { r: 729, w: 2176, R: 9, W: 11 }, acquireWaitCount: { r: 11, w: 22, R: 8, W: 6 }, timeAcquiringMicros: { r: 32548, w: 64820, R: 24913, W: 2510 } }, Collection: { acquireCount: { r: 729, w: 1453 } }, Metadata: { acquireCount: { w: 726 } }, oplog: { acquireCount: { w: 726 } } } protocol:op_command 589ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.127+0000 m31202| 2015-07-19T23:39:51.126+0000 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.129+0000 m31201| 2015-07-19T23:39:51.128+0000 I COMMAND [repl writer worker 10] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.130+0000 m30999| 2015-07-19T23:39:51.130+0000 I NETWORK [conn69] end connection 10.139.123.131:57400 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.151+0000 m31200| 2015-07-19T23:39:51.151+0000 I SHARDING [LockPinger] cluster test-configRS/ip-10-139-123-131:29000 pinged successfully at 2015-07-19T23:39:51.150+0000 by distributed lock pinger 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:31200:1437349131:182555922', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.313+0000 m31200| 2015-07-19T23:39:51.312+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.316+0000 m31200| 2015-07-19T23:39:51.316+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_122
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.316+0000 m31200| 2015-07-19T23:39:51.316+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_122
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.317+0000 m31200| 2015-07-19T23:39:51.317+0000 I COMMAND [conn103] CMD: drop map_reduce_drop.tmp.mr.coll12_122
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.318+0000 m31200| 2015-07-19T23:39:51.318+0000 I COMMAND [conn103] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.318+0000 m31200| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.318+0000 m31200| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.318+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.319+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 2514, w: 1497, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 7222, W: 37 } }, Database: { acquireCount: { r: 500, w: 1489, R: 7, W: 10 }, acquireWaitCount: { w: 4, R: 7, W: 5 }, timeAcquiringMicros: { w: 8668, R: 16013, W: 12112 } }, Collection: { acquireCount: { r: 500, w: 994 } }, Metadata: { acquireCount: { w: 497 } }, oplog: { acquireCount: { w: 497 } } } protocol:op_command 258ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.319+0000 m31201| 2015-07-19T23:39:51.318+0000 I COMMAND [repl writer worker 4] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.321+0000 m31202| 2015-07-19T23:39:51.320+0000 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.323+0000 m30998| 2015-07-19T23:39:51.323+0000 I NETWORK [conn67] end connection 10.139.123.131:36037 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.331+0000 m31200| 2015-07-19T23:39:51.331+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.336+0000 m31200| 2015-07-19T23:39:51.336+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_123
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.336+0000 m31200| 2015-07-19T23:39:51.336+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_123
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.339+0000 m31200| 2015-07-19T23:39:51.339+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_123
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.340+0000 m31200| 2015-07-19T23:39:51.340+0000 I COMMAND [conn22] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.340+0000 m31200| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.340+0000 m31200| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.340+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.341+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 2514, w: 1497, W: 3 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 6696 } }, Database: { acquireCount: { r: 500, w: 1489, R: 7, W: 10 }, acquireWaitCount: { r: 2, w: 5, R: 6, W: 4 }, timeAcquiringMicros: { r: 97, w: 14799, R: 6027, W: 3684 } }, Collection: { acquireCount: { r: 500, w: 994 } }, Metadata: { acquireCount: { w: 497 } }, oplog: { acquireCount: { w: 497 } } } protocol:op_command 251ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.341+0000 m31201| 2015-07-19T23:39:51.340+0000 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.342+0000 m31202| 2015-07-19T23:39:51.342+0000 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.372+0000 m31200| 2015-07-19T23:39:51.372+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_124
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.551+0000 m31100| 2015-07-19T23:39:51.551+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.573+0000 m31100| 2015-07-19T23:39:51.551+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.618+0000 m31200| 2015-07-19T23:39:51.618+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.624+0000 m31200| 2015-07-19T23:39:51.624+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_124
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.624+0000 m31200| 2015-07-19T23:39:51.624+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_124
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.625+0000 m31200| 2015-07-19T23:39:51.624+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_124
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.625+0000 m31200| 2015-07-19T23:39:51.625+0000 I COMMAND [conn22] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.625+0000 m31200| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.625+0000 m31200| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.625+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.626+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 3713, w: 2214, W: 3 } }, Database: { acquireCount: { r: 739, w: 2206, R: 9, W: 10 } }, Collection: { acquireCount: { r: 739, w: 1472 } }, Metadata: { acquireCount: { w: 736 } }, oplog: { acquireCount: { w: 736 } } } protocol:op_command 253ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.626+0000 m31201| 2015-07-19T23:39:51.625+0000 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll12_out
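
Entries of the form "command ... locks:{ ... } protocol:op_command NNNms" are mongod's slow-operation log lines: any operation that exceeds the slow-op threshold (100 ms by default) is logged together with its lock acquisition counts and wait times, which is why every mapReduce pass above appears with a full locks breakdown. A one-line shell sketch for adjusting that threshold; the 500 ms value is a hypothetical example:

    // Sketch: keep the profiler off, but only log operations slower than 500 ms (hypothetical value).
    db.setProfilingLevel(0, 500);
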
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.626+0000 m31202| 2015-07-19T23:39:51.626+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.659+0000 m31200| 2015-07-19T23:39:51.658+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_125
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.906+0000 m31200| 2015-07-19T23:39:51.905+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.915+0000 m31200| 2015-07-19T23:39:51.914+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_125
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.915+0000 m31200| 2015-07-19T23:39:51.915+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_125
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.916+0000 m31201| 2015-07-19T23:39:51.916+0000 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.916+0000 m31200| 2015-07-19T23:39:51.916+0000 I COMMAND [conn22] CMD: drop map_reduce_drop.tmp.mr.coll12_125
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.917+0000 m31200| 2015-07-19T23:39:51.916+0000 I COMMAND [conn22] command map_reduce_drop.coll12_out command: mapReduce { mapreduce: "coll12", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.917+0000 m31200| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.917+0000 m31200| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.917+0000 m31200| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.917+0000 m31200| return redu..., out: "coll12_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:197 locks:{ Global: { acquireCount: { r: 4889, w: 2916, W: 3 } }, Database: { acquireCount: { r: 973, w: 2908, R: 12, W: 10 } }, Collection: { acquireCount: { r: 973, w: 1940 } }, Metadata: { acquireCount: { w: 970 } }, oplog: { acquireCount: { w: 970 } } } protocol:op_command 258ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.917+0000 m31202| 2015-07-19T23:39:51.916+0000 I COMMAND [repl writer worker 14] CMD: drop map_reduce_drop.coll12_out
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.921+0000 m30998| 2015-07-19T23:39:51.920+0000 I NETWORK [conn68] end connection 10.139.123.131:36040 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.940+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.940+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.940+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.940+0000 jstests/concurrency/fsm_workloads/map_reduce_drop.js: Workload completed in 6073 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.940+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.940+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.940+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.941+0000 m30999| 2015-07-19T23:39:51.941+0000 I COMMAND [conn1] DROP: db12.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.941+0000 m30999| 2015-07-19T23:39:51.941+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:51.941+0000-55ac3547d2c1f750d15483dd", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349191941), what: "dropCollection.start", ns: "db12.coll12", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.993+0000 m30999| 2015-07-19T23:39:51.993+0000 I SHARDING [conn1] distributed lock 'db12.coll12/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3547d2c1f750d15483de
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.993+0000 m31100| 2015-07-19T23:39:51.993+0000 I COMMAND [conn127] CMD: drop db12.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.993+0000 m31100| 2015-07-19T23:39:51.993+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 440ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.994+0000 m31100| 2015-07-19T23:39:51.993+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 439ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.994+0000 m31200| 2015-07-19T23:39:51.994+0000 I COMMAND [conn142] CMD: drop db12.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.996+0000 m31101| 2015-07-19T23:39:51.996+0000 I COMMAND [repl writer worker 13] CMD: drop db12.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.996+0000 m31102| 2015-07-19T23:39:51.996+0000 I COMMAND [repl writer worker 2] CMD: drop db12.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.997+0000 m31201| 2015-07-19T23:39:51.997+0000 I COMMAND [repl writer worker 4] CMD: drop db12.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:51.998+0000 m31202| 2015-07-19T23:39:51.997+0000 I COMMAND [repl writer worker 9] CMD: drop db12.coll12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.047+0000 m31100| 2015-07-19T23:39:52.046+0000 I SHARDING [conn127] remotely refreshing metadata for db12.coll12 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac3541d2c1f750d15483d7, current metadata version is 2|3||55ac3541d2c1f750d15483d7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.047+0000 m31100| 2015-07-19T23:39:52.047+0000 W SHARDING [conn127] no chunks found when reloading db12.coll12, previous version was 0|0||55ac3541d2c1f750d15483d7, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.047+0000 m31100| 2015-07-19T23:39:52.047+0000 I SHARDING [conn127] dropping metadata for db12.coll12 at shard version 2|3||55ac3541d2c1f750d15483d7, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.048+0000 m31200| 2015-07-19T23:39:52.047+0000 I SHARDING [conn142] remotely refreshing metadata for db12.coll12 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac3541d2c1f750d15483d7, current metadata version is 2|5||55ac3541d2c1f750d15483d7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.048+0000 m31200| 2015-07-19T23:39:52.048+0000 W SHARDING [conn142] no chunks found when reloading db12.coll12, previous version was 0|0||55ac3541d2c1f750d15483d7, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.048+0000 m31200| 2015-07-19T23:39:52.048+0000 I SHARDING [conn142] dropping metadata for db12.coll12 at shard version 2|5||55ac3541d2c1f750d15483d7, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.048+0000 m30999| 2015-07-19T23:39:52.048+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:52.048+0000-55ac3548d2c1f750d15483df", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349192048), what: "dropCollection", ns: "db12.coll12", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.100+0000 m30999| 2015-07-19T23:39:52.100+0000 I SHARDING [conn1] distributed lock 'db12.coll12/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.151+0000 m30999| 2015-07-19T23:39:52.151+0000 I COMMAND [conn1] DROP DATABASE: db12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.152+0000 m30999| 2015-07-19T23:39:52.151+0000 I SHARDING [conn1] DBConfig::dropDatabase: db12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.152+0000 m30999| 2015-07-19T23:39:52.151+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:52.151+0000-55ac3548d2c1f750d15483e0", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349192151), what: "dropDatabase.start", ns: "db12", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.253+0000 m30999| 2015-07-19T23:39:52.253+0000 I SHARDING [conn1] DBConfig::dropDatabase: db12 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.254+0000 m31200| 2015-07-19T23:39:52.253+0000 I COMMAND [conn111] dropDatabase db12 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.254+0000 m31200| 2015-07-19T23:39:52.253+0000 I COMMAND [conn111] dropDatabase db12 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.254+0000 m30999| 2015-07-19T23:39:52.253+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:52.253+0000-55ac3548d2c1f750d15483e1", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349192253), what: "dropDatabase", ns: "db12", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.255+0000 m31200| 2015-07-19T23:39:52.253+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.255+0000 m31200| 2015-07-19T23:39:52.253+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 29 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.255+0000 m31201| 2015-07-19T23:39:52.254+0000 I COMMAND [repl writer worker 0] dropDatabase db12 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.255+0000 m31202| 2015-07-19T23:39:52.254+0000 I COMMAND [repl writer worker 6] dropDatabase db12 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.255+0000 m31202| 2015-07-19T23:39:52.254+0000 I COMMAND [repl writer worker 6] dropDatabase db12 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.256+0000 m31201| 2015-07-19T23:39:52.254+0000 I COMMAND [repl writer worker 0] dropDatabase db12 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.311+0000 m31100| 2015-07-19T23:39:52.310+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 312ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.311+0000 m31100| 2015-07-19T23:39:52.310+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 312ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.317+0000 m31100| 2015-07-19T23:39:52.317+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.320+0000 m31102| 2015-07-19T23:39:52.319+0000 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.320+0000 m31101| 2015-07-19T23:39:52.319+0000 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.329+0000 m31200| 2015-07-19T23:39:52.329+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.330+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.330+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.330+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.330+0000 jstests/concurrency/fsm_workloads/explain_update.js
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.330+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.330+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.330+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.332+0000 m31201| 2015-07-19T23:39:52.331+0000 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.333+0000 m31202| 2015-07-19T23:39:52.332+0000 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.333+0000 m30999| 2015-07-19T23:39:52.332+0000 I SHARDING [conn1] distributed lock 'db13/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3548d2c1f750d15483e2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.335+0000 m30999| 2015-07-19T23:39:52.335+0000 I SHARDING [conn1] Placing [db13] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.335+0000 m30999| 2015-07-19T23:39:52.335+0000 I SHARDING [conn1] Enabling sharding for database [db13] in config db
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.386+0000 m30999| 2015-07-19T23:39:52.386+0000 I SHARDING [conn1] distributed lock 'db13/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
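
Between workloads the harness tears down the previous database through the mongos (m30999), as the db12 entries above show: dropCollection takes the distributed lock for db12.coll12, each shard drops its data and sharding metadata, and DBConfig::dropDatabase then removes the database entry from the config servers. A shell sketch of the equivalent teardown, assuming a connection to the mongos:

    // Sketch, assuming `db` is connected to the mongos on port 30999.
    var prev = db.getSiblingDB('db12');
    prev.coll12.drop();   // "DROP: db12.coll12" -> dropCollection.start / dropCollection events
    prev.dropDatabase();  // "DROP DATABASE: db12" -> DBConfig::dropDatabase
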
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.392+0000 m31200| 2015-07-19T23:39:52.392+0000 I INDEX [conn28] build index on: db13.coll13 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db13.coll13" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.392+0000 m31200| 2015-07-19T23:39:52.392+0000 I INDEX [conn28] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.394+0000 m31200| 2015-07-19T23:39:52.393+0000 I INDEX [conn28] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.394+0000 m30999| 2015-07-19T23:39:52.394+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db13.coll13", key: { j: 1.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.396+0000 m30999| 2015-07-19T23:39:52.395+0000 I SHARDING [conn1] distributed lock 'db13.coll13/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3548d2c1f750d15483e3
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.396+0000 m30999| 2015-07-19T23:39:52.396+0000 I SHARDING [conn1] enable sharding on: db13.coll13 with shard key: { j: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.396+0000 m30999| 2015-07-19T23:39:52.396+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:52.396+0000-55ac3548d2c1f750d15483e4", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349192396), what: "shardCollection.start", ns: "db13.coll13", details: { shardKey: { j: 1.0 }, collection: "db13.coll13", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 1 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.397+0000 m31201| 2015-07-19T23:39:52.396+0000 I INDEX [repl writer worker 13] build index on: db13.coll13 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db13.coll13" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.397+0000 m31201| 2015-07-19T23:39:52.396+0000 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.398+0000 m31201| 2015-07-19T23:39:52.397+0000 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.398+0000 m31202| 2015-07-19T23:39:52.397+0000 I INDEX [repl writer worker 10] build index on: db13.coll13 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db13.coll13" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.398+0000 m31202| 2015-07-19T23:39:52.397+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.399+0000 m31202| 2015-07-19T23:39:52.399+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.447+0000 m30999| 2015-07-19T23:39:52.447+0000 I SHARDING [conn1] going to create 1 chunk(s) for: db13.coll13 using new epoch 55ac3548d2c1f750d15483e5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.498+0000 m30999| 2015-07-19T23:39:52.498+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db13.coll13: 0ms sequenceNumber: 63 version: 1|0||55ac3548d2c1f750d15483e5 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.550+0000 m30999| 2015-07-19T23:39:52.550+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db13.coll13: 0ms sequenceNumber: 64 version: 1|0||55ac3548d2c1f750d15483e5 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.550+0000 m31200| 2015-07-19T23:39:52.550+0000 I SHARDING [conn126] remotely refreshing metadata for db13.coll13 with requested shard version 1|0||55ac3548d2c1f750d15483e5, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.551+0000 m31200| 2015-07-19T23:39:52.551+0000 I SHARDING [conn126] collection db13.coll13 was previously unsharded, new metadata loaded with shard version 1|0||55ac3548d2c1f750d15483e5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.551+0000 m31200| 2015-07-19T23:39:52.551+0000 I SHARDING [conn126] collection version was loaded at version 1|0||55ac3548d2c1f750d15483e5, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.551+0000 m30999| 2015-07-19T23:39:52.551+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:52.551+0000-55ac3548d2c1f750d15483e6", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349192551), what: "shardCollection", ns: "db13.coll13", details: { version: "1|0||55ac3548d2c1f750d15483e5" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.602+0000 m30999| 2015-07-19T23:39:52.602+0000 I SHARDING [conn1] distributed lock 'db13.coll13/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
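
The explain_update.js setup above is the standard sharding preamble: db13 is placed on test-rs1 with sharding enabled, an index on the shard key { j: 1 } is built on the primary and replicated to 31201/31202, and shardcollection creates the initial single chunk with epoch 55ac3548d2c1f750d15483e5. A shell sketch of the same steps, run against the mongos:

    // Sketch of the setup commands behind the entries above.
    sh.enableSharding('db13');
    db.getSiblingDB('db13').coll13.createIndex({ j: 1 });
    sh.shardCollection('db13.coll13', { j: 1 });
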
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.603+0000 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.739+0000 m30998| 2015-07-19T23:39:52.739+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36045 #69 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.740+0000 m30998| 2015-07-19T23:39:52.740+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36046 #70 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.754+0000 m30999| 2015-07-19T23:39:52.754+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57406 #70 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.760+0000 m30998| 2015-07-19T23:39:52.760+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36048 #71 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.770+0000 m30999| 2015-07-19T23:39:52.770+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57408 #71 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.773+0000 m30998| 2015-07-19T23:39:52.772+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36050 #72 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.778+0000 m30999| 2015-07-19T23:39:52.777+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57410 #72 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.780+0000 m30998| 2015-07-19T23:39:52.780+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36052 #73 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.789+0000 m30999| 2015-07-19T23:39:52.789+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57413 #73 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.789+0000 m30999| 2015-07-19T23:39:52.789+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57412 #74 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.801+0000 setting random seed: 7260370319709 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.801+0000 setting random seed: 7997982511296 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.801+0000 setting random seed: 6908954340033 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.801+0000 setting random seed: 9809359428472 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.802+0000 setting random seed: 7664344036020 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.802+0000 setting random seed: 785056897439 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.802+0000 setting random seed: 896564675495 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.802+0000 setting random seed: 6386085529811 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.802+0000 setting random seed: 2481595892459 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.802+0000 setting random seed: 1449620332568 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.806+0000 m31200| 2015-07-19T23:39:52.803+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 406ms 
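
The recurring "getmore local.oplog.rs" entries (here and throughout the log) are replication at work: each secondary holds a tailable, awaitData cursor on its sync source's oplog that blocks for up to about a second waiting for new entries, which is why idle getmores report ~1000ms with nreturned:0. A shell sketch of such a cursor, reusing the timestamp shown in the entries above:

    // Sketch: a tailable awaitData cursor over the oplog, like the ones behind conn9/conn11 above.
    var local = db.getSiblingDB('local');
    var cur = local.oplog.rs.find({ ts: { $gte: Timestamp(1437349120, 1) } })
                  .addOption(DBQuery.Option.tailable)
                  .addOption(DBQuery.Option.awaitData);
    while (cur.hasNext()) { printjson(cur.next()); } // blocks briefly at the tail waiting for new ops
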
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.806+0000 m31200| 2015-07-19T23:39:52.803+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 405ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.816+0000 m30998| 2015-07-19T23:39:52.816+0000 I SHARDING [conn70] ChunkManager: time to load chunks for db13.coll13: 0ms sequenceNumber: 16 version: 1|0||55ac3548d2c1f750d15483e5 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.872+0000 m31200| 2015-07-19T23:39:52.872+0000 I SHARDING [conn136] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.872+0000 m31200| 2015-07-19T23:39:52.872+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.873+0000 m31200| 2015-07-19T23:39:52.872+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.873+0000 m31200| 2015-07-19T23:39:52.872+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.873+0000 m31200| 2015-07-19T23:39:52.873+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.874+0000 m31200| 2015-07-19T23:39:52.873+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.874+0000 m31200| 2015-07-19T23:39:52.873+0000 I SHARDING [conn18] could not acquire lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.874+0000 m31200| 2015-07-19T23:39:52.873+0000 I SHARDING [conn18] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.874+0000 m31200| 2015-07-19T23:39:52.873+0000 W SHARDING [conn18] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.875+0000 m30999| 2015-07-19T23:39:52.873+0000 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.875+0000 m31200| 2015-07-19T23:39:52.874+0000 I SHARDING [conn86] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3548d9a63f6196b1728c [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.875+0000 m31200| 2015-07-19T23:39:52.874+0000 I SHARDING [conn86] remotely refreshing metadata for db13.coll13 based on current shard version 1|0||55ac3548d2c1f750d15483e5, current metadata version is 1|0||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.875+0000 m31200| 2015-07-19T23:39:52.875+0000 I SHARDING [conn86] metadata of collection db13.coll13 already up to date (shard version : 1|0||55ac3548d2c1f750d15483e5, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.876+0000 m31200| 2015-07-19T23:39:52.875+0000 I SHARDING [conn86] splitChunk accepted at version 1|0||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.877+0000 m31200| 2015-07-19T23:39:52.875+0000 I SHARDING [conn86] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:52.875+0000-55ac3548d9a63f6196b1728e", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39656", time: new Date(1437349192875), what: "multi-split", ns: "db13.coll13", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 1, of: 3, chunk: { min: { j: MinKey }, max: { j: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('55ac3548d2c1f750d15483e5') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.881+0000 m31200| 2015-07-19T23:39:52.879+0000 I SHARDING [conn136] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.882+0000 m31200| 2015-07-19T23:39:52.879+0000 I SHARDING [conn88] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 14.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.882+0000 m31200| 2015-07-19T23:39:52.881+0000 W SHARDING [conn88] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.883+0000 m30998| 2015-07-19T23:39:52.881+0000 W SHARDING [conn69] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 14.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.884+0000 m31200| 2015-07-19T23:39:52.884+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.885+0000 m31200| 2015-07-19T23:39:52.884+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.886+0000 m31200| 2015-07-19T23:39:52.885+0000 W SHARDING [conn18] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.887+0000 m30999| 2015-07-19T23:39:52.885+0000 W SHARDING [conn74] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.888+0000 m29000| 2015-07-19T23:39:52.888+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55715 #41 (41 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.891+0000 m31200| 2015-07-19T23:39:52.891+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.893+0000 m30999| 2015-07-19T23:39:52.891+0000 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.896+0000 m31200| 2015-07-19T23:39:52.896+0000 I SHARDING [conn136] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.897+0000 m31200| 2015-07-19T23:39:52.896+0000 I SHARDING [conn88] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.900+0000 m31200| 2015-07-19T23:39:52.899+0000 I SHARDING [conn136] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.901+0000 m31200| 2015-07-19T23:39:52.900+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 12.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.901+0000 m31200| 2015-07-19T23:39:52.900+0000 W SHARDING [conn88] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.901+0000 m30998| 2015-07-19T23:39:52.900+0000 W SHARDING [conn69] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.902+0000 m31200| 2015-07-19T23:39:52.900+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.902+0000 m31200| 2015-07-19T23:39:52.901+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 12.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.902+0000 m31200| 2015-07-19T23:39:52.902+0000 W SHARDING [conn89] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.903+0000 m30998| 2015-07-19T23:39:52.902+0000 W SHARDING [conn70] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 12.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.904+0000 m31200| 2015-07-19T23:39:52.904+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.904+0000 m31200| 2015-07-19T23:39:52.904+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.905+0000 m30999| 2015-07-19T23:39:52.904+0000 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 12.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.905+0000 m31200| 2015-07-19T23:39:52.904+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.907+0000 m31200| 2015-07-19T23:39:52.905+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.907+0000 m30999| 2015-07-19T23:39:52.905+0000 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.914+0000 m31200| 2015-07-19T23:39:52.914+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.916+0000 m31200| 2015-07-19T23:39:52.915+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.916+0000 m30998| 2015-07-19T23:39:52.915+0000 I SHARDING [conn69] ChunkManager: time to load chunks for db13.coll13: 0ms sequenceNumber: 17 version: 1|3||55ac3548d2c1f750d15483e5 based on: 1|0||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.918+0000 m31200| 2015-07-19T23:39:52.916+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.918+0000 m31200| 2015-07-19T23:39:52.917+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.919+0000 m30999| 2015-07-19T23:39:52.917+0000 W SHARDING [conn74] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.919+0000 m31200| 2015-07-19T23:39:52.917+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.920+0000 m31200| 2015-07-19T23:39:52.918+0000 W SHARDING [conn18] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.920+0000 m30999| 2015-07-19T23:39:52.919+0000 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.928+0000 m31200| 2015-07-19T23:39:52.926+0000 I SHARDING [conn86] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:52.926+0000-55ac3548d9a63f6196b1728f", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39656", time: new Date(1437349192926), what: "multi-split", ns: "db13.coll13", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 2, of: 3, chunk: { min: { j: 0.0 }, max: { j: 4.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('55ac3548d2c1f750d15483e5') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.932+0000 m31200| 2015-07-19T23:39:52.932+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.933+0000 m31200| 2015-07-19T23:39:52.932+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 22.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.938+0000 m31200| 2015-07-19T23:39:52.934+0000 W SHARDING [conn18] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.941+0000 m30999| 2015-07-19T23:39:52.934+0000 W SHARDING [conn74] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 22.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.950+0000 m31200| 2015-07-19T23:39:52.949+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.951+0000 m31200| 2015-07-19T23:39:52.949+0000 I SHARDING [conn14] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.952+0000 m31200| 2015-07-19T23:39:52.950+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.952+0000 m31200| 2015-07-19T23:39:52.950+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.952+0000 m31200| 2015-07-19T23:39:52.952+0000 W SHARDING [conn18] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.953+0000 m31200| 2015-07-19T23:39:52.952+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.955+0000 m30999| 2015-07-19T23:39:52.952+0000 W SHARDING [conn70] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.957+0000 m30999| 2015-07-19T23:39:52.952+0000 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.962+0000 m31200| 2015-07-19T23:39:52.962+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.963+0000 m31200| 2015-07-19T23:39:52.963+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 22.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.964+0000 m31200| 2015-07-19T23:39:52.964+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.964+0000 m30999| 2015-07-19T23:39:52.964+0000 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 22.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.970+0000 m31200| 2015-07-19T23:39:52.969+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.970+0000 m31200| 2015-07-19T23:39:52.970+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.971+0000 m31200| 2015-07-19T23:39:52.971+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.972+0000 m30999| 2015-07-19T23:39:52.971+0000 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.977+0000 m31200| 2015-07-19T23:39:52.977+0000 I SHARDING [conn86] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:52.977+0000-55ac3548d9a63f6196b17290", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39656", time: new Date(1437349192977), what: "multi-split", ns: "db13.coll13", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 3, of: 3, chunk: { min: { j: 4.0 }, max: { j: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('55ac3548d2c1f750d15483e5') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.982+0000 m31200| 2015-07-19T23:39:52.982+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.983+0000 m31200| 2015-07-19T23:39:52.983+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 22.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.985+0000 m31200| 2015-07-19T23:39:52.984+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:52.985+0000 m30999| 2015-07-19T23:39:52.985+0000 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 22.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.000+0000 m31200| 2015-07-19T23:39:53.000+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.001+0000 m31200| 2015-07-19T23:39:53.000+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 26.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.002+0000 m31200| 2015-07-19T23:39:53.002+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.002+0000 m30999| 2015-07-19T23:39:53.002+0000 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 26.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.011+0000 m31200| 2015-07-19T23:39:53.010+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.012+0000 m31200| 2015-07-19T23:39:53.011+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 }, { j: 32.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.013+0000 m31200| 2015-07-19T23:39:53.013+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.013+0000 m30999| 2015-07-19T23:39:53.013+0000 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 }, { j: 32.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.020+0000 m31200| 2015-07-19T23:39:53.019+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.020+0000 m31200| 2015-07-19T23:39:53.020+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 38.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.021+0000 m31200| 2015-07-19T23:39:53.021+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.022+0000 m30999| 2015-07-19T23:39:53.021+0000 W SHARDING [conn74] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 38.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.028+0000 m31200| 2015-07-19T23:39:53.027+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.028+0000 m31200| 2015-07-19T23:39:53.028+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 32.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.029+0000 m31200| 2015-07-19T23:39:53.028+0000 I SHARDING [conn86] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.030+0000 m31200| 2015-07-19T23:39:53.028+0000 I COMMAND [conn86] command db13.coll13 command: splitChunk { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 157 } } } protocol:op_command 155ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.030+0000 m30998| 2015-07-19T23:39:53.029+0000 I SHARDING [conn72] autosplitted db13.coll13 shard: ns: db13.coll13, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { j: MinKey }, max: { j: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.030+0000 m31200| 2015-07-19T23:39:53.029+0000 W SHARDING [conn98] could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db13.coll13 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.031+0000 m30999| 2015-07-19T23:39:53.029+0000 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 32.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db13.coll13 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.040+0000 m31200| 2015-07-19T23:39:53.039+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.042+0000 m31200| 2015-07-19T23:39:53.040+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 34.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.042+0000 m31200| 2015-07-19T23:39:53.042+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3549d9a63f6196b17291 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.043+0000 m31200| 2015-07-19T23:39:53.042+0000 I SHARDING [conn98] remotely refreshing metadata for db13.coll13 based on current shard version 1|3||55ac3548d2c1f750d15483e5, current metadata version is 1|3||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.043+0000 m31200| 2015-07-19T23:39:53.042+0000 I SHARDING [conn98] metadata of collection db13.coll13 already up to date (shard version : 1|3||55ac3548d2c1f750d15483e5, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.043+0000 m31200| 2015-07-19T23:39:53.042+0000 W SHARDING [conn98] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.043+0000 m31200| 2015-07-19T23:39:53.043+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.044+0000 m30999| 2015-07-19T23:39:53.043+0000 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 34.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.049+0000 m31200| 2015-07-19T23:39:53.049+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.050+0000 m31200| 2015-07-19T23:39:53.049+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 32.0 }, { j: 44.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.052+0000 m31200| 2015-07-19T23:39:53.051+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3549d9a63f6196b17292 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.052+0000 m31200| 2015-07-19T23:39:53.051+0000 I SHARDING [conn98] remotely refreshing metadata for db13.coll13 based on current shard version 1|3||55ac3548d2c1f750d15483e5, current metadata version is 1|3||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.052+0000 m31200| 2015-07-19T23:39:53.052+0000 I SHARDING [conn98] metadata of collection db13.coll13 already up to date (shard version : 1|3||55ac3548d2c1f750d15483e5, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.052+0000 m31200| 2015-07-19T23:39:53.052+0000 W SHARDING [conn98] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.053+0000 m30999| 2015-07-19T23:39:53.053+0000 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 32.0 }, { j: 44.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.053+0000 m31200| 2015-07-19T23:39:53.052+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.067+0000 m31200| 2015-07-19T23:39:53.066+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.067+0000 m31200| 2015-07-19T23:39:53.066+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 36.0 }, { j: 46.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.068+0000 m31200| 2015-07-19T23:39:53.068+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3549d9a63f6196b17293 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.068+0000 m31200| 2015-07-19T23:39:53.068+0000 I SHARDING [conn98] remotely refreshing metadata for db13.coll13 based on current shard version 1|3||55ac3548d2c1f750d15483e5, current metadata version is 1|3||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.069+0000 m31200| 2015-07-19T23:39:53.068+0000 I SHARDING [conn98] metadata of collection db13.coll13 already up to date (shard version : 1|3||55ac3548d2c1f750d15483e5, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.069+0000 m31200| 2015-07-19T23:39:53.068+0000 W SHARDING [conn98] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.069+0000 m31200| 2015-07-19T23:39:53.069+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.070+0000 m30999| 2015-07-19T23:39:53.069+0000 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 36.0 }, { j: 46.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.070+0000 m31200| 2015-07-19T23:39:53.069+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.070+0000 m31200| 2015-07-19T23:39:53.069+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 36.0 }, { j: 44.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.071+0000 m31200| 2015-07-19T23:39:53.071+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3549d9a63f6196b17294 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.071+0000 m31200| 2015-07-19T23:39:53.071+0000 I SHARDING [conn98] remotely refreshing metadata for db13.coll13 based on current shard version 1|3||55ac3548d2c1f750d15483e5, current metadata version is 1|3||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.071+0000 m31200| 2015-07-19T23:39:53.071+0000 I SHARDING [conn98] metadata of collection db13.coll13 already up to date (shard version : 1|3||55ac3548d2c1f750d15483e5, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.072+0000 m31200| 2015-07-19T23:39:53.071+0000 W SHARDING [conn98] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.072+0000 m31200| 2015-07-19T23:39:53.072+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.072+0000 m30999| 2015-07-19T23:39:53.072+0000 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 36.0 }, { j: 44.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.088+0000 m31200| 2015-07-19T23:39:53.087+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.093+0000 m31200| 2015-07-19T23:39:53.088+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 }, { j: 40.0 }, { j: 48.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.094+0000 m31200| 2015-07-19T23:39:53.089+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3549d9a63f6196b17295 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.094+0000 m31200| 2015-07-19T23:39:53.089+0000 I SHARDING [conn98] remotely refreshing metadata for db13.coll13 based on current shard version 1|3||55ac3548d2c1f750d15483e5, current metadata version is 1|3||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.094+0000 m31200| 2015-07-19T23:39:53.090+0000 I SHARDING [conn98] metadata of collection db13.coll13 already up to date (shard version : 1|3||55ac3548d2c1f750d15483e5, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.094+0000 m31200| 2015-07-19T23:39:53.090+0000 W SHARDING [conn98] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.095+0000 m31200| 2015-07-19T23:39:53.090+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.095+0000 m30999| 2015-07-19T23:39:53.090+0000 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 }, { j: 40.0 }, { j: 48.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.095+0000 m31200| 2015-07-19T23:39:53.090+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.096+0000 m31200| 2015-07-19T23:39:53.091+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 }, { j: 38.0 }, { j: 46.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.096+0000 m31200| 2015-07-19T23:39:53.092+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3549d9a63f6196b17296 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.099+0000 m31200| 2015-07-19T23:39:53.092+0000 I SHARDING [conn98] remotely refreshing metadata for db13.coll13 based on current shard version 1|3||55ac3548d2c1f750d15483e5, current metadata version is 1|3||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.100+0000 m31200| 2015-07-19T23:39:53.093+0000 I SHARDING [conn98] metadata of collection db13.coll13 already up to date (shard version : 1|3||55ac3548d2c1f750d15483e5, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.100+0000 m31200| 2015-07-19T23:39:53.093+0000 W SHARDING [conn98] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.100+0000 m31200| 2015-07-19T23:39:53.093+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.103+0000 m30999| 2015-07-19T23:39:53.093+0000 W SHARDING [conn74] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 }, { j: 38.0 }, { j: 46.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.114+0000 m31200| 2015-07-19T23:39:53.113+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.114+0000 m31200| 2015-07-19T23:39:53.114+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 46.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.115+0000 m31200| 2015-07-19T23:39:53.115+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3549d9a63f6196b17297 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.116+0000 m31200| 2015-07-19T23:39:53.115+0000 I SHARDING [conn98] remotely refreshing metadata for db13.coll13 based on current shard version 1|3||55ac3548d2c1f750d15483e5, current metadata version is 1|3||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.116+0000 m31200| 2015-07-19T23:39:53.116+0000 I SHARDING [conn98] metadata of collection db13.coll13 already up to date (shard version : 1|3||55ac3548d2c1f750d15483e5, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.116+0000 m31200| 2015-07-19T23:39:53.116+0000 W SHARDING [conn98] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.116+0000 m31200| 2015-07-19T23:39:53.116+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.117+0000 m30999| 2015-07-19T23:39:53.116+0000 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 46.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.135+0000 m31200| 2015-07-19T23:39:53.134+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.135+0000 m31200| 2015-07-19T23:39:53.135+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 46.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.136+0000 m31200| 2015-07-19T23:39:53.136+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3549d9a63f6196b17298 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.137+0000 m31200| 2015-07-19T23:39:53.136+0000 I SHARDING [conn98] remotely refreshing metadata for db13.coll13 based on current shard version 1|3||55ac3548d2c1f750d15483e5, current metadata version is 1|3||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.137+0000 m31200| 2015-07-19T23:39:53.137+0000 I SHARDING [conn98] metadata of collection db13.coll13 already up to date (shard version : 1|3||55ac3548d2c1f750d15483e5, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.138+0000 m31200| 2015-07-19T23:39:53.137+0000 W SHARDING [conn98] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.138+0000 m31200| 2015-07-19T23:39:53.137+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.138+0000 m30999| 2015-07-19T23:39:53.137+0000 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 46.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.206+0000 m31200| 2015-07-19T23:39:53.205+0000 I SHARDING [conn142] request split points lookup for chunk db13.coll13 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.207+0000 m31200| 2015-07-19T23:39:53.206+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 48.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.208+0000 m31200| 2015-07-19T23:39:53.208+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac3549d9a63f6196b17299 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.208+0000 m31200| 2015-07-19T23:39:53.208+0000 I SHARDING [conn98] remotely refreshing metadata for db13.coll13 based on current shard version 1|3||55ac3548d2c1f750d15483e5, current metadata version is 1|3||55ac3548d2c1f750d15483e5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.209+0000 m31200| 2015-07-19T23:39:53.208+0000 I SHARDING [conn98] metadata of collection db13.coll13 already up to date (shard version : 1|3||55ac3548d2c1f750d15483e5, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.209+0000 m31200| 2015-07-19T23:39:53.208+0000 W SHARDING [conn98] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.209+0000 m31200| 2015-07-19T23:39:53.209+0000 I SHARDING [conn98] distributed lock 'db13.coll13/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.210+0000 m30999| 2015-07-19T23:39:53.209+0000 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db13.coll13", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 48.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3548d2c1f750d15483e5') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.226+0000 m30999| 2015-07-19T23:39:53.226+0000 I NETWORK [conn71] end connection 10.139.123.131:57408 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.233+0000 m30998| 2015-07-19T23:39:53.232+0000 I NETWORK [conn69] end connection 10.139.123.131:36045 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.242+0000 m30999| 2015-07-19T23:39:53.242+0000 I NETWORK [conn70] end connection 10.139.123.131:57406 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.248+0000 m30998| 2015-07-19T23:39:53.248+0000 I NETWORK [conn71] end connection 10.139.123.131:36048 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.250+0000 m30999| 2015-07-19T23:39:53.250+0000 I NETWORK [conn72] end connection 10.139.123.131:57410 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.250+0000 m30998| 2015-07-19T23:39:53.250+0000 I NETWORK [conn70] end connection 10.139.123.131:36046 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.263+0000 m30999| 2015-07-19T23:39:53.263+0000 I NETWORK [conn74] end connection 10.139.123.131:57412 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.266+0000 m30999| 2015-07-19T23:39:53.265+0000 I NETWORK [conn73] end connection 10.139.123.131:57413 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.271+0000 m30998| 2015-07-19T23:39:53.270+0000 I NETWORK [conn72] end connection 10.139.123.131:36050 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.272+0000 m30998| 2015-07-19T23:39:53.272+0000 I NETWORK [conn73] end connection 10.139.123.131:36052 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.272+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.272+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.272+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.272+0000 jstests/concurrency/fsm_workloads/explain_update.js: Workload completed in 670 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.273+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.273+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.273+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.273+0000 m30999| 2015-07-19T23:39:53.272+0000 I COMMAND [conn1] DROP: db13.coll13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.273+0000 m30999| 2015-07-19T23:39:53.272+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:53.272+0000-55ac3549d2c1f750d15483e7", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349193272), what: "dropCollection.start", ns: "db13.coll13", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.322+0000 m31100| 2015-07-19T23:39:53.322+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.322+0000 m31100| 2015-07-19T23:39:53.322+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.325+0000 m30999| 2015-07-19T23:39:53.324+0000 I SHARDING [conn1] distributed lock 'db13.coll13/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3549d2c1f750d15483e8
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.325+0000 m31100| 2015-07-19T23:39:53.325+0000 I COMMAND [conn127] CMD: drop db13.coll13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.325+0000 m31200| 2015-07-19T23:39:53.325+0000 I COMMAND [conn142] CMD: drop db13.coll13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.325+0000 m31200| 2015-07-19T23:39:53.325+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 115ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.326+0000 m31200| 2015-07-19T23:39:53.325+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 114ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.328+0000 m31201| 2015-07-19T23:39:53.328+0000 I COMMAND [repl writer worker 8] CMD: drop db13.coll13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.328+0000 m31202| 2015-07-19T23:39:53.328+0000 I COMMAND [repl writer worker 11] CMD: drop db13.coll13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.378+0000 m31200| 2015-07-19T23:39:53.378+0000 I SHARDING [conn142] remotely refreshing metadata for db13.coll13 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||55ac3548d2c1f750d15483e5, current metadata version is 1|3||55ac3548d2c1f750d15483e5
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.379+0000 m31200| 2015-07-19T23:39:53.378+0000 W SHARDING [conn142] no chunks found when reloading db13.coll13, previous version was 0|0||55ac3548d2c1f750d15483e5, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.379+0000 m31200| 2015-07-19T23:39:53.379+0000 I SHARDING [conn142] dropping metadata for db13.coll13 at shard version 1|3||55ac3548d2c1f750d15483e5, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.379+0000 m30999| 2015-07-19T23:39:53.379+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:53.379+0000-55ac3549d2c1f750d15483e9", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349193379), what: "dropCollection", ns: "db13.coll13", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.430+0000 m30999| 2015-07-19T23:39:53.430+0000 I SHARDING [conn1] distributed lock 'db13.coll13/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.482+0000 m30999| 2015-07-19T23:39:53.482+0000 I COMMAND [conn1] DROP DATABASE: db13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.482+0000 m30999| 2015-07-19T23:39:53.482+0000 I SHARDING [conn1] DBConfig::dropDatabase: db13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.482+0000 m30999| 2015-07-19T23:39:53.482+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:53.482+0000-55ac3549d2c1f750d15483ea", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349193482), what: "dropDatabase.start", ns: "db13", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.584+0000 m30999| 2015-07-19T23:39:53.584+0000 I SHARDING [conn1] DBConfig::dropDatabase: db13 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.584+0000 m31200| 2015-07-19T23:39:53.584+0000 I COMMAND [conn111] dropDatabase db13 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.585+0000 m31200| 2015-07-19T23:39:53.584+0000 I COMMAND [conn111] dropDatabase db13 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.585+0000 m31200| 2015-07-19T23:39:53.584+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 31 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.585+0000 m31200| 2015-07-19T23:39:53.584+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 31 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.586+0000 m30999| 2015-07-19T23:39:53.584+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:53.584+0000-55ac3549d2c1f750d15483eb", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349193584), what: "dropDatabase", ns: "db13", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.586+0000 m31201| 2015-07-19T23:39:53.585+0000 I COMMAND [repl writer worker 6] dropDatabase db13 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.586+0000 m31201| 2015-07-19T23:39:53.585+0000 I COMMAND [repl writer worker 6] dropDatabase db13 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.586+0000 m31202| 2015-07-19T23:39:53.584+0000 I COMMAND [repl writer worker 0] dropDatabase db13 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.586+0000 m31202| 2015-07-19T23:39:53.585+0000 I COMMAND [repl writer worker 0] dropDatabase db13 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.640+0000 m31100| 2015-07-19T23:39:53.639+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 315ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.640+0000 m31100| 2015-07-19T23:39:53.639+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 315ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.648+0000 m31100| 2015-07-19T23:39:53.648+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.651+0000 m31101| 2015-07-19T23:39:53.650+0000 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.651+0000 m31102| 2015-07-19T23:39:53.650+0000 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.658+0000 m31200| 2015-07-19T23:39:53.658+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.659+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.659+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.660+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.660+0000 jstests/concurrency/fsm_workloads/indexed_insert_base.js
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.660+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.660+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.660+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.661+0000 m31202| 2015-07-19T23:39:53.661+0000 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.662+0000 m31201| 2015-07-19T23:39:53.661+0000 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.663+0000 m30999| 2015-07-19T23:39:53.662+0000 I SHARDING [conn1] distributed lock 'db14/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3549d2c1f750d15483ec
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.664+0000 m30999| 2015-07-19T23:39:53.664+0000 I SHARDING [conn1] Placing [db14] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.664+0000 m30999| 2015-07-19T23:39:53.664+0000 I SHARDING [conn1] Enabling sharding for database [db14] in config db
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.716+0000 m30999| 2015-07-19T23:39:53.716+0000 I SHARDING [conn1] distributed lock 'db14/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.722+0000 m31200| 2015-07-19T23:39:53.722+0000 I INDEX [conn113] build index on: db14.coll14 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.722+0000 m31200| 2015-07-19T23:39:53.722+0000 I INDEX [conn113] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.723+0000 m31200| 2015-07-19T23:39:53.723+0000 I INDEX [conn113] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.724+0000 m30999| 2015-07-19T23:39:53.723+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db14.coll14", key: { x: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.725+0000 m30999| 2015-07-19T23:39:53.725+0000 I SHARDING [conn1] distributed lock 'db14.coll14/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3549d2c1f750d15483ed [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.725+0000 m30999| 2015-07-19T23:39:53.725+0000 I SHARDING [conn1] enable sharding on: db14.coll14 with shard key: { x: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.726+0000 m30999| 2015-07-19T23:39:53.725+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:53.725+0000-55ac3549d2c1f750d15483ee", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349193725), what: "shardCollection.start", ns: "db14.coll14", details: { shardKey: { x: 1.0 }, collection: "db14.coll14", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.727+0000 m31202| 2015-07-19T23:39:53.727+0000 I INDEX [repl writer worker 2] build index on: db14.coll14 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.727+0000 m31202| 2015-07-19T23:39:53.727+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.728+0000 m31201| 2015-07-19T23:39:53.728+0000 I INDEX [repl writer worker 2] build index on: db14.coll14 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.728+0000 m31201| 2015-07-19T23:39:53.728+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.730+0000 m31201| 2015-07-19T23:39:53.730+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.730+0000 m31202| 2015-07-19T23:39:53.730+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.776+0000 m30999| 2015-07-19T23:39:53.776+0000 I SHARDING [conn1] going to create 1 chunk(s) for: db14.coll14 using new epoch 55ac3549d2c1f750d15483ef [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.828+0000 m30999| 2015-07-19T23:39:53.827+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 65 version: 1|0||55ac3549d2c1f750d15483ef based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.879+0000 m30999| 2015-07-19T23:39:53.879+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 66 version: 1|0||55ac3549d2c1f750d15483ef based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.880+0000 m31200| 2015-07-19T23:39:53.879+0000 I SHARDING [conn94] remotely refreshing metadata for db14.coll14 with requested shard version 1|0||55ac3549d2c1f750d15483ef, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.880+0000 m31200| 2015-07-19T23:39:53.880+0000 I SHARDING [conn94] collection db14.coll14 was previously unsharded, new metadata loaded with shard version 1|0||55ac3549d2c1f750d15483ef [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.880+0000 m31200| 2015-07-19T23:39:53.880+0000 I SHARDING [conn94] collection version was loaded at version 1|0||55ac3549d2c1f750d15483ef, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.880+0000 m30999| 2015-07-19T23:39:53.880+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:53.880+0000-55ac3549d2c1f750d15483f0", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349193880), what: "shardCollection", ns: "db14.coll14", details: { version: "1|0||55ac3549d2c1f750d15483ef" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.932+0000 m30999| 2015-07-19T23:39:53.931+0000 I SHARDING [conn1] distributed lock 'db14.coll14/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.935+0000 m31100| 2015-07-19T23:39:53.935+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 282ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.935+0000 m31100| 2015-07-19T23:39:53.935+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 282ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.937+0000 m31100| 2015-07-19T23:39:53.937+0000 I INDEX [conn118] build index on: db14.coll14 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.937+0000 m31100| 2015-07-19T23:39:53.937+0000 I INDEX [conn118] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.938+0000 m31100| 2015-07-19T23:39:53.938+0000 I INDEX [conn118] build index done. scanned 0 total records. 
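(Note: the entries above show the setup sequence the harness runs for each workload: mongos places the new database on a shard, the primary shard builds the shard-key index, and mongos then runs shardcollection. A minimal shell sketch of the same sequence, using the names from the log and run against one of the mongos routers; this is a hedged illustration, not the harness's actual code:

    sh.enableSharding("db14");
    db.getSiblingDB("db14").coll14.createIndex({ x: 1 });  // shard-key index, as built by conn113 above
    sh.shardCollection("db14.coll14", { x: 1 });
)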
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.940+0000 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.941+0000 m31102| 2015-07-19T23:39:53.941+0000 I INDEX [repl writer worker 4] build index on: db14.coll14 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db14.coll14" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.941+0000 m31102| 2015-07-19T23:39:53.941+0000 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.941+0000 m31101| 2015-07-19T23:39:53.941+0000 I INDEX [repl writer worker 7] build index on: db14.coll14 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db14.coll14" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.941+0000 m31101| 2015-07-19T23:39:53.941+0000 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.942+0000 m31102| 2015-07-19T23:39:53.942+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:53.943+0000 m31101| 2015-07-19T23:39:53.942+0000 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.194+0000 m30999| 2015-07-19T23:39:54.194+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57416 #75 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.216+0000 m30998| 2015-07-19T23:39:54.216+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36058 #74 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.225+0000 m30998| 2015-07-19T23:39:54.225+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36059 #75 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.230+0000 m30998| 2015-07-19T23:39:54.230+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36060 #76 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.234+0000 m30998| 2015-07-19T23:39:54.234+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36061 #77 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.242+0000 m30998| 2015-07-19T23:39:54.242+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36062 #78 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.250+0000 m30998| 2015-07-19T23:39:54.250+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36063 #79 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.283+0000 m30998| 2015-07-19T23:39:54.283+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36064 #80 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.292+0000 m30999| 2015-07-19T23:39:54.291+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57424 #76 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.294+0000 m30999| 2015-07-19T23:39:54.294+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57425 #77 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.300+0000 m30999| 2015-07-19T23:39:54.300+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57426 #78 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.305+0000 m30999| 2015-07-19T23:39:54.305+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57427 #79 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.307+0000 m30998| 2015-07-19T23:39:54.306+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36069 #81 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.312+0000 m30999| 2015-07-19T23:39:54.312+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57429 #80 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.315+0000 m30999| 2015-07-19T23:39:54.315+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57430 #81 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.315+0000 m30999| 2015-07-19T23:39:54.315+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57431 #82 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.316+0000 m30999| 2015-07-19T23:39:54.316+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57432 #83 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.319+0000 m30998| 2015-07-19T23:39:54.316+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36074 #82 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.321+0000 m30999| 2015-07-19T23:39:54.321+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57434 #84 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.324+0000 m30998| 2015-07-19T23:39:54.324+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36076 #83 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.328+0000 setting random seed: 2950844019651
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.328+0000 setting random seed: 9798898599110
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.328+0000 setting random seed: 4730237741023
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.330+0000 setting random seed: 9138667392544
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.331+0000 setting random seed: 795328565873
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.343+0000 setting random seed: 9442213200964
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.343+0000 setting random seed: 3490293263457
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.344+0000 setting random seed: 9974332139827
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.344+0000 setting random seed: 2213276918046
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.344+0000 setting random seed: 384560520760
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.344+0000 setting random seed: 2660604426637
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.344+0000 setting random seed: 8342043943703
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.344+0000 setting random seed: 9470855379477
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.344+0000 setting random seed: 4956116680987
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.345+0000 setting random seed: 2895990223623
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.362+0000 setting random seed: 1352286967448
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.363+0000 setting random seed: 8586067669093
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.363+0000 setting random seed: 1682403725571
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.364+0000 m31200| 2015-07-19T23:39:54.350+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 624ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.364+0000 m31200| 2015-07-19T23:39:54.350+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 624ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.364+0000 m30998| 2015-07-19T23:39:54.357+0000 I SHARDING [conn74] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 18 version: 1|0||55ac3549d2c1f750d15483ef based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.378+0000 setting random seed: 5985181434080
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.380+0000 setting random seed: 4712683665566
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.404+0000 m31200| 2015-07-19T23:39:54.403+0000 I SHARDING [conn14] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.404+0000 m31200| 2015-07-19T23:39:54.404+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 15.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.405+0000 m31200| 2015-07-19T23:39:54.404+0000 I SHARDING [conn14] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.406+0000 m31200| 2015-07-19T23:39:54.405+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 15.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.406+0000 m31200| 2015-07-19T23:39:54.405+0000 I SHARDING [conn98] distributed lock 'db14.coll14/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac354ad9a63f6196b1729b
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.406+0000 m31200| 2015-07-19T23:39:54.405+0000 I SHARDING [conn98] remotely refreshing metadata for db14.coll14 based on current shard version 1|0||55ac3549d2c1f750d15483ef, current metadata version is 1|0||55ac3549d2c1f750d15483ef
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.407+0000 m31200| 2015-07-19T23:39:54.406+0000 I SHARDING [conn98] metadata of collection db14.coll14 already up to date (shard version : 1|0||55ac3549d2c1f750d15483ef, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.407+0000 m31200| 2015-07-19T23:39:54.406+0000 I SHARDING [conn98] splitChunk accepted at version 1|0||55ac3549d2c1f750d15483ef
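(Note: the "connection accepted" and "setting random seed" lines above are the 20 FSM worker threads starting up: each worker opens its own connection through one of the two mongos routers (m30998/m30999) and seeds its own RNG. A hedged sketch of that per-worker setup, with the host and seed value taken from the log rather than from the harness's literal code:

    var conn = new Mongo("ip-10-139-123-131:30999");  // one of the two mongos routers
    Random.setRandomSeed(2950844019651);              // matches a "setting random seed" line above
)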
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.408+0000 m30999| 2015-07-19T23:39:54.406+0000 W SHARDING [conn76] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 15.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.408+0000 m31200| 2015-07-19T23:39:54.406+0000 W SHARDING [conn18] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.409+0000 m31200| 2015-07-19T23:39:54.407+0000 I SHARDING [conn98] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:54.407+0000-55ac354ad9a63f6196b1729c", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39669", time: new Date(1437349194407), what: "multi-split", ns: "db14.coll14", details: { before: { min: { x: MinKey }, max: { x: MaxKey } }, number: 1, of: 3, chunk: { min: { x: MinKey }, max: { x: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('55ac3549d2c1f750d15483ef') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.409+0000 m31200| 2015-07-19T23:39:54.407+0000 I SHARDING [conn135] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.409+0000 m31200| 2015-07-19T23:39:54.407+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 14.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.410+0000 m31200| 2015-07-19T23:39:54.407+0000 I SHARDING [conn135] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.410+0000 m31200| 2015-07-19T23:39:54.408+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 14.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.411+0000 m30998| 2015-07-19T23:39:54.409+0000 W SHARDING [conn81] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 14.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.411+0000 m31200| 2015-07-19T23:39:54.408+0000 W SHARDING [conn86] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.411+0000 m31200| 2015-07-19T23:39:54.409+0000 W SHARDING [conn89] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.411+0000 m30998| 2015-07-19T23:39:54.409+0000 W SHARDING [conn80] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 14.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.419+0000 m31200| 2015-07-19T23:39:54.418+0000 I SHARDING [conn14] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.419+0000 m31200| 2015-07-19T23:39:54.419+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 11.0 }, { x: 19.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.421+0000 m31200| 2015-07-19T23:39:54.420+0000 W SHARDING [conn18] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.422+0000 m30999| 2015-07-19T23:39:54.420+0000 W SHARDING [conn79] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 11.0 }, { x: 19.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.424+0000 m31200| 2015-07-19T23:39:54.424+0000 I SHARDING [conn135] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.424+0000 m31200| 2015-07-19T23:39:54.424+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 10.0 }, { x: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.432+0000 m31200| 2015-07-19T23:39:54.426+0000 W SHARDING [conn89] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.439+0000 m30998| 2015-07-19T23:39:54.426+0000 W SHARDING [conn78] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 10.0 }, { x: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.445+0000 m30999| 2015-07-19T23:39:54.428+0000 I SHARDING [conn77] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 67 version: 1|3||55ac3549d2c1f750d15483ef based on: 1|0||55ac3549d2c1f750d15483ef
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.449+0000 m31200| 2015-07-19T23:39:54.434+0000 I SHARDING [conn135] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.449+0000 m31200| 2015-07-19T23:39:54.435+0000 I SHARDING [conn135] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.449+0000 m31200| 2015-07-19T23:39:54.435+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 8.0 }, { x: 12.0 }, { x: 18.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.454+0000 m31200| 2015-07-19T23:39:54.435+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 7.0 }, { x: 12.0 }, { x: 17.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.459+0000 m31200| 2015-07-19T23:39:54.436+0000 W SHARDING [conn89] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.459+0000 m30998| 2015-07-19T23:39:54.436+0000 W SHARDING [conn75] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 8.0 }, { x: 12.0 }, { x: 18.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.459+0000 m31200| 2015-07-19T23:39:54.436+0000 I SHARDING [conn135] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.460+0000 m31200| 2015-07-19T23:39:54.437+0000 W SHARDING [conn86] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.462+0000 m30998| 2015-07-19T23:39:54.437+0000 W SHARDING [conn74] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 7.0 }, { x: 12.0 }, { x: 17.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.462+0000 m31200| 2015-07-19T23:39:54.437+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 7.0 }, { x: 12.0 }, { x: 17.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.462+0000 m31200| 2015-07-19T23:39:54.438+0000 W SHARDING [conn89] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.463+0000 m30998| 2015-07-19T23:39:54.438+0000 W SHARDING [conn79] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 7.0 }, { x: 12.0 }, { x: 17.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.464+0000 m31200| 2015-07-19T23:39:54.448+0000 I SHARDING [conn135] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.464+0000 m31200| 2015-07-19T23:39:54.449+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 7.0 }, { x: 10.0 }, { x: 14.0 }, { x: 19.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.464+0000 m31200| 2015-07-19T23:39:54.450+0000 W SHARDING [conn89] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.465+0000 m30998| 2015-07-19T23:39:54.451+0000 W SHARDING [conn79] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 7.0 }, { x: 10.0 }, { x: 14.0 }, { x: 19.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.466+0000 m31200| 2015-07-19T23:39:54.457+0000 I SHARDING [conn98] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:54.457+0000-55ac354ad9a63f6196b1729d", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39669", time: new Date(1437349194457), what: "multi-split", ns: "db14.coll14", details: { before: { min: { x: MinKey }, max: { x: MaxKey } }, number: 2, of: 3, chunk: { min: { x: 0.0 }, max: { x: 15.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('55ac3549d2c1f750d15483ef') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.466+0000 m31200| 2015-07-19T23:39:54.460+0000 I SHARDING [conn135] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.466+0000 m31200| 2015-07-19T23:39:54.461+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 6.0 }, { x: 9.0 }, { x: 12.0 }, { x: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.468+0000 m31200| 2015-07-19T23:39:54.462+0000 W SHARDING [conn89] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.468+0000 m30998| 2015-07-19T23:39:54.462+0000 W SHARDING [conn83] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 6.0 }, { x: 9.0 }, { x: 12.0 }, { x: 16.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.482+0000 m31200| 2015-07-19T23:39:54.476+0000 I SHARDING [conn135] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.483+0000 m31200| 2015-07-19T23:39:54.477+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 5.0 }, { x: 8.0 }, { x: 11.0 }, { x: 14.0 }, { x: 17.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.483+0000 m31200| 2015-07-19T23:39:54.479+0000 W SHARDING [conn89] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.483+0000 m30998| 2015-07-19T23:39:54.479+0000 W SHARDING [conn83] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 5.0 }, { x: 8.0 }, { x: 11.0 }, { x: 14.0 }, { x: 17.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.488+0000 m31200| 2015-07-19T23:39:54.488+0000 I SHARDING [conn135] request split points lookup for chunk db14.coll14 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.489+0000 m31200| 2015-07-19T23:39:54.488+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 5.0 }, { x: 7.0 }, { x: 10.0 }, { x: 12.0 }, { x: 15.0 }, { x: 18.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.499+0000 m31200| 2015-07-19T23:39:54.489+0000 W SHARDING [conn89] could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db14.coll14 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.500+0000 m30998| 2015-07-19T23:39:54.490+0000 W SHARDING [conn82] splitChunk failed - cmd: { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 5.0 }, { x: 7.0 }, { x: 10.0 }, { x: 12.0 }, { x: 15.0 }, { x: 18.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db14.coll14 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.502+0000 m30998| 2015-07-19T23:39:54.493+0000 I SHARDING [conn79] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 19 version: 1|3||55ac3549d2c1f750d15483ef based on: 1|0||55ac3549d2c1f750d15483ef
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.520+0000 m31200| 2015-07-19T23:39:54.508+0000 I SHARDING [conn98] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:54.508+0000-55ac354ad9a63f6196b1729e", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39669", time: new Date(1437349194508), what: "multi-split", ns: "db14.coll14", details: { before: { min: { x: MinKey }, max: { x: MaxKey } }, number: 3, of: 3, chunk: { min: { x: 15.0 }, max: { x: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('55ac3549d2c1f750d15483ef') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.560+0000 m31200| 2015-07-19T23:39:54.559+0000 I SHARDING [conn98] distributed lock 'db14.coll14/ip-10-139-123-131:31200:1437349131:182555922' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.560+0000 m31200| 2015-07-19T23:39:54.560+0000 I COMMAND [conn98] command db14.coll14 command: splitChunk { splitChunk: "db14.coll14", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs1", splitKeys: [ { x: 0.0 }, { x: 15.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac3549d2c1f750d15483ef') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 95 } } } protocol:op_command 155ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.560+0000 m30999| 2015-07-19T23:39:54.560+0000 I SHARDING [conn83] autosplitted db14.coll14 shard: ns: db14.coll14, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { x: MinKey }, max: { x: MaxKey } into 3 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.752+0000 m30999| 2015-07-19T23:39:54.752+0000 I NETWORK [conn78] end connection 10.139.123.131:57426 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.767+0000 m30999| 2015-07-19T23:39:54.766+0000 I NETWORK [conn75] end connection 10.139.123.131:57416 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.810+0000 m30998| 2015-07-19T23:39:54.809+0000 I NETWORK [conn80] end connection 10.139.123.131:36064 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.825+0000 m30999| 2015-07-19T23:39:54.825+0000 I NETWORK [conn81] end connection 10.139.123.131:57430 (8 connections now open)
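(Note: the burst of "could not acquire collection lock ... code: 125" warnings above is expected contention, not a test failure. As the 20 writers push the single initial chunk past splitThreshold 921, both mongos routers keep asking the shard primary to autosplit the same [MinKey, MaxKey) chunk; the primary serializes splits with the distributed lock 'db14.coll14/...', so exactly one request wins and performs the three-way multi-split at { x: 0 } and { x: 15 } while the losers fail fast with code 125 and retry later. A hedged shell sketch of the equivalent manual split, using the standard sh.splitAt helper with the key values from the winning request:

    sh.splitAt("db14.coll14", { x: 0 });
    sh.splitAt("db14.coll14", { x: 15 });
)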
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.825+0000 m30998| 2015-07-19T23:39:54.825+0000 I NETWORK [conn83] end connection 10.139.123.131:36076 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.845+0000 m30998| 2015-07-19T23:39:54.845+0000 I NETWORK [conn78] end connection 10.139.123.131:36062 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.846+0000 m30999| 2015-07-19T23:39:54.845+0000 I NETWORK [conn77] end connection 10.139.123.131:57425 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.846+0000 m30998| 2015-07-19T23:39:54.846+0000 I NETWORK [conn75] end connection 10.139.123.131:36059 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.879+0000 m30999| 2015-07-19T23:39:54.879+0000 I NETWORK [conn79] end connection 10.139.123.131:57427 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.918+0000 m30999| 2015-07-19T23:39:54.918+0000 I NETWORK [conn76] end connection 10.139.123.131:57424 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.921+0000 m30998| 2015-07-19T23:39:54.921+0000 I NETWORK [conn82] end connection 10.139.123.131:36074 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.925+0000 m30999| 2015-07-19T23:39:54.925+0000 I NETWORK [conn80] end connection 10.139.123.131:57429 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.935+0000 m30998| 2015-07-19T23:39:54.935+0000 I NETWORK [conn76] end connection 10.139.123.131:36060 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.935+0000 m30998| 2015-07-19T23:39:54.935+0000 I NETWORK [conn79] end connection 10.139.123.131:36063 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.936+0000 m30998| 2015-07-19T23:39:54.935+0000 I NETWORK [conn74] end connection 10.139.123.131:36058 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.939+0000 m30999| 2015-07-19T23:39:54.939+0000 I NETWORK [conn84] end connection 10.139.123.131:57434 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.941+0000 m31100| 2015-07-19T23:39:54.941+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.941+0000 m31100| 2015-07-19T23:39:54.941+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.947+0000 m30998| 2015-07-19T23:39:54.947+0000 I NETWORK [conn81] end connection 10.139.123.131:36069 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.957+0000 m30999| 2015-07-19T23:39:54.957+0000 I NETWORK [conn82] end connection 10.139.123.131:57431 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.961+0000 m30998| 2015-07-19T23:39:54.961+0000 I NETWORK [conn77] end connection 10.139.123.131:36061 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:54.972+0000 m30999| 2015-07-19T23:39:54.972+0000 I NETWORK [conn83] end connection 10.139.123.131:57432 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.078+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.078+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.078+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.079+0000 jstests/concurrency/fsm_workloads/indexed_insert_base.js: Workload completed in 1140 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.079+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.079+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.079+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.079+0000 m30999| 2015-07-19T23:39:55.078+0000 I COMMAND [conn1] DROP: db14.coll14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.079+0000 m30999| 2015-07-19T23:39:55.078+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:55.078+0000-55ac354bd2c1f750d15483f1", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349195078), what: "dropCollection.start", ns: "db14.coll14", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.130+0000 m30999| 2015-07-19T23:39:55.130+0000 I SHARDING [conn1] distributed lock 'db14.coll14/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac354bd2c1f750d15483f2
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.131+0000 m31100| 2015-07-19T23:39:55.130+0000 I COMMAND [conn127] CMD: drop db14.coll14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.131+0000 m31100| 2015-07-19T23:39:55.131+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 187ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.131+0000 m31100| 2015-07-19T23:39:55.131+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 187ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.132+0000 m31200| 2015-07-19T23:39:55.132+0000 I COMMAND [conn14] CMD: drop db14.coll14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.132+0000 m31200| 2015-07-19T23:39:55.132+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 201ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.133+0000 m31200| 2015-07-19T23:39:55.132+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 199ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.134+0000 m31102| 2015-07-19T23:39:55.134+0000 I COMMAND [repl writer worker 7] CMD: drop db14.coll14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.134+0000 m31101| 2015-07-19T23:39:55.134+0000 I COMMAND [repl writer worker 12] CMD: drop db14.coll14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.135+0000 m31202| 2015-07-19T23:39:55.135+0000 I COMMAND [repl writer worker 6] CMD: drop db14.coll14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.136+0000 m31201| 2015-07-19T23:39:55.135+0000 I COMMAND [repl writer worker 5] CMD: drop db14.coll14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.185+0000 m31200| 2015-07-19T23:39:55.184+0000 I SHARDING [conn14] remotely refreshing metadata for db14.coll14 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||55ac3549d2c1f750d15483ef, current metadata version is 1|3||55ac3549d2c1f750d15483ef
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.185+0000 m31200| 2015-07-19T23:39:55.185+0000 W SHARDING [conn14] no chunks found when reloading db14.coll14, previous version was 0|0||55ac3549d2c1f750d15483ef, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.185+0000 m31200| 2015-07-19T23:39:55.185+0000 I SHARDING [conn14] dropping metadata for db14.coll14 at shard version 1|3||55ac3549d2c1f750d15483ef, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.185+0000 m30999| 2015-07-19T23:39:55.185+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:55.185+0000-55ac354bd2c1f750d15483f3", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349195185), what: "dropCollection", ns: "db14.coll14", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.236+0000 m30999| 2015-07-19T23:39:55.236+0000 I SHARDING [conn1] distributed lock 'db14.coll14/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.288+0000 m30999| 2015-07-19T23:39:55.288+0000 I COMMAND [conn1] DROP DATABASE: db14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.288+0000 m30999| 2015-07-19T23:39:55.288+0000 I SHARDING [conn1] DBConfig::dropDatabase: db14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.288+0000 m30999| 2015-07-19T23:39:55.288+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:55.288+0000-55ac354bd2c1f750d15483f4", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349195288), what: "dropDatabase.start", ns: "db14", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.390+0000 m30999| 2015-07-19T23:39:55.389+0000 I SHARDING [conn1] DBConfig::dropDatabase: db14 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.391+0000 m31200| 2015-07-19T23:39:55.390+0000 I COMMAND [conn111] dropDatabase db14 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.391+0000 m31200| 2015-07-19T23:39:55.390+0000 I COMMAND [conn111] dropDatabase db14 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.391+0000 m30999| 2015-07-19T23:39:55.390+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:55.390+0000-55ac354bd2c1f750d15483f5", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349195390), what: "dropDatabase", ns: "db14", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.391+0000 m31200| 2015-07-19T23:39:55.390+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 53 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 252ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.392+0000 m31200| 2015-07-19T23:39:55.390+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 61 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 253ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.392+0000 m31201| 2015-07-19T23:39:55.390+0000 I COMMAND [repl writer worker 2] dropDatabase db14 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.392+0000 m31201| 2015-07-19T23:39:55.390+0000 I COMMAND [repl writer worker 2] dropDatabase db14 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.392+0000 m31202| 2015-07-19T23:39:55.390+0000 I COMMAND [repl writer worker 8] dropDatabase db14 starting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.392+0000 m31202| 2015-07-19T23:39:55.390+0000 I COMMAND [repl writer worker 8] dropDatabase db14 finished
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.447+0000 m31100| 2015-07-19T23:39:55.445+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 309ms
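(Note: the teardown above follows the usual sharded-drop sequence: mongos logs dropCollection.start, drops the collection on each shard, removes the chunk metadata ("no chunks found ..., this is a drop"), then drops the database. A hedged sketch of what conn1 drives via mongos, with names taken from the log:

    var d = db.getSiblingDB("db14");
    d.coll14.drop();   // emits the dropCollection.start / dropCollection events above
    d.dropDatabase();  // emits the dropDatabase.start / dropDatabase events above
)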
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.447+0000 m31100| 2015-07-19T23:39:55.445+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 309ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.450+0000 m31100| 2015-07-19T23:39:55.450+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.453+0000 m31102| 2015-07-19T23:39:55.453+0000 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.453+0000 m31101| 2015-07-19T23:39:55.453+0000 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.463+0000 m31200| 2015-07-19T23:39:55.462+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.464+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.464+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.464+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.464+0000 jstests/concurrency/fsm_workloads/touch_index.js
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.464+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.464+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.464+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.466+0000 m31201| 2015-07-19T23:39:55.465+0000 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.466+0000 m31202| 2015-07-19T23:39:55.465+0000 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.467+0000 m30999| 2015-07-19T23:39:55.467+0000 I SHARDING [conn1] distributed lock 'db15/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac354bd2c1f750d15483f6
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.469+0000 m30999| 2015-07-19T23:39:55.469+0000 I SHARDING [conn1] Placing [db15] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.469+0000 m30999| 2015-07-19T23:39:55.469+0000 I SHARDING [conn1] Enabling sharding for database [db15] in config db
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.520+0000 m30999| 2015-07-19T23:39:55.520+0000 I SHARDING [conn1] distributed lock 'db15/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.603+0000 m31200| 2015-07-19T23:39:55.603+0000 I INDEX [conn113] build index on: db15.coll15 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db15.coll15" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.603+0000 m31200| 2015-07-19T23:39:55.603+0000 I INDEX [conn113] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.607+0000 m31200| 2015-07-19T23:39:55.607+0000 I INDEX [conn113] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.608+0000 m30999| 2015-07-19T23:39:55.608+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db15.coll15", key: { tid: 1.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.609+0000 m30999| 2015-07-19T23:39:55.609+0000 I SHARDING [conn1] distributed lock 'db15.coll15/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac354bd2c1f750d15483f7
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.610+0000 m30999| 2015-07-19T23:39:55.610+0000 I SHARDING [conn1] enable sharding on: db15.coll15 with shard key: { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.610+0000 m30999| 2015-07-19T23:39:55.610+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:55.610+0000-55ac354bd2c1f750d15483f8", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349195610), what: "shardCollection.start", ns: "db15.coll15", details: { shardKey: { tid: 1.0 }, collection: "db15.coll15", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 1 } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.612+0000 m31202| 2015-07-19T23:39:55.611+0000 I INDEX [repl writer worker 5] build index on: db15.coll15 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db15.coll15" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.612+0000 m31202| 2015-07-19T23:39:55.611+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.612+0000 m31201| 2015-07-19T23:39:55.612+0000 I INDEX [repl writer worker 0] build index on: db15.coll15 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db15.coll15" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.612+0000 m31201| 2015-07-19T23:39:55.612+0000 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.613+0000 m31202| 2015-07-19T23:39:55.613+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.613+0000 m31201| 2015-07-19T23:39:55.613+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
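(Note: touch_index.js exercises the touch command against db15.coll15, which is sharded on { tid: 1 } and also carries the secondary { x: 1 } index built below. A hedged sketch of the documented command shape; touch is storage-engine dependent, and builds of this era return an error for it under wiredTiger, which a caller must tolerate:

    db.getSiblingDB("db15").runCommand({ touch: "coll15", data: true, index: true });
)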
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.661+0000 m30999| 2015-07-19T23:39:55.661+0000 I SHARDING [conn1] going to create 1 chunk(s) for: db15.coll15 using new epoch 55ac354bd2c1f750d15483f9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.712+0000 m30999| 2015-07-19T23:39:55.712+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db15.coll15: 0ms sequenceNumber: 68 version: 1|0||55ac354bd2c1f750d15483f9 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.764+0000 m30999| 2015-07-19T23:39:55.764+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db15.coll15: 0ms sequenceNumber: 69 version: 1|0||55ac354bd2c1f750d15483f9 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.764+0000 m31200| 2015-07-19T23:39:55.764+0000 I SHARDING [conn126] remotely refreshing metadata for db15.coll15 with requested shard version 1|0||55ac354bd2c1f750d15483f9, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.765+0000 m31200| 2015-07-19T23:39:55.765+0000 I SHARDING [conn126] collection db15.coll15 was previously unsharded, new metadata loaded with shard version 1|0||55ac354bd2c1f750d15483f9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.765+0000 m31200| 2015-07-19T23:39:55.765+0000 I SHARDING [conn126] collection version was loaded at version 1|0||55ac354bd2c1f750d15483f9, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.765+0000 m30999| 2015-07-19T23:39:55.765+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:55.765+0000-55ac354bd2c1f750d15483fa", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349195765), what: "shardCollection", ns: "db15.coll15", details: { version: "1|0||55ac354bd2c1f750d15483f9" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.816+0000 m30999| 2015-07-19T23:39:55.816+0000 I SHARDING [conn1] distributed lock 'db15.coll15/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.819+0000 m31200| 2015-07-19T23:39:55.818+0000 I INDEX [conn126] build index on: db15.coll15 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db15.coll15" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.819+0000 m31200| 2015-07-19T23:39:55.818+0000 I INDEX [conn126] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.821+0000 m31100| 2015-07-19T23:39:55.820+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 365ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.821+0000 m31100| 2015-07-19T23:39:55.820+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 364ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.822+0000 m31200| 2015-07-19T23:39:55.820+0000 I INDEX [conn126] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.822+0000 m31200| 2015-07-19T23:39:55.821+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 208ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.822+0000 m31200| 2015-07-19T23:39:55.821+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 208ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.825+0000 m31100| 2015-07-19T23:39:55.824+0000 I INDEX [conn118] build index on: db15.coll15 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db15.coll15" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.825+0000 m31100| 2015-07-19T23:39:55.824+0000 I INDEX [conn118] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.826+0000 m31202| 2015-07-19T23:39:55.825+0000 I INDEX [repl writer worker 10] build index on: db15.coll15 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db15.coll15" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.826+0000 m31202| 2015-07-19T23:39:55.825+0000 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.827+0000 m31201| 2015-07-19T23:39:55.825+0000 I INDEX [repl writer worker 1] build index on: db15.coll15 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db15.coll15" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.827+0000 m31201| 2015-07-19T23:39:55.825+0000 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.827+0000 m31100| 2015-07-19T23:39:55.826+0000 I INDEX [conn118] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.827+0000 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.828+0000 m31202| 2015-07-19T23:39:55.828+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.828+0000 m31201| 2015-07-19T23:39:55.828+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.831+0000 m31101| 2015-07-19T23:39:55.831+0000 I INDEX [repl writer worker 5] build index on: db15.coll15 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db15.coll15" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.831+0000 m31101| 2015-07-19T23:39:55.831+0000 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.832+0000 m31102| 2015-07-19T23:39:55.831+0000 I INDEX [repl writer worker 14] build index on: db15.coll15 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db15.coll15" }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.832+0000 m31102| 2015-07-19T23:39:55.831+0000 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.833+0000 m31101| 2015-07-19T23:39:55.833+0000 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.833+0000 m31102| 2015-07-19T23:39:55.833+0000 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.967+0000 m30998| 2015-07-19T23:39:55.967+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36077 #84 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.977+0000 m30998| 2015-07-19T23:39:55.976+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36078 #85 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.981+0000 m30999| 2015-07-19T23:39:55.980+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57438 #85 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.991+0000 m30999| 2015-07-19T23:39:55.991+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57439 #86 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.996+0000 m30999| 2015-07-19T23:39:55.996+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57440 #87 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:55.997+0000 m30999| 2015-07-19T23:39:55.997+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57441 #88 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.001+0000 m30998| 2015-07-19T23:39:56.001+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36083 #86 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.010+0000 m30998| 2015-07-19T23:39:56.010+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36084 #87 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.014+0000 m30998| 2015-07-19T23:39:56.014+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36085 #88 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.016+0000 m30999| 2015-07-19T23:39:56.016+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57445 #89 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.025+0000 setting random seed: 3710916703566
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.025+0000 setting random seed: 6428513913415
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.025+0000 setting random seed: 6727326875552
[js_test:fsm_all_sharded_replication]
2015-07-19T23:39:56.029+0000 setting random seed: 6517957574687 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.031+0000 setting random seed: 6353620192967 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.031+0000 setting random seed: 8027499755844 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.032+0000 setting random seed: 488873999565 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.037+0000 m30998| 2015-07-19T23:39:56.036+0000 I SHARDING [conn85] ChunkManager: time to load chunks for db15.coll15: 0ms sequenceNumber: 20 version: 1|0||55ac354bd2c1f750d15483f9 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.037+0000 setting random seed: 9981171251274 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.038+0000 m31200| 2015-07-19T23:39:56.038+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 212ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.038+0000 m31200| 2015-07-19T23:39:56.038+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 211ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.062+0000 setting random seed: 2960762754082 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.065+0000 setting random seed: 5464752530679 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.158+0000 m31200| 2015-07-19T23:39:56.157+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.159+0000 m31200| 2015-07-19T23:39:56.158+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.159+0000 m31200| 2015-07-19T23:39:56.158+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.159+0000 m31200| 2015-07-19T23:39:56.158+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.159+0000 m31200| 2015-07-19T23:39:56.158+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.159+0000 m31200| 2015-07-19T23:39:56.158+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.160+0000 m31200| 2015-07-19T23:39:56.158+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.160+0000 m31200| 2015-07-19T23:39:56.158+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.160+0000 m31200| 2015-07-19T23:39:56.159+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db15.coll15", 
keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.160+0000 m31200| 2015-07-19T23:39:56.160+0000 I SHARDING [conn89] distributed lock 'db15.coll15/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac354cd9a63f6196b172a0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.161+0000 m31200| 2015-07-19T23:39:56.160+0000 I SHARDING [conn89] remotely refreshing metadata for db15.coll15 based on current shard version 1|0||55ac354bd2c1f750d15483f9, current metadata version is 1|0||55ac354bd2c1f750d15483f9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.175+0000 m31200| 2015-07-19T23:39:56.171+0000 I SHARDING [conn89] metadata of collection db15.coll15 already up to date (shard version : 1|0||55ac354bd2c1f750d15483f9, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.176+0000 m31200| 2015-07-19T23:39:56.171+0000 I SHARDING [conn89] splitChunk accepted at version 1|0||55ac354bd2c1f750d15483f9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.176+0000 m31200| 2015-07-19T23:39:56.171+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.176+0000 m31200| 2015-07-19T23:39:56.172+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.176+0000 m31200| 2015-07-19T23:39:56.173+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.176+0000 m31200| 2015-07-19T23:39:56.173+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.177+0000 m31200| 2015-07-19T23:39:56.173+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.177+0000 m31200| 2015-07-19T23:39:56.173+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.177+0000 m31200| 2015-07-19T23:39:56.173+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.177+0000 m31200| 2015-07-19T23:39:56.173+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.177+0000 m31200| 2015-07-19T23:39:56.173+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.178+0000 m31200| 2015-07-19T23:39:56.173+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.178+0000 m31200| 2015-07-19T23:39:56.173+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.178+0000 m31200| 
2015-07-19T23:39:56.173+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.178+0000 m31200| 2015-07-19T23:39:56.173+0000 I SHARDING [conn142] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.178+0000 m31200| 2015-07-19T23:39:56.173+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.179+0000 m31200| 2015-07-19T23:39:56.174+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.179+0000 m31200| 2015-07-19T23:39:56.174+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.179+0000 m31200| 2015-07-19T23:39:56.174+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.180+0000 m30999| 2015-07-19T23:39:56.177+0000 W SHARDING [conn85] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.181+0000 m31200| 2015-07-19T23:39:56.176+0000 W SHARDING [conn98] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.181+0000 m31200| 2015-07-19T23:39:56.180+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.181+0000 m30998| 2015-07-19T23:39:56.180+0000 W SHARDING [conn85] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.186+0000 m31200| 2015-07-19T23:39:56.185+0000 I COMMAND [conn25] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 8798 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 118ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.192+0000 m31200| 2015-07-19T23:39:56.185+0000 I SHARDING [conn89] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.185+0000-55ac354cd9a63f6196b172a1", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39660", time: new Date(1437349196185), what: "multi-split", ns: "db15.coll15", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 9, chunk: { min: { tid: MinKey }, max: { tid: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('55ac354bd2c1f750d15483f9') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.192+0000 m31200| 2015-07-19T23:39:56.185+0000 I SHARDING [conn142] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.192+0000 m31200| 2015-07-19T23:39:56.190+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.192+0000 m31200| 2015-07-19T23:39:56.190+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.193+0000 m31200| 2015-07-19T23:39:56.190+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.193+0000 m31200| 2015-07-19T23:39:56.190+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.193+0000 m31200| 2015-07-19T23:39:56.190+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.193+0000 m31200| 2015-07-19T23:39:56.190+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.193+0000 m31200| 2015-07-19T23:39:56.190+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 
- key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.193+0000 m31200| 2015-07-19T23:39:56.190+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.194+0000 m31200| 2015-07-19T23:39:56.190+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.194+0000 m31200| 2015-07-19T23:39:56.190+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.194+0000 m31200| 2015-07-19T23:39:56.190+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.194+0000 m31200| 2015-07-19T23:39:56.185+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.194+0000 m31200| 2015-07-19T23:39:56.191+0000 W SHARDING [conn98] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.195+0000 m30999| 2015-07-19T23:39:56.191+0000 W SHARDING [conn89] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.196+0000 m31200| 2015-07-19T23:39:56.191+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.199+0000 m31200| 2015-07-19T23:39:56.192+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.200+0000 m31200| 2015-07-19T23:39:56.192+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.200+0000 m31200| 2015-07-19T23:39:56.192+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.200+0000 m31200| 2015-07-19T23:39:56.192+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.209+0000 m31200| 2015-07-19T23:39:56.192+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.209+0000 m31200| 2015-07-19T23:39:56.192+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.210+0000 m31200| 2015-07-19T23:39:56.192+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.210+0000 m31200| 2015-07-19T23:39:56.192+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.210+0000 m31200| 2015-07-19T23:39:56.192+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.210+0000 m31200| 2015-07-19T23:39:56.195+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.211+0000 m31200| 2015-07-19T23:39:56.196+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.211+0000 m30999| 2015-07-19T23:39:56.197+0000 W SHARDING [conn88] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.212+0000 m30998| 2015-07-19T23:39:56.197+0000 W SHARDING [conn84] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.212+0000 m31200| 2015-07-19T23:39:56.197+0000 W SHARDING [conn98] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.212+0000 m31200| 2015-07-19T23:39:56.197+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.234+0000 m31200| 2015-07-19T23:39:56.234+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.237+0000 m31200| 2015-07-19T23:39:56.236+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.237+0000 m31200| 2015-07-19T23:39:56.236+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.237+0000 m31200| 2015-07-19T23:39:56.236+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.237+0000 m31200| 2015-07-19T23:39:56.236+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.237+0000 m31200| 2015-07-19T23:39:56.236+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.238+0000 m31200| 2015-07-19T23:39:56.236+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.238+0000 m31200| 2015-07-19T23:39:56.236+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.238+0000 m31200| 2015-07-19T23:39:56.236+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.238+0000 m31200| 2015-07-19T23:39:56.236+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.238+0000 m31200| 2015-07-19T23:39:56.236+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.239+0000 m31200| 2015-07-19T23:39:56.237+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.239+0000 m31200| 2015-07-19T23:39:56.239+0000 I SHARDING [conn89] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.239+0000-55ac354cd9a63f6196b172a2", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39660", time: new Date(1437349196239), what: "multi-split", ns: "db15.coll15", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 9, chunk: { min: { tid: 0.0 }, max: { tid: 2.0 }, lastmod: Timestamp 1000|2, 
lastmodEpoch: ObjectId('55ac354bd2c1f750d15483f9') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.239+0000 m30999| 2015-07-19T23:39:56.239+0000 W SHARDING [conn86] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.240+0000 m31200| 2015-07-19T23:39:56.239+0000 W SHARDING [conn98] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.250+0000 m29000| 2015-07-19T23:39:56.250+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:55746 #42 (42 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.255+0000 m31200| 2015-07-19T23:39:56.253+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.256+0000 m31200| 2015-07-19T23:39:56.254+0000 I COMMAND [conn115] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 30424 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 166ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.256+0000 m31200| 2015-07-19T23:39:56.254+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.257+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.257+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.257+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.257+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.257+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.258+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn14] possible low cardinality key detected 
in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.258+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.258+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.258+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.258+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.259+0000 m31200| 2015-07-19T23:39:56.256+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.259+0000 m30999| 2015-07-19T23:39:56.257+0000 W SHARDING [conn87] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.260+0000 m31200| 2015-07-19T23:39:56.257+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.260+0000 m31200| 2015-07-19T23:39:56.257+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.260+0000 m31200| 2015-07-19T23:39:56.257+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.260+0000 m31200| 2015-07-19T23:39:56.258+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.260+0000 m31200| 2015-07-19T23:39:56.258+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.261+0000 m31200| 2015-07-19T23:39:56.258+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.261+0000 m31200| 2015-07-19T23:39:56.258+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.261+0000 m31200| 2015-07-19T23:39:56.258+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.261+0000 m31200| 2015-07-19T23:39:56.258+0000 W SHARDING 
[conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.262+0000 m31200| 2015-07-19T23:39:56.258+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.262+0000 m31200| 2015-07-19T23:39:56.258+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.262+0000 m31200| 2015-07-19T23:39:56.258+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.262+0000 m31200| 2015-07-19T23:39:56.259+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.263+0000 m30998| 2015-07-19T23:39:56.260+0000 W SHARDING [conn87] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.299+0000 m31200| 2015-07-19T23:39:56.260+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.300+0000 m31200| 2015-07-19T23:39:56.260+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.300+0000 m31200| 2015-07-19T23:39:56.260+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.300+0000 m31200| 2015-07-19T23:39:56.260+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.300+0000 m31200| 2015-07-19T23:39:56.260+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.300+0000 m31200| 2015-07-19T23:39:56.260+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.300+0000 m31200| 2015-07-19T23:39:56.260+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.313+0000 m31200| 2015-07-19T23:39:56.264+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.314+0000 m30998| 2015-07-19T23:39:56.266+0000 W SHARDING [conn84] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.314+0000 m31200| 2015-07-19T23:39:56.266+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.314+0000 m31200| 2015-07-19T23:39:56.287+0000 I COMMAND [conn27] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 19403 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 121ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.315+0000 m31200| 2015-07-19T23:39:56.288+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.315+0000 m31200| 2015-07-19T23:39:56.289+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.348+0000 m31200| 2015-07-19T23:39:56.289+0000 I SHARDING [conn89] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.289+0000-55ac354cd9a63f6196b172a3", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39660", time: new Date(1437349196289), what: "multi-split", ns: "db15.coll15", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 9, chunk: { min: { tid: 2.0 }, max: { tid: 3.0 }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('55ac354bd2c1f750d15483f9') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.348+0000 m30999| 2015-07-19T23:39:56.295+0000 W SHARDING [conn89] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.348+0000 m31200| 2015-07-19T23:39:56.292+0000 I SHARDING [conn142] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.348+0000 m31200| 2015-07-19T23:39:56.292+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.349+0000 m31200| 2015-07-19T23:39:56.292+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.349+0000 m31200| 2015-07-19T23:39:56.292+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.349+0000 m31200| 2015-07-19T23:39:56.292+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.349+0000 m31200| 2015-07-19T23:39:56.292+0000 W SHARDING [conn135] possible low 
cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.349+0000 m31200| 2015-07-19T23:39:56.292+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.349+0000 m31200| 2015-07-19T23:39:56.292+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.350+0000 m31200| 2015-07-19T23:39:56.292+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.350+0000 m31200| 2015-07-19T23:39:56.292+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.350+0000 m31200| 2015-07-19T23:39:56.292+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.350+0000 m31200| 2015-07-19T23:39:56.293+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.350+0000 m31200| 2015-07-19T23:39:56.293+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.350+0000 m31200| 2015-07-19T23:39:56.293+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.351+0000 m31200| 2015-07-19T23:39:56.293+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.351+0000 m31200| 2015-07-19T23:39:56.293+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.351+0000 m31200| 2015-07-19T23:39:56.293+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.351+0000 m31200| 2015-07-19T23:39:56.293+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.351+0000 m31200| 2015-07-19T23:39:56.293+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.351+0000 m31200| 2015-07-19T23:39:56.293+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.352+0000 m31200| 2015-07-19T23:39:56.293+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.352+0000 m31200| 2015-07-19T23:39:56.293+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: 
ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.374+0000 m31200| 2015-07-19T23:39:56.295+0000 W SHARDING [conn98] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.374+0000 m31200| 2015-07-19T23:39:56.295+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.374+0000 m31200| 2015-07-19T23:39:56.295+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.375+0000 m31200| 2015-07-19T23:39:56.295+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.375+0000 m31200| 2015-07-19T23:39:56.295+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.375+0000 m31200| 2015-07-19T23:39:56.295+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.375+0000 m31200| 2015-07-19T23:39:56.295+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.376+0000 m31200| 2015-07-19T23:39:56.295+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.376+0000 m31200| 2015-07-19T23:39:56.295+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.376+0000 m31200| 2015-07-19T23:39:56.299+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.377+0000 m30998| 2015-07-19T23:39:56.300+0000 W SHARDING [conn88] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.377+0000 m31200| 
2015-07-19T23:39:56.300+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.377+0000 m31200| 2015-07-19T23:39:56.301+0000 W SHARDING [conn98] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.394+0000 m30999| 2015-07-19T23:39:56.301+0000 W SHARDING [conn85] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.437+0000 m31200| 2015-07-19T23:39:56.302+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.437+0000 m31200| 2015-07-19T23:39:56.306+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.438+0000 m31200| 2015-07-19T23:39:56.306+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.438+0000 m31200| 2015-07-19T23:39:56.306+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.438+0000 m31200| 2015-07-19T23:39:56.306+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.439+0000 m31200| 2015-07-19T23:39:56.306+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.439+0000 m31200| 2015-07-19T23:39:56.306+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.439+0000 m31200| 2015-07-19T23:39:56.306+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.439+0000 m31200| 2015-07-19T23:39:56.306+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.439+0000 m31200| 2015-07-19T23:39:56.306+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.440+0000 m31200| 2015-07-19T23:39:56.306+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.440+0000 m31200| 
2015-07-19T23:39:56.307+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.440+0000 m31200| 2015-07-19T23:39:56.308+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.466+0000 m30998| 2015-07-19T23:39:56.308+0000 W SHARDING [conn85] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.466+0000 m31200| 2015-07-19T23:39:56.318+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.482+0000 m30999| 2015-07-19T23:39:56.318+0000 W SHARDING [conn88] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.482+0000 m31200| 2015-07-19T23:39:56.335+0000 I SHARDING [conn142] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.482+0000 m31200| 2015-07-19T23:39:56.336+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.483+0000 m31200| 2015-07-19T23:39:56.340+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.483+0000 m31200| 2015-07-19T23:39:56.340+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.483+0000 m31200| 2015-07-19T23:39:56.340+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.483+0000 m31200| 
2015-07-19T23:39:56.340+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.483+0000 m31200| 2015-07-19T23:39:56.340+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.483+0000 m31200| 2015-07-19T23:39:56.340+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.484+0000 m31200| 2015-07-19T23:39:56.340+0000 I SHARDING [conn89] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.340+0000-55ac354cd9a63f6196b172a4", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39660", time: new Date(1437349196340), what: "multi-split", ns: "db15.coll15", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 4, of: 9, chunk: { min: { tid: 3.0 }, max: { tid: 5.0 }, lastmod: Timestamp 1000|4, lastmodEpoch: ObjectId('55ac354bd2c1f750d15483f9') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.484+0000 m31200| 2015-07-19T23:39:56.340+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.485+0000 m31200| 2015-07-19T23:39:56.340+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.485+0000 m31200| 2015-07-19T23:39:56.340+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.485+0000 m31200| 2015-07-19T23:39:56.340+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.485+0000 m31200| 2015-07-19T23:39:56.341+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.486+0000 m31200| 2015-07-19T23:39:56.341+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.486+0000 m31200| 2015-07-19T23:39:56.341+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.486+0000 m31200| 2015-07-19T23:39:56.341+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.486+0000 m31200| 2015-07-19T23:39:56.342+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.486+0000 m31200| 2015-07-19T23:39:56.342+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.486+0000 m31200| 2015-07-19T23:39:56.342+0000 W SHARDING [conn135] possible 
low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.486+0000 m31200| 2015-07-19T23:39:56.342+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.487+0000 m31200| 2015-07-19T23:39:56.342+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.487+0000 m31200| 2015-07-19T23:39:56.342+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.487+0000 m31200| 2015-07-19T23:39:56.342+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.487+0000 m30999| 2015-07-19T23:39:56.342+0000 W SHARDING [conn86] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.505+0000 m31200| 2015-07-19T23:39:56.342+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.506+0000 m31200| 2015-07-19T23:39:56.343+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.506+0000 m30998| 2015-07-19T23:39:56.344+0000 W SHARDING [conn84] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.567+0000 m31200| 2015-07-19T23:39:56.344+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.567+0000 m30998| 2015-07-19T23:39:56.354+0000 I NETWORK [conn84] end connection 10.139.123.131:36077 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.567+0000 m31200| 2015-07-19T23:39:56.385+0000 I COMMAND [conn28] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 118ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.568+0000 m31200| 2015-07-19T23:39:56.385+0000 I SHARDING [conn142] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.568+0000 m31200| 2015-07-19T23:39:56.390+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.568+0000 m31200| 2015-07-19T23:39:56.390+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.568+0000 m31200| 2015-07-19T23:39:56.390+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.568+0000 m31200| 2015-07-19T23:39:56.391+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.568+0000 m31200| 2015-07-19T23:39:56.391+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.569+0000 m31200| 2015-07-19T23:39:56.391+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.569+0000 m31200| 2015-07-19T23:39:56.391+0000 I SHARDING [conn89] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.391+0000-55ac354cd9a63f6196b172a5", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39660", time: new Date(1437349196391), what: "multi-split", ns: "db15.coll15", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 5, of: 9, chunk: { min: { tid: 5.0 }, max: { tid: 6.0 }, lastmod: Timestamp 1000|5, lastmodEpoch: ObjectId('55ac354bd2c1f750d15483f9') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.569+0000 m31200| 2015-07-19T23:39:56.391+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.569+0000 m31200| 2015-07-19T23:39:56.391+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.569+0000 m31200| 2015-07-19T23:39:56.391+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.570+0000 m31200| 2015-07-19T23:39:56.391+0000 W SHARDING [conn142] 
possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.570+0000 m31200| 2015-07-19T23:39:56.391+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.570+0000 m30999| 2015-07-19T23:39:56.393+0000 W SHARDING [conn87] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.570+0000 m31200| 2015-07-19T23:39:56.393+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.571+0000 m31200| 2015-07-19T23:39:56.398+0000 I SHARDING [conn142] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.580+0000 m31200| 2015-07-19T23:39:56.401+0000 I COMMAND [conn116] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.580+0000 m31200| 2015-07-19T23:39:56.402+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.580+0000 m31200| 2015-07-19T23:39:56.403+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.580+0000 m31200| 2015-07-19T23:39:56.403+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.580+0000 m31200| 2015-07-19T23:39:56.403+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.581+0000 m31200| 2015-07-19T23:39:56.403+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.581+0000 m31200| 
2015-07-19T23:39:56.403+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.581+0000 m31200| 2015-07-19T23:39:56.403+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.581+0000 m31200| 2015-07-19T23:39:56.403+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.587+0000 m31200| 2015-07-19T23:39:56.403+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.588+0000 m31200| 2015-07-19T23:39:56.403+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.588+0000 m31200| 2015-07-19T23:39:56.403+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.588+0000 m31200| 2015-07-19T23:39:56.403+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.589+0000 m31200| 2015-07-19T23:39:56.406+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.590+0000 m31200| 2015-07-19T23:39:56.406+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.590+0000 m31200| 2015-07-19T23:39:56.406+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.590+0000 m31200| 2015-07-19T23:39:56.406+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.590+0000 m31200| 2015-07-19T23:39:56.406+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.590+0000 m31200| 2015-07-19T23:39:56.406+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.591+0000 m31200| 2015-07-19T23:39:56.406+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.592+0000 m31200| 2015-07-19T23:39:56.406+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.592+0000 m31200| 2015-07-19T23:39:56.406+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.592+0000 m31200| 2015-07-19T23:39:56.407+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: 
"test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.592+0000 m31200| 2015-07-19T23:39:56.407+0000 I SHARDING [conn98] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.593+0000 m31200| 2015-07-19T23:39:56.408+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.593+0000 m31200| 2015-07-19T23:39:56.408+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.594+0000 m31200| 2015-07-19T23:39:56.408+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.594+0000 m31200| 2015-07-19T23:39:56.408+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.594+0000 m31200| 2015-07-19T23:39:56.408+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.594+0000 m31200| 2015-07-19T23:39:56.408+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.594+0000 m31200| 2015-07-19T23:39:56.409+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.594+0000 m31200| 2015-07-19T23:39:56.409+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.595+0000 m31200| 2015-07-19T23:39:56.409+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.595+0000 m31200| 2015-07-19T23:39:56.409+0000 W SHARDING [conn98] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.597+0000 m30999| 2015-07-19T23:39:56.409+0000 W SHARDING [conn89] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.597+0000 m30999| 2015-07-19T23:39:56.409+0000 W SHARDING [conn88] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.597+0000 m31200| 2015-07-19T23:39:56.421+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.598+0000 m31200| 2015-07-19T23:39:56.421+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.598+0000 m31200| 2015-07-19T23:39:56.422+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.599+0000 m31200| 2015-07-19T23:39:56.423+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.599+0000 m30998| 2015-07-19T23:39:56.423+0000 W SHARDING [conn85] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.599+0000 m31200| 2015-07-19T23:39:56.442+0000 I SHARDING [conn89] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.442+0000-55ac354cd9a63f6196b172a6", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39660", time: new Date(1437349196442), what: "multi-split", ns: "db15.coll15", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 6, of: 9, chunk: { min: { tid: 6.0 }, max: { tid: 7.0 }, lastmod: Timestamp 1000|6, lastmodEpoch: ObjectId('55ac354bd2c1f750d15483f9') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.601+0000 m31200| 2015-07-19T23:39:56.447+0000 I COMMAND [conn113] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 141ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.601+0000 m31200| 2015-07-19T23:39:56.448+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.601+0000 m31200| 2015-07-19T23:39:56.450+0000 I COMMAND [conn29] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 151ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.601+0000 m31200| 2015-07-19T23:39:56.451+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.602+0000 m31200| 2015-07-19T23:39:56.453+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.602+0000 m31200| 2015-07-19T23:39:56.453+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.606+0000 m31200| 2015-07-19T23:39:56.453+0000 W SHARDING [conn14] possible low cardinality key detected in 
db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.606+0000 m31200| 2015-07-19T23:39:56.453+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.606+0000 m31200| 2015-07-19T23:39:56.453+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.606+0000 m31200| 2015-07-19T23:39:56.453+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.606+0000 m31200| 2015-07-19T23:39:56.453+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.606+0000 m31200| 2015-07-19T23:39:56.453+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.607+0000 m31200| 2015-07-19T23:39:56.454+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.607+0000 m31200| 2015-07-19T23:39:56.454+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.608+0000 m31200| 2015-07-19T23:39:56.454+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.608+0000 m30999| 2015-07-19T23:39:56.456+0000 W SHARDING [conn85] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.608+0000 m31200| 2015-07-19T23:39:56.455+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.609+0000 m31200| 2015-07-19T23:39:56.457+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.609+0000 m31200| 2015-07-19T23:39:56.457+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.609+0000 m31200| 2015-07-19T23:39:56.457+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.609+0000 m31200| 2015-07-19T23:39:56.457+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.609+0000 m31200| 2015-07-19T23:39:56.457+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.610+0000 m31200| 2015-07-19T23:39:56.457+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.610+0000 m31200| 2015-07-19T23:39:56.457+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.611+0000 m31200| 2015-07-19T23:39:56.457+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.611+0000 m31200| 2015-07-19T23:39:56.457+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.611+0000 m31200| 2015-07-19T23:39:56.457+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.611+0000 m31200| 2015-07-19T23:39:56.458+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.612+0000 m30998| 2015-07-19T23:39:56.460+0000 W SHARDING [conn87] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.612+0000 m31200| 2015-07-19T23:39:56.460+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.612+0000 m31200| 2015-07-19T23:39:56.490+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.612+0000 m31200| 2015-07-19T23:39:56.491+0000 I SHARDING [conn142] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.613+0000 m31200| 2015-07-19T23:39:56.492+0000 I SHARDING [conn89] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.492+0000-55ac354cd9a63f6196b172a7", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39660", time: new Date(1437349196492), what: "multi-split", ns: "db15.coll15", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 7, of: 9, chunk: { min: { tid: 7.0 }, max: { tid: 8.0 }, lastmod: Timestamp 1000|7, lastmodEpoch: ObjectId('55ac354bd2c1f750d15483f9') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.613+0000 m31200| 2015-07-19T23:39:56.496+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.613+0000 m31200| 2015-07-19T23:39:56.496+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.613+0000 m31200| 2015-07-19T23:39:56.496+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.613+0000 m31200| 2015-07-19T23:39:56.496+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.613+0000 m31200| 2015-07-19T23:39:56.496+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.613+0000 m31200| 2015-07-19T23:39:56.497+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.614+0000 m31200| 2015-07-19T23:39:56.497+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.614+0000 m31200| 2015-07-19T23:39:56.497+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.614+0000 m31200| 2015-07-19T23:39:56.497+0000 W SHARDING [conn142] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.614+0000 m31200| 2015-07-19T23:39:56.497+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.615+0000 m30999| 2015-07-19T23:39:56.499+0000 W SHARDING [conn88] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, 
from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.615+0000 m31200| 2015-07-19T23:39:56.499+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.615+0000 m31200| 2015-07-19T23:39:56.506+0000 I COMMAND [conn115] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 145ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.615+0000 m31200| 2015-07-19T23:39:56.506+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.616+0000 m31200| 2015-07-19T23:39:56.507+0000 I SHARDING [conn134] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.616+0000 m31200| 2015-07-19T23:39:56.511+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.616+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.616+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.616+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.616+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.617+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn134] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.617+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn134] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.617+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn134] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.617+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn134] possible 
low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.617+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn134] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.617+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn134] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.618+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn134] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.618+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.618+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.618+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.618+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.618+0000 m31200| 2015-07-19T23:39:56.514+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.619+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn134] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.619+0000 m31200| 2015-07-19T23:39:56.514+0000 W SHARDING [conn134] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.619+0000 m31200| 2015-07-19T23:39:56.514+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.619+0000 m31200| 2015-07-19T23:39:56.513+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.619+0000 m31200| 2015-07-19T23:39:56.514+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.620+0000 m31200| 2015-07-19T23:39:56.514+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.620+0000 m31200| 
2015-07-19T23:39:56.515+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.620+0000 m31200| 2015-07-19T23:39:56.515+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.620+0000 m31200| 2015-07-19T23:39:56.515+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.620+0000 m31200| 2015-07-19T23:39:56.515+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.621+0000 m31200| 2015-07-19T23:39:56.515+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.621+0000 m31200| 2015-07-19T23:39:56.515+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.621+0000 m31200| 2015-07-19T23:39:56.515+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.621+0000 m30999| 2015-07-19T23:39:56.516+0000 W SHARDING [conn89] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.621+0000 m31200| 2015-07-19T23:39:56.515+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.670+0000 m31200| 2015-07-19T23:39:56.516+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.670+0000 m31200| 2015-07-19T23:39:56.518+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:39849 #146 (74 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.671+0000 m30998| 2015-07-19T23:39:56.516+0000 W SHARDING [conn85] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.671+0000 m30998| 2015-07-19T23:39:56.521+0000 I NETWORK [conn85] end connection 10.139.123.131:36078 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.671+0000 m31200| 2015-07-19T23:39:56.528+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.671+0000 m31200| 2015-07-19T23:39:56.528+0000 I SHARDING [conn146] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.671+0000 m31200| 2015-07-19T23:39:56.529+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.672+0000 m31200| 2015-07-19T23:39:56.530+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.672+0000 m30998| 2015-07-19T23:39:56.530+0000 W SHARDING [conn88] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.683+0000 m31200| 2015-07-19T23:39:56.532+0000 I COMMAND [conn112] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 156ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.683+0000 m31200| 2015-07-19T23:39:56.533+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.684+0000 m31200| 2015-07-19T23:39:56.543+0000 I SHARDING [conn89] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.543+0000-55ac354cd9a63f6196b172a8", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39660", time: new Date(1437349196543), what: "multi-split", ns: "db15.coll15", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 8, of: 9, chunk: { min: { tid: 8.0 }, max: { tid: 9.0 }, lastmod: Timestamp 1000|8, lastmodEpoch: ObjectId('55ac354bd2c1f750d15483f9') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.684+0000 m31200| 2015-07-19T23:39:56.558+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.684+0000 m31200| 2015-07-19T23:39:56.560+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.684+0000 m31200| 2015-07-19T23:39:56.560+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.684+0000 m31200| 2015-07-19T23:39:56.560+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.684+0000 m31200| 2015-07-19T23:39:56.560+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.685+0000 m31200| 2015-07-19T23:39:56.560+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.685+0000 m31200| 2015-07-19T23:39:56.560+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.685+0000 m31200| 2015-07-19T23:39:56.560+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.685+0000 m31200| 2015-07-19T23:39:56.560+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.685+0000 m31200| 2015-07-19T23:39:56.560+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.685+0000 m31200| 2015-07-19T23:39:56.560+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.686+0000 m31200| 2015-07-19T23:39:56.562+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.686+0000 m30999| 2015-07-19T23:39:56.562+0000 W SHARDING [conn87] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.686+0000 m31200| 2015-07-19T23:39:56.575+0000 I SHARDING [conn146] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.686+0000 m30999| 2015-07-19T23:39:56.575+0000 I NETWORK [conn87] end connection 10.139.123.131:57440 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.695+0000 m31200| 2015-07-19T23:39:56.593+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.696+0000 m31200| 2015-07-19T23:39:56.593+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.696+0000 m31200| 2015-07-19T23:39:56.593+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.696+0000 m31200| 2015-07-19T23:39:56.593+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.697+0000 m31200| 2015-07-19T23:39:56.593+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:56.706+0000 m31200| 2015-07-19T23:39:56.594+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.713+0000 m31200| 2015-07-19T23:39:56.594+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.714+0000 m31200| 2015-07-19T23:39:56.594+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.714+0000 m31200| 2015-07-19T23:39:56.594+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.714+0000 m31200| 2015-07-19T23:39:56.594+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.714+0000 m31200| 2015-07-19T23:39:56.594+0000 I SHARDING [conn89] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.594+0000-55ac354cd9a63f6196b172a9", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39660", time: new Date(1437349196594), what: "multi-split", ns: "db15.coll15", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 9, of: 9, chunk: { min: { tid: 9.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|9, lastmodEpoch: ObjectId('55ac354bd2c1f750d15483f9') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.715+0000 m31200| 2015-07-19T23:39:56.594+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.715+0000 m30999| 2015-07-19T23:39:56.596+0000 W SHARDING [conn89] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.715+0000 m31200| 2015-07-19T23:39:56.596+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
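
Each sub-split the winning shard performs is recorded on the config servers as a "multi-split" changelog event — that is what the "about to log metadata event ... what: 'multi-split' ... number: 8, of: 9" and "number: 9, of: 9" entries above are. One way to review those events after the fact, assuming a shell connected to a mongos (or directly to test-configRS):

    // List the multi-split changelog entries for db15.coll15, newest first.
    // config.changelog is the collection the "about to log metadata event"
    // lines write into.
    db.getSiblingDB("config").changelog
      .find({ what: "multi-split", ns: "db15.coll15" })
      .sort({ time: -1 })
      .forEach(printjson);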
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.715+0000 m31200| 2015-07-19T23:39:56.602+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.716+0000 m31200| 2015-07-19T23:39:56.602+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.716+0000 m31200| 2015-07-19T23:39:56.602+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.716+0000 m31200| 2015-07-19T23:39:56.602+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.716+0000 m31200| 2015-07-19T23:39:56.602+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.716+0000 m31200| 2015-07-19T23:39:56.602+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.716+0000 m31200| 2015-07-19T23:39:56.602+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.717+0000 m31200| 2015-07-19T23:39:56.602+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.717+0000 m31200| 2015-07-19T23:39:56.602+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.717+0000 m31200| 2015-07-19T23:39:56.603+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.733+0000 m31200| 2015-07-19T23:39:56.603+0000 I SHARDING [conn86] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.733+0000 m31200| 2015-07-19T23:39:56.602+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.739+0000 m31200| 2015-07-19T23:39:56.603+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.743+0000 m31200| 2015-07-19T23:39:56.603+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.744+0000 m31200| 2015-07-19T23:39:56.603+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.745+0000 m31200| 2015-07-19T23:39:56.603+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 
2015-07-19T23:39:56.745+0000 m31200| 2015-07-19T23:39:56.603+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.746+0000 m31200| 2015-07-19T23:39:56.603+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.753+0000 m31200| 2015-07-19T23:39:56.603+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.753+0000 m31200| 2015-07-19T23:39:56.603+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.754+0000 m31200| 2015-07-19T23:39:56.603+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.754+0000 m30998| 2015-07-19T23:39:56.605+0000 W SHARDING [conn87] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.764+0000 m31200| 2015-07-19T23:39:56.604+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.764+0000 m31200| 2015-07-19T23:39:56.605+0000 W SHARDING [conn86] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.765+0000 m30999| 2015-07-19T23:39:56.605+0000 W SHARDING [conn86] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.770+0000 m31200| 2015-07-19T23:39:56.605+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
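
The repeated "possible low cardinality key detected" warnings are expected for this workload rather than a defect: the shard key { tid: 1.0 } only ever takes the ten worker-thread ids 0 through 9, so every candidate split point found by the split-vector pass lands on one of those ten values, and a chunk holding a single tid value can never be split further. A rough cardinality check of the kind the warning is hinting at, assuming the collection is small enough for distinct() to be cheap:

    // Rough shard-key cardinality check. Ten distinct tid values across a
    // few hundred documents is exactly what triggers the warning above.
    var coll = db.getSiblingDB("db15").coll15;
    var distinctTids = coll.distinct("tid").length;
    print("distinct tid values: " + distinctTids +
          " out of " + coll.count() + " documents");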
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.770+0000 m30999| 2015-07-19T23:39:56.611+0000 I NETWORK [conn89] end connection 10.139.123.131:57445 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.771+0000 m30999| 2015-07-19T23:39:56.616+0000 I NETWORK [conn86] end connection 10.139.123.131:57439 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.773+0000 m31200| 2015-07-19T23:39:56.629+0000 I COMMAND [conn28] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 115ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.773+0000 m31200| 2015-07-19T23:39:56.632+0000 I COMMAND [conn113] command db15.$cmd command: insert { insert: "coll15", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('55ac354bd2c1f750d15483f9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 168ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.774+0000 m31200| 2015-07-19T23:39:56.632+0000 I SHARDING [conn146] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.774+0000 m31200| 2015-07-19T23:39:56.639+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.776+0000 m31200| 2015-07-19T23:39:56.639+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.776+0000 m31200| 2015-07-19T23:39:56.639+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.776+0000 m31200| 2015-07-19T23:39:56.644+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.777+0000 m31200| 2015-07-19T23:39:56.645+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.777+0000 m31200| 2015-07-19T23:39:56.645+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.779+0000 m31200| 2015-07-19T23:39:56.645+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.779+0000 m31200| 2015-07-19T23:39:56.645+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.780+0000 m31200| 
2015-07-19T23:39:56.645+0000 W SHARDING [conn146] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.780+0000 m31200| 2015-07-19T23:39:56.646+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.780+0000 m31200| 2015-07-19T23:39:56.647+0000 I SHARDING [conn89] distributed lock 'db15.coll15/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.783+0000 m31200| 2015-07-19T23:39:56.647+0000 I COMMAND [conn89] command db15.coll15 command: splitChunk { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 20607 } } } protocol:op_command 488ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.784+0000 m31200| 2015-07-19T23:39:56.647+0000 W SHARDING [conn18] could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db15.coll15 is taken. 
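
Only one splitChunk makes progress at a time because the shard serializes splits behind the distributed lock 'db15.coll15/ip-10-139-123-131:31200:...': the winner above acquires it, commits the split (488ms total, including 20607µs waiting on the collection W lock), and unlocks, while concurrent attempts keep failing with "lock ... is taken". Note that once the split commits, stragglers fail differently — "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale" in the entries that follow — because the original full-range chunk no longer exists. The lock state lives on the config servers and can be inspected while a split or migration is in flight, for example:

    // Inspect the distributed lock document for the namespace. A held lock
    // generally shows state: 2, a "why" naming the operation, and a "ts"
    // matching the ts values logged above.
    db.getSiblingDB("config").locks
      .find({ _id: "db15.coll15" })
      .forEach(printjson);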
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.784+0000 m31200| 2015-07-19T23:39:56.647+0000 I SHARDING [conn135] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.785+0000 m30999| 2015-07-19T23:39:56.647+0000 W SHARDING [conn85] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db15.coll15 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.785+0000 m30998| 2015-07-19T23:39:56.648+0000 I SHARDING [conn86] ChunkManager: time to load chunks for db15.coll15: 0ms sequenceNumber: 21 version: 1|9||55ac354bd2c1f750d15483f9 based on: 1|0||55ac354bd2c1f750d15483f9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.787+0000 m30998| 2015-07-19T23:39:56.648+0000 I SHARDING [conn86] autosplitted db15.coll15 shard: ns: db15.coll15, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 9 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.788+0000 m31200| 2015-07-19T23:39:56.649+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.788+0000 m30999| 2015-07-19T23:39:56.672+0000 I NETWORK [conn85] end connection 10.139.123.131:57438 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.788+0000 m31200| 2015-07-19T23:39:56.680+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.788+0000 m31200| 2015-07-19T23:39:56.680+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.788+0000 m31200| 2015-07-19T23:39:56.680+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.789+0000 m31200| 2015-07-19T23:39:56.680+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.789+0000 m31200| 2015-07-19T23:39:56.680+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.789+0000 m31200| 2015-07-19T23:39:56.680+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.790+0000 m31200| 2015-07-19T23:39:56.680+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.790+0000 m31200| 2015-07-19T23:39:56.681+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.790+0000 m31200| 
2015-07-19T23:39:56.681+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.790+0000 m31200| 2015-07-19T23:39:56.681+0000 W SHARDING [conn135] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.790+0000 m31200| 2015-07-19T23:39:56.682+0000 I SHARDING [conn89] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.791+0000 m31200| 2015-07-19T23:39:56.683+0000 I SHARDING [conn89] distributed lock 'db15.coll15/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac354cd9a63f6196b172aa [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.791+0000 m31200| 2015-07-19T23:39:56.683+0000 I SHARDING [conn89] remotely refreshing metadata for db15.coll15 based on current shard version 1|9||55ac354bd2c1f750d15483f9, current metadata version is 1|9||55ac354bd2c1f750d15483f9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.791+0000 m31200| 2015-07-19T23:39:56.684+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.791+0000 m31200| 2015-07-19T23:39:56.684+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.791+0000 m31200| 2015-07-19T23:39:56.684+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.792+0000 m31200| 2015-07-19T23:39:56.684+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.792+0000 m31200| 2015-07-19T23:39:56.684+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.792+0000 m31200| 2015-07-19T23:39:56.684+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.792+0000 m31200| 2015-07-19T23:39:56.684+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.792+0000 m31200| 2015-07-19T23:39:56.684+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.792+0000 m31200| 2015-07-19T23:39:56.684+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.793+0000 m31200| 2015-07-19T23:39:56.684+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.793+0000 m31200| 2015-07-19T23:39:56.690+0000 I SHARDING [conn89] metadata of collection db15.coll15 already up to date (shard version : 
1|9||55ac354bd2c1f750d15483f9, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.793+0000 m31200| 2015-07-19T23:39:56.690+0000 W SHARDING [conn89] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.793+0000 m31200| 2015-07-19T23:39:56.690+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.793+0000 m31200| 2015-07-19T23:39:56.691+0000 I SHARDING [conn89] distributed lock 'db15.coll15/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.794+0000 m30998| 2015-07-19T23:39:56.691+0000 W SHARDING [conn88] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.794+0000 m31200| 2015-07-19T23:39:56.692+0000 I SHARDING [conn18] distributed lock 'db15.coll15/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac354cd9a63f6196b172ab [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.794+0000 m31200| 2015-07-19T23:39:56.692+0000 I SHARDING [conn18] remotely refreshing metadata for db15.coll15 based on current shard version 1|9||55ac354bd2c1f750d15483f9, current metadata version is 1|9||55ac354bd2c1f750d15483f9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.794+0000 m30998| 2015-07-19T23:39:56.696+0000 I NETWORK [conn88] end connection 10.139.123.131:36085 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.795+0000 m31200| 2015-07-19T23:39:56.698+0000 I SHARDING [conn18] metadata of collection db15.coll15 already up to date (shard version : 1|9||55ac354bd2c1f750d15483f9, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.795+0000 m31200| 2015-07-19T23:39:56.698+0000 W SHARDING [conn18] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.795+0000 m30999| 2015-07-19T23:39:56.698+0000 W SHARDING [conn88] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.795+0000 m31200| 
2015-07-19T23:39:56.698+0000 I SHARDING [conn18] distributed lock 'db15.coll15/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.796+0000 m30998| 2015-07-19T23:39:56.755+0000 I NETWORK [conn87] end connection 10.139.123.131:36084 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.796+0000 m30998| 2015-07-19T23:39:56.766+0000 I NETWORK [conn86] end connection 10.139.123.131:36083 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.796+0000 m31200| 2015-07-19T23:39:56.764+0000 I SHARDING [conn14] request split points lookup for chunk db15.coll15 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.796+0000 m31200| 2015-07-19T23:39:56.773+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.796+0000 m31200| 2015-07-19T23:39:56.773+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.796+0000 m31200| 2015-07-19T23:39:56.773+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.797+0000 m31200| 2015-07-19T23:39:56.773+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.797+0000 m31200| 2015-07-19T23:39:56.774+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.797+0000 m31200| 2015-07-19T23:39:56.774+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.797+0000 m31200| 2015-07-19T23:39:56.774+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.797+0000 m31200| 2015-07-19T23:39:56.774+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.797+0000 m31200| 2015-07-19T23:39:56.774+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.798+0000 m31200| 2015-07-19T23:39:56.774+0000 W SHARDING [conn14] possible low cardinality key detected in db15.coll15 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.798+0000 m31200| 2015-07-19T23:39:56.774+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.798+0000 m31200| 2015-07-19T23:39:56.776+0000 I SHARDING [conn18] distributed lock 'db15.coll15/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac354cd9a63f6196b172ac [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.798+0000 m31200| 2015-07-19T23:39:56.776+0000 I 
SHARDING [conn18] remotely refreshing metadata for db15.coll15 based on current shard version 1|9||55ac354bd2c1f750d15483f9, current metadata version is 1|9||55ac354bd2c1f750d15483f9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.798+0000 m31200| 2015-07-19T23:39:56.776+0000 I SHARDING [conn18] metadata of collection db15.coll15 already up to date (shard version : 1|9||55ac354bd2c1f750d15483f9, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.799+0000 m31200| 2015-07-19T23:39:56.776+0000 W SHARDING [conn18] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.799+0000 m31200| 2015-07-19T23:39:56.776+0000 I SHARDING [conn18] distributed lock 'db15.coll15/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.799+0000 m30999| 2015-07-19T23:39:56.777+0000 W SHARDING [conn88] splitChunk failed - cmd: { splitChunk: "db15.coll15", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354bd2c1f750d15483f9') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.799+0000 m30999| 2015-07-19T23:39:56.799+0000 I NETWORK [conn88] end connection 10.139.123.131:57441 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.821+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.821+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.822+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.822+0000 jstests/concurrency/fsm_workloads/touch_index.js: Workload completed in 995 ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.822+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.822+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.822+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.822+0000 m30999| 2015-07-19T23:39:56.821+0000 I COMMAND [conn1] DROP: db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.822+0000 m30999| 2015-07-19T23:39:56.821+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.821+0000-55ac354cd2c1f750d15483fb", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349196821), what: "dropCollection.start", ns: "db15.coll15", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.829+0000 m31100| 2015-07-19T23:39:56.829+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.829+0000 m31100| 2015-07-19T23:39:56.829+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 
} }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.874+0000 m30999| 2015-07-19T23:39:56.873+0000 I SHARDING [conn1] distributed lock 'db15.coll15/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac354cd2c1f750d15483fc [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.874+0000 m31100| 2015-07-19T23:39:56.874+0000 I COMMAND [conn127] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.875+0000 m31200| 2015-07-19T23:39:56.875+0000 I COMMAND [conn14] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.875+0000 m31200| 2015-07-19T23:39:56.875+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 105ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.876+0000 m31200| 2015-07-19T23:39:56.875+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 105ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.877+0000 m31101| 2015-07-19T23:39:56.877+0000 I COMMAND [repl writer worker 11] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.877+0000 m31102| 2015-07-19T23:39:56.877+0000 I COMMAND [repl writer worker 11] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.880+0000 m31201| 2015-07-19T23:39:56.878+0000 I COMMAND [repl writer worker 4] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.880+0000 m31202| 2015-07-19T23:39:56.878+0000 I COMMAND [repl writer worker 8] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.928+0000 m31200| 2015-07-19T23:39:56.928+0000 I SHARDING [conn14] remotely refreshing metadata for db15.coll15 with requested shard version 0|0||000000000000000000000000, current shard version is 1|9||55ac354bd2c1f750d15483f9, current metadata version is 1|9||55ac354bd2c1f750d15483f9 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.929+0000 m31200| 2015-07-19T23:39:56.928+0000 W SHARDING [conn14] no chunks found when reloading db15.coll15, previous version was 0|0||55ac354bd2c1f750d15483f9, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.929+0000 m31200| 2015-07-19T23:39:56.928+0000 I SHARDING [conn14] dropping metadata for db15.coll15 at shard version 1|9||55ac354bd2c1f750d15483f9, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.929+0000 m30999| 2015-07-19T23:39:56.929+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:56.929+0000-55ac354cd2c1f750d15483fd", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349196929), what: "dropCollection", ns: "db15.coll15", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:56.980+0000 m30999| 2015-07-19T23:39:56.980+0000 I SHARDING [conn1] distributed lock 'db15.coll15/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
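
The DROP sequence above is the full teardown path for a sharded collection: the mongos logs dropCollection.start, takes the 'db15.coll15' distributed lock, sends the drop to each shard primary (m31100, m31200), the secondaries replay it via the oplog (the "repl writer worker" lines on m31101/m31102/m31201/m31202), and the owning shard then reloads metadata, finds no chunks ("this is a drop"), and discards its cached shard version. A sketch of the same teardown plus a config-metadata check, assuming a shell connected to a mongos:

    // Drop the sharded collection through mongos, then confirm the config
    // servers no longer track any chunks for the namespace.
    db.getSiblingDB("db15").coll15.drop();
    assert.eq(0, db.getSiblingDB("config").chunks.count({ ns: "db15.coll15" }));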
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.032+0000 m30999| 2015-07-19T23:39:57.032+0000 I COMMAND [conn1] DROP DATABASE: db15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.032+0000 m30999| 2015-07-19T23:39:57.032+0000 I SHARDING [conn1] DBConfig::dropDatabase: db15 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.032+0000 m30999| 2015-07-19T23:39:57.032+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:57.032+0000-55ac354dd2c1f750d15483fe", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349197032), what: "dropDatabase.start", ns: "db15", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.133+0000 m30999| 2015-07-19T23:39:57.133+0000 I SHARDING [conn1] DBConfig::dropDatabase: db15 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.134+0000 m31200| 2015-07-19T23:39:57.133+0000 I COMMAND [conn111] dropDatabase db15 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.134+0000 m31200| 2015-07-19T23:39:57.133+0000 I COMMAND [conn111] dropDatabase db15 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.135+0000 m31200| 2015-07-19T23:39:57.134+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 89 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 253ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.135+0000 m30999| 2015-07-19T23:39:57.134+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:57.134+0000-55ac354dd2c1f750d15483ff", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349197134), what: "dropDatabase", ns: "db15", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.136+0000 m31202| 2015-07-19T23:39:57.134+0000 I COMMAND [repl writer worker 9] dropDatabase db15 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.136+0000 m31202| 2015-07-19T23:39:57.134+0000 I COMMAND [repl writer worker 9] dropDatabase db15 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.136+0000 m31200| 2015-07-19T23:39:57.134+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 244 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 253ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.136+0000 m31201| 2015-07-19T23:39:57.134+0000 I COMMAND [repl writer worker 10] dropDatabase db15 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.136+0000 m31201| 2015-07-19T23:39:57.135+0000 I COMMAND [repl writer worker 10] dropDatabase db15 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.190+0000 m31100| 2015-07-19T23:39:57.190+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 
310ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.191+0000 m31100| 2015-07-19T23:39:57.190+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 311ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.195+0000 m31100| 2015-07-19T23:39:57.195+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.198+0000 m31102| 2015-07-19T23:39:57.198+0000 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.208+0000 m31200| 2015-07-19T23:39:57.208+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.209+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.209+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.209+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.209+0000 jstests/concurrency/fsm_workloads/update_array_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.209+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.209+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.209+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.211+0000 m31202| 2015-07-19T23:39:57.210+0000 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.211+0000 m31201| 2015-07-19T23:39:57.211+0000 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.213+0000 m30999| 2015-07-19T23:39:57.213+0000 I SHARDING [conn1] distributed lock 'db16/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac354dd2c1f750d1548400 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.215+0000 m30999| 2015-07-19T23:39:57.214+0000 I SHARDING [conn1] Placing [db16] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.215+0000 m30999| 2015-07-19T23:39:57.214+0000 I SHARDING [conn1] Enabling sharding for database [db16] in config db [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.266+0000 m30999| 2015-07-19T23:39:57.265+0000 I SHARDING [conn1] distributed lock 'db16/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.271+0000 m31200| 2015-07-19T23:39:57.271+0000 I INDEX [conn113] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.271+0000 m31200| 2015-07-19T23:39:57.271+0000 I INDEX [conn113] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.272+0000 m31200| 2015-07-19T23:39:57.272+0000 I INDEX [conn113] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.273+0000 m30999| 2015-07-19T23:39:57.272+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db16.coll16", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.274+0000 m30999| 2015-07-19T23:39:57.274+0000 I SHARDING [conn1] distributed lock 'db16.coll16/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac354dd2c1f750d1548401 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.274+0000 m30999| 2015-07-19T23:39:57.274+0000 I SHARDING [conn1] enable sharding on: db16.coll16 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.275+0000 m30999| 2015-07-19T23:39:57.274+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:57.274+0000-55ac354dd2c1f750d1548402", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349197274), what: "shardCollection.start", ns: "db16.coll16", details: { shardKey: { _id: "hashed" }, collection: "db16.coll16", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.276+0000 m31201| 2015-07-19T23:39:57.276+0000 I INDEX [repl writer worker 8] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.276+0000 m31201| 2015-07-19T23:39:57.276+0000 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.278+0000 m31201| 2015-07-19T23:39:57.278+0000 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.279+0000 m31202| 2015-07-19T23:39:57.279+0000 I INDEX [repl writer worker 1] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.280+0000 m31202| 2015-07-19T23:39:57.279+0000 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.281+0000 m31202| 2015-07-19T23:39:57.280+0000 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.325+0000 m30999| 2015-07-19T23:39:57.325+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db16.coll16 using new epoch 55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.427+0000 m30999| 2015-07-19T23:39:57.427+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 70 version: 1|1||55ac354dd2c1f750d1548403 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.479+0000 m30999| 2015-07-19T23:39:57.478+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 71 version: 1|1||55ac354dd2c1f750d1548403 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.479+0000 m31200| 2015-07-19T23:39:57.479+0000 I SHARDING [conn126] remotely refreshing metadata for db16.coll16 with requested shard version 1|1||55ac354dd2c1f750d1548403, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.479+0000 m31200| 2015-07-19T23:39:57.479+0000 I SHARDING [conn126] collection db16.coll16 was previously unsharded, new metadata loaded with shard version 1|1||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.480+0000 m31200| 2015-07-19T23:39:57.479+0000 I SHARDING [conn126] collection version was loaded at version 1|1||55ac354dd2c1f750d1548403, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.480+0000 m30999| 2015-07-19T23:39:57.479+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:57.479+0000-55ac354dd2c1f750d1548404", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349197479), what: "shardCollection", ns: "db16.coll16", details: { version: "1|1||55ac354dd2c1f750d1548403" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.531+0000 m30999| 2015-07-19T23:39:57.531+0000 I SHARDING [conn1] distributed lock 'db16.coll16/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
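
With db15 gone, the harness provisions the next workload's namespace: db16 is placed with test-rs1 as its primary shard, sharding is enabled, and db16.coll16 is sharded on { _id: "hashed" }, which here pre-creates two chunks (numChunks: 2 in the shardCollection.start event) split at the midpoint of the hashed key space; one of them is immediately moved to test-rs0 in the entries that follow. The equivalent setup by hand would look roughly like this:

    // Recreate the db16.coll16 setup from the log: enable sharding on the
    // database, then shard the collection on a hashed _id key so the empty
    // collection starts out pre-split rather than as one chunk.
    sh.enableSharding("db16");
    sh.shardCollection("db16.coll16", { _id: "hashed" });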
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.531+0000 m30999| 2015-07-19T23:39:57.531+0000 I SHARDING [conn1] moving chunk ns: db16.coll16 moving ( ns: db16.coll16, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.531+0000 m31200| 2015-07-19T23:39:57.531+0000 I SHARDING [conn18] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.532+0000 m31200| 2015-07-19T23:39:57.531+0000 I SHARDING [conn18] received moveChunk request: { moveChunk: "db16.coll16", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac354dd2c1f750d1548403') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.533+0000 m31200| 2015-07-19T23:39:57.532+0000 I SHARDING [conn18] distributed lock 'db16.coll16/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac354dd9a63f6196b172ae [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.533+0000 m31200| 2015-07-19T23:39:57.532+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:57.532+0000-55ac354dd9a63f6196b172af", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349197532), what: "moveChunk.start", ns: "db16.coll16", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.583+0000 m31200| 2015-07-19T23:39:57.583+0000 I SHARDING [conn18] remotely refreshing metadata for db16.coll16 based on current shard version 1|1||55ac354dd2c1f750d1548403, current metadata version is 1|1||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.584+0000 m31200| 2015-07-19T23:39:57.584+0000 I SHARDING [conn18] metadata of collection db16.coll16 already up to date (shard version : 1|1||55ac354dd2c1f750d1548403, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.584+0000 m31200| 2015-07-19T23:39:57.584+0000 I SHARDING [conn18] moveChunk request accepted at version 1|1||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.584+0000 m31200| 2015-07-19T23:39:57.584+0000 I SHARDING [conn18] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.585+0000 m31100| 2015-07-19T23:39:57.584+0000 I SHARDING [conn19] remotely refreshing metadata for db16.coll16, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.585+0000 m31100| 2015-07-19T23:39:57.585+0000 I SHARDING [conn19] collection db16.coll16 was previously unsharded, new metadata loaded with shard version 0|0||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.585+0000 m31100| 2015-07-19T23:39:57.585+0000 I SHARDING [conn19] collection version was loaded at version 1|1||55ac354dd2c1f750d1548403, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.585+0000 m31100| 2015-07-19T23:39:57.585+0000 I SHARDING [migrateThread] starting receiving-end 
of migration of chunk { _id: MinKey } -> { _id: 0 } for collection db16.coll16 from test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 at epoch 55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.587+0000 m31200| 2015-07-19T23:39:57.586+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.589+0000 m31100| 2015-07-19T23:39:57.588+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:204 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 393ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.589+0000 m31100| 2015-07-19T23:39:57.588+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 388ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.589+0000 m31101| 2015-07-19T23:39:57.589+0000 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.590+0000 m31200| 2015-07-19T23:39:57.589+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.590+0000 m31100| 2015-07-19T23:39:57.590+0000 I INDEX [migrateThread] build index on: db16.coll16 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.591+0000 m31100| 2015-07-19T23:39:57.590+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.592+0000 m31100| 2015-07-19T23:39:57.592+0000 I INDEX [migrateThread] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.592+0000 m31100| 2015-07-19T23:39:57.592+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.593+0000 m31200| 2015-07-19T23:39:57.593+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.595+0000 m31100| 2015-07-19T23:39:57.594+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.595+0000 m31100| 2015-07-19T23:39:57.595+0000 I SHARDING [migrateThread] Deleter starting delete for: db16.coll16 from { _id: MinKey } -> { _id: 0 }, with opId: 37607 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.595+0000 m31100| 2015-07-19T23:39:57.595+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db16.coll16 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.599+0000 m31101| 2015-07-19T23:39:57.599+0000 I INDEX [repl writer worker 0] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.599+0000 m31101| 2015-07-19T23:39:57.599+0000 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.600+0000 m31102| 2015-07-19T23:39:57.599+0000 I INDEX [repl writer worker 12] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.600+0000 m31102| 2015-07-19T23:39:57.599+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.601+0000 m31101| 2015-07-19T23:39:57.600+0000 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.602+0000 m31100| 2015-07-19T23:39:57.601+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.602+0000 m31100| 2015-07-19T23:39:57.601+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db16.coll16' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.602+0000 m31102| 2015-07-19T23:39:57.601+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.602+0000 m31200| 2015-07-19T23:39:57.601+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.602+0000 m31200| 2015-07-19T23:39:57.601+0000 I SHARDING [conn18] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.603+0000 m31200| 2015-07-19T23:39:57.601+0000 I SHARDING [conn18] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.603+0000 m31200| 2015-07-19T23:39:57.602+0000 I SHARDING [conn18] moveChunk setting version to: 2|0||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.611+0000 m31100| 2015-07-19T23:39:57.611+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db16.coll16' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.611+0000 m31100| 2015-07-19T23:39:57.611+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:57.611+0000-55ac354d68c42881b59cba49", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349197611), what: "moveChunk.to", ns: "db16.coll16", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 9, step 2 of 5: 5, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 10, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.662+0000 m31200| 2015-07-19T23:39:57.662+0000 I SHARDING [conn18] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db16.coll16", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.663+0000 m31200| 2015-07-19T23:39:57.662+0000 I SHARDING [conn18] moveChunk updating self version to: 2|1||55ac354dd2c1f750d1548403 through { _id: 0 } -> { _id: MaxKey } for collection 'db16.coll16' [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.663+0000 m31200| 2015-07-19T23:39:57.663+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:57.663+0000-55ac354dd9a63f6196b172b0", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349197663), what: "moveChunk.commit", ns: "db16.coll16", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.714+0000 m31200| 2015-07-19T23:39:57.714+0000 I SHARDING [conn18] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.714+0000 m31200| 2015-07-19T23:39:57.714+0000 I SHARDING [conn18] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.714+0000 m31200| 2015-07-19T23:39:57.714+0000 I SHARDING [conn18] Deleter starting delete for: db16.coll16 from { _id: MinKey } -> { _id: 0 }, with opId: 52315 
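The donor-side commit sequence above (enter critical section, set version 2|0, TO-shard acceptance, log moveChunk.commit) is also recorded durably on the config servers. A minimal shell sketch for pulling those entries back out through one of the mongos processes; the "what" and "ns" field names match the metadata events printed above, and reading the step values as per-step timings in milliseconds is my interpretation of these logs:

    // Hedged sketch: list this collection's migration events from config.changelog.
    // Run against a mongos (e.g. the m30999 process in this test).
    var changelog = db.getSiblingDB("config").changelog;
    changelog.find({ ns: "db16.coll16", what: /moveChunk/ })
             .sort({ time: 1 })
             .forEach(printjson);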
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.714+0000 m31200| 2015-07-19T23:39:57.714+0000 I SHARDING [conn18] rangeDeleter deleted 0 documents for db16.coll16 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.714+0000 m31200| 2015-07-19T23:39:57.714+0000 I SHARDING [conn18] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.715+0000 m31200| 2015-07-19T23:39:57.714+0000 I SHARDING [conn18] distributed lock 'db16.coll16/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.715+0000 m31200| 2015-07-19T23:39:57.714+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:57.714+0000-55ac354dd9a63f6196b172b1", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349197714), what: "moveChunk.from", ns: "db16.coll16", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 16, step 5 of 6: 112, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.766+0000 m31200| 2015-07-19T23:39:57.765+0000 I COMMAND [conn18] command db16.coll16 command: moveChunk { moveChunk: "db16.coll16", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac354dd2c1f750d1548403') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 234ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.766+0000 m30999| 2015-07-19T23:39:57.766+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 72 version: 2|1||55ac354dd2c1f750d1548403 based on: 1|1||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.766+0000 m31100| 2015-07-19T23:39:57.766+0000 I SHARDING [conn41] received splitChunk request: { splitChunk: "db16.coll16", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354dd2c1f750d1548403') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.768+0000 m31100| 2015-07-19T23:39:57.767+0000 I SHARDING [conn41] distributed lock 'db16.coll16/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac354d68c42881b59cba4a [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.768+0000 m31100| 2015-07-19T23:39:57.767+0000 I SHARDING [conn41] remotely refreshing metadata for db16.coll16 based on current shard version 0|0||55ac354dd2c1f750d1548403, current metadata version is 1|1||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.768+0000 m31100| 2015-07-19T23:39:57.768+0000 I SHARDING [conn41] updating metadata for db16.coll16 from shard version 0|0||55ac354dd2c1f750d1548403 to shard version 
2|0||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.768+0000 m31100| 2015-07-19T23:39:57.768+0000 I SHARDING [conn41] collection version was loaded at version 2|1||55ac354dd2c1f750d1548403, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.769+0000 m31100| 2015-07-19T23:39:57.768+0000 I SHARDING [conn41] splitChunk accepted at version 2|0||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.769+0000 m31100| 2015-07-19T23:39:57.769+0000 I SHARDING [conn41] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:57.769+0000-55ac354d68c42881b59cba4b", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47678", time: new Date(1437349197769), what: "split", ns: "db16.coll16", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac354dd2c1f750d1548403') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac354dd2c1f750d1548403') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.820+0000 m31100| 2015-07-19T23:39:57.820+0000 I SHARDING [conn41] distributed lock 'db16.coll16/ip-10-139-123-131:31100:1437349130:1993228155' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.821+0000 m30999| 2015-07-19T23:39:57.821+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 73 version: 2|3||55ac354dd2c1f750d1548403 based on: 2|1||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.821+0000 m31200| 2015-07-19T23:39:57.821+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db16.coll16", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354dd2c1f750d1548403') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.822+0000 m31200| 2015-07-19T23:39:57.822+0000 I SHARDING [conn18] distributed lock 'db16.coll16/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac354dd9a63f6196b172b2 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.822+0000 m31200| 2015-07-19T23:39:57.822+0000 I SHARDING [conn18] remotely refreshing metadata for db16.coll16 based on current shard version 2|0||55ac354dd2c1f750d1548403, current metadata version is 2|0||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.823+0000 m31200| 2015-07-19T23:39:57.822+0000 I SHARDING [conn18] updating metadata for db16.coll16 from shard version 2|0||55ac354dd2c1f750d1548403 to shard version 2|1||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.823+0000 m31200| 2015-07-19T23:39:57.822+0000 I SHARDING [conn18] collection version was loaded at version 2|3||55ac354dd2c1f750d1548403, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.823+0000 m31200| 2015-07-19T23:39:57.822+0000 I SHARDING [conn18] splitChunk accepted at version 2|1||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.823+0000 m31200| 2015-07-19T23:39:57.823+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:57.823+0000-55ac354dd9a63f6196b172b3", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349197823), what: 
"split", ns: "db16.coll16", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac354dd2c1f750d1548403') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac354dd2c1f750d1548403') } } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.875+0000 m31200| 2015-07-19T23:39:57.874+0000 I SHARDING [conn18] distributed lock 'db16.coll16/ip-10-139-123-131:31200:1437349131:182555922' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.875+0000 m30999| 2015-07-19T23:39:57.875+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 74 version: 2|5||55ac354dd2c1f750d1548403 based on: 2|3||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.876+0000 m30999| 2015-07-19T23:39:57.876+0000 I SHARDING [conn1] sharded connection to test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.876+0000 m31200| 2015-07-19T23:39:57.876+0000 I NETWORK [conn126] end connection 10.139.123.131:39734 (73 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.876+0000 m30999| 2015-07-19T23:39:57.876+0000 I SHARDING [conn1] retrying command: { listIndexes: "coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.880+0000 m31200| 2015-07-19T23:39:57.880+0000 I INDEX [conn94] build index on: db16.coll16 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.880+0000 m31200| 2015-07-19T23:39:57.880+0000 I INDEX [conn94] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.880+0000 m31100| 2015-07-19T23:39:57.880+0000 I INDEX [conn118] build index on: db16.coll16 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.881+0000 m31100| 2015-07-19T23:39:57.880+0000 I INDEX [conn118] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.883+0000 m31200| 2015-07-19T23:39:57.881+0000 I INDEX [conn94] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.883+0000 m31100| 2015-07-19T23:39:57.881+0000 I INDEX [conn118] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.888+0000 m31100| 2015-07-19T23:39:57.881+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 281ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.900+0000 m31100| 2015-07-19T23:39:57.881+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 281ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.900+0000 m31200| 2015-07-19T23:39:57.881+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 605ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.901+0000 m31200| 2015-07-19T23:39:57.881+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:160 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 604ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.901+0000 m31202| 2015-07-19T23:39:57.884+0000 I INDEX [repl writer worker 4] build index on: db16.coll16 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.901+0000 m31202| 2015-07-19T23:39:57.884+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.901+0000 m31202| 2015-07-19T23:39:57.885+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.901+0000 m31102| 2015-07-19T23:39:57.886+0000 I INDEX [repl writer worker 6] build index on: db16.coll16 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.901+0000 m31102| 2015-07-19T23:39:57.886+0000 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.902+0000 m31201| 2015-07-19T23:39:57.887+0000 I INDEX [repl writer worker 2] build index on: db16.coll16 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.902+0000 m31201| 2015-07-19T23:39:57.887+0000 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.902+0000 m31101| 2015-07-19T23:39:57.888+0000 I INDEX [repl writer worker 10] build index on: db16.coll16 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.902+0000 m31101| 2015-07-19T23:39:57.888+0000 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.902+0000 m31101| 2015-07-19T23:39:57.890+0000 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.902+0000 m31201| 2015-07-19T23:39:57.891+0000 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.903+0000 m31102| 2015-07-19T23:39:57.891+0000 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.903+0000 m31100| 2015-07-19T23:39:57.897+0000 I COMMAND [conn41] CMD: dropIndexes db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.903+0000 m31200| 2015-07-19T23:39:57.898+0000 I COMMAND [conn18] CMD: dropIndexes db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.903+0000 m31101| 2015-07-19T23:39:57.898+0000 I COMMAND [repl writer worker 4] CMD: dropIndexes db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.903+0000 m31102| 2015-07-19T23:39:57.898+0000 I COMMAND [repl writer worker 9] CMD: dropIndexes db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.903+0000 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.906+0000 m31202| 2015-07-19T23:39:57.899+0000 I COMMAND [repl writer worker 15] CMD: dropIndexes db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.965+0000 m30998| 2015-07-19T23:39:57.964+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36089 #89 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.968+0000 m30998| 2015-07-19T23:39:57.967+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36090 #90 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.968+0000 m30999| 2015-07-19T23:39:57.968+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57450 #90 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.987+0000 m30998| 2015-07-19T23:39:57.986+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36092 #91 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.990+0000 
m30999| 2015-07-19T23:39:57.989+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57452 #91 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.996+0000 setting random seed: 3243954940699 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.996+0000 setting random seed: 4762329370714 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.996+0000 setting random seed: 5832705837674 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.996+0000 setting random seed: 6463657449930 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:57.997+0000 setting random seed: 8640911472029 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.002+0000 m31200| 2015-07-19T23:39:58.002+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:134 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.003+0000 m31200| 2015-07-19T23:39:58.002+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:147 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.003+0000 m30998| 2015-07-19T23:39:58.002+0000 I SHARDING [conn90] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 22 version: 2|5||55ac354dd2c1f750d1548403 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.003+0000 m31201| 2015-07-19T23:39:58.002+0000 I COMMAND [repl writer worker 14] CMD: dropIndexes db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.011+0000 m31100| 2015-07-19T23:39:58.011+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.011+0000 m31100| 2015-07-19T23:39:58.011+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:147 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.073+0000 m30998| 2015-07-19T23:39:58.073+0000 I NETWORK [conn89] end connection 10.139.123.131:36089 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.093+0000 m30998| 2015-07-19T23:39:58.092+0000 I NETWORK [conn90] end connection 10.139.123.131:36090 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.094+0000 m30999| 2015-07-19T23:39:58.094+0000 I NETWORK [conn90] end connection 10.139.123.131:57450 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.109+0000 m30998| 2015-07-19T23:39:58.109+0000 I NETWORK [conn91] end connection 10.139.123.131:36092 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.112+0000 m30999| 
2015-07-19T23:39:58.111+0000 I NETWORK [conn91] end connection 10.139.123.131:57452 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.141+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.141+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.141+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.141+0000 jstests/concurrency/fsm_workloads/update_array_noindex.js: Workload completed in 243 ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.141+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.141+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.142+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.142+0000 m30999| 2015-07-19T23:39:58.141+0000 I COMMAND [conn1] DROP: db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.142+0000 m30999| 2015-07-19T23:39:58.141+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:58.141+0000-55ac354ed2c1f750d1548405", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349198141), what: "dropCollection.start", ns: "db16.coll16", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.193+0000 m30999| 2015-07-19T23:39:58.193+0000 I SHARDING [conn1] distributed lock 'db16.coll16/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac354ed2c1f750d1548406 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.194+0000 m31100| 2015-07-19T23:39:58.193+0000 I COMMAND [conn127] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.195+0000 m31200| 2015-07-19T23:39:58.194+0000 I COMMAND [conn14] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.195+0000 m31200| 2015-07-19T23:39:58.195+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 109ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.195+0000 m31200| 2015-07-19T23:39:58.195+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 109ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.196+0000 m31102| 2015-07-19T23:39:58.196+0000 I COMMAND [repl writer worker 7] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.197+0000 m31101| 2015-07-19T23:39:58.197+0000 I COMMAND [repl writer worker 12] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.198+0000 m31201| 2015-07-19T23:39:58.198+0000 I COMMAND [repl writer worker 12] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.198+0000 m31202| 2015-07-19T23:39:58.198+0000 I COMMAND [repl writer worker 5] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.247+0000 m31100| 2015-07-19T23:39:58.247+0000 I SHARDING [conn127] remotely refreshing metadata for db16.coll16 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac354dd2c1f750d1548403, current metadata version is 2|3||55ac354dd2c1f750d1548403 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.248+0000 m31100| 2015-07-19T23:39:58.248+0000 W SHARDING [conn127] no chunks found when reloading db16.coll16, previous version was 0|0||55ac354dd2c1f750d1548403, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.248+0000 m31100| 2015-07-19T23:39:58.248+0000 I SHARDING [conn127] dropping metadata for db16.coll16 at shard version 2|3||55ac354dd2c1f750d1548403, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.248+0000 m31200| 2015-07-19T23:39:58.248+0000 I SHARDING [conn14] remotely refreshing metadata for db16.coll16 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac354dd2c1f750d1548403, current metadata version is 2|5||55ac354dd2c1f750d1548403 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.249+0000 m31200| 2015-07-19T23:39:58.249+0000 W SHARDING [conn14] no chunks found when reloading db16.coll16, previous version was 0|0||55ac354dd2c1f750d1548403, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.249+0000 m31200| 2015-07-19T23:39:58.249+0000 I SHARDING [conn14] dropping metadata for db16.coll16 at shard version 2|5||55ac354dd2c1f750d1548403, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.249+0000 m30999| 2015-07-19T23:39:58.249+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:58.249+0000-55ac354ed2c1f750d1548407", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349198249), what: "dropCollection", ns: "db16.coll16", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.300+0000 m30999| 2015-07-19T23:39:58.300+0000 I SHARDING [conn1] distributed lock 'db16.coll16/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
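After the "this is a drop" reloads above, both shards have discarded their cached metadata for db16.coll16 and the dropCollection event is logged. A small sketch for confirming the drop from the config metadata via mongos; keeping the config.collections entry around with a dropped flag is an assumption about this 3.1-era build, while the zero chunk count matches the "no chunks found" warnings directly:

    // Hedged sketch: verify the sharded drop in the config database.
    var cfg = db.getSiblingDB("config");
    printjson(cfg.collections.findOne({ _id: "db16.coll16" })); // assumed: entry kept, flagged as dropped
    print(cfg.chunks.count({ ns: "db16.coll16" }));             // 0 chunks, as both shards observed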
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.352+0000 m30999| 2015-07-19T23:39:58.352+0000 I COMMAND [conn1] DROP DATABASE: db16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.352+0000 m30999| 2015-07-19T23:39:58.352+0000 I SHARDING [conn1] DBConfig::dropDatabase: db16 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.352+0000 m30999| 2015-07-19T23:39:58.352+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:58.352+0000-55ac354ed2c1f750d1548408", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349198352), what: "dropDatabase.start", ns: "db16", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.453+0000 m30999| 2015-07-19T23:39:58.453+0000 I SHARDING [conn1] DBConfig::dropDatabase: db16 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.454+0000 m31200| 2015-07-19T23:39:58.453+0000 I COMMAND [conn111] dropDatabase db16 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.454+0000 m31200| 2015-07-19T23:39:58.453+0000 I COMMAND [conn111] dropDatabase db16 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.454+0000 m31200| 2015-07-19T23:39:58.454+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 52 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 253ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.455+0000 m30999| 2015-07-19T23:39:58.454+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:58.454+0000-55ac354ed2c1f750d1548409", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349198454), what: "dropDatabase", ns: "db16", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.455+0000 m31200| 2015-07-19T23:39:58.454+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:109 locks:{ Global: { acquireCount: { r: 4 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 93 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 254ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.455+0000 m31201| 2015-07-19T23:39:58.454+0000 I COMMAND [repl writer worker 15] dropDatabase db16 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.455+0000 m31201| 2015-07-19T23:39:58.454+0000 I COMMAND [repl writer worker 15] dropDatabase db16 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.455+0000 m31202| 2015-07-19T23:39:58.454+0000 I COMMAND [repl writer worker 14] dropDatabase db16 starting [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.455+0000 m31202| 2015-07-19T23:39:58.454+0000 I COMMAND [repl writer worker 14] dropDatabase db16 finished [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.510+0000 m31100| 2015-07-19T23:39:58.509+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 
310ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.510+0000 m31100| 2015-07-19T23:39:58.509+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:116 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 310ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.515+0000 m31100| 2015-07-19T23:39:58.515+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.518+0000 m31101| 2015-07-19T23:39:58.518+0000 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.519+0000 m31102| 2015-07-19T23:39:58.518+0000 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.525+0000 m31200| 2015-07-19T23:39:58.525+0000 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.526+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.526+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.526+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.527+0000 jstests/concurrency/fsm_workloads/agg_base.js [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.527+0000 ---- [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.527+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.527+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.528+0000 m31201| 2015-07-19T23:39:58.528+0000 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.528+0000 m31202| 2015-07-19T23:39:58.528+0000 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.529+0000 m30999| 2015-07-19T23:39:58.529+0000 I SHARDING [conn1] distributed lock 'db17/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac354ed2c1f750d154840a [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.531+0000 m30999| 2015-07-19T23:39:58.531+0000 I SHARDING [conn1] Placing [db17] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.531+0000 m30999| 2015-07-19T23:39:58.531+0000 I SHARDING [conn1] Enabling sharding for database [db17] in config db [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.582+0000 m30999| 2015-07-19T23:39:58.582+0000 I SHARDING [conn1] distributed lock 'db17/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.588+0000 m31200| 2015-07-19T23:39:58.588+0000 I INDEX [conn113] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.589+0000 m31200| 2015-07-19T23:39:58.588+0000 I INDEX [conn113] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.590+0000 m31200| 2015-07-19T23:39:58.590+0000 I INDEX [conn113] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.591+0000 m30999| 2015-07-19T23:39:58.590+0000 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db17.coll17", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.592+0000 m30999| 2015-07-19T23:39:58.591+0000 I SHARDING [conn1] distributed lock 'db17.coll17/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac354ed2c1f750d154840b [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.592+0000 m30999| 2015-07-19T23:39:58.592+0000 I SHARDING [conn1] enable sharding on: db17.coll17 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.593+0000 m30999| 2015-07-19T23:39:58.592+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:58.592+0000-55ac354ed2c1f750d154840c", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349198592), what: "shardCollection.start", ns: "db17.coll17", details: { shardKey: { _id: "hashed" }, collection: "db17.coll17", primary: "test-rs1:test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.594+0000 m31201| 2015-07-19T23:39:58.594+0000 I INDEX [repl writer worker 4] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.594+0000 m31201| 2015-07-19T23:39:58.594+0000 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.594+0000 m31202| 2015-07-19T23:39:58.594+0000 I INDEX [repl writer worker 12] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.595+0000 m31202| 2015-07-19T23:39:58.594+0000 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.595+0000 m31201| 2015-07-19T23:39:58.595+0000 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.595+0000 m31202| 2015-07-19T23:39:58.595+0000 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.643+0000 m30999| 2015-07-19T23:39:58.643+0000 I SHARDING [conn1] going to create 2 chunk(s) for: db17.coll17 using new epoch 55ac354ed2c1f750d154840d [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.745+0000 m30999| 2015-07-19T23:39:58.745+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 75 version: 1|1||55ac354ed2c1f750d154840d based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.796+0000 m30999| 2015-07-19T23:39:58.796+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 76 version: 1|1||55ac354ed2c1f750d154840d based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.797+0000 m31200| 2015-07-19T23:39:58.797+0000 I SHARDING [conn94] remotely refreshing metadata for db17.coll17 with requested shard version 1|1||55ac354ed2c1f750d154840d, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.797+0000 m31200| 2015-07-19T23:39:58.797+0000 I SHARDING [conn94] collection db17.coll17 was previously unsharded, new metadata loaded with shard version 1|1||55ac354ed2c1f750d154840d [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.797+0000 m31200| 2015-07-19T23:39:58.797+0000 I SHARDING [conn94] collection version was loaded at version 1|1||55ac354ed2c1f750d154840d, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.798+0000 m30999| 2015-07-19T23:39:58.797+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:58.797+0000-55ac354ed2c1f750d154840e", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349198797), what: "shardCollection", ns: "db17.coll17", details: { version: "1|1||55ac354ed2c1f750d154840d" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.848+0000 m30999| 2015-07-19T23:39:58.848+0000 I SHARDING [conn1] distributed lock 'db17.coll17/ip-10-139-123-131:30999:1437349128:1804289383' unlocked. 
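The sequence just completed (enable sharding on db17, build the hashed index, shardcollection, "going to create 2 chunk(s)") is the per-database setup each workload drives through mongos. A minimal reproduction, assuming a shell connected to a mongos; numInitialChunks is my guess at how the harness arrives at exactly two initial chunks, one per shard:

    // Hedged sketch of the setup visible above, issued against a mongos.
    db.adminCommand({ enableSharding: "db17" });
    db.getSiblingDB("db17").coll17.createIndex({ _id: "hashed" });
    db.adminCommand({
        shardCollection: "db17.coll17",
        key: { _id: "hashed" },
        numInitialChunks: 2   // assumption: matches "numChunks: 2" in the event above
    });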
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.849+0000 m30999| 2015-07-19T23:39:58.849+0000 I SHARDING [conn1] moving chunk ns: db17.coll17 moving ( ns: db17.coll17, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.849+0000 m31200| 2015-07-19T23:39:58.849+0000 I SHARDING [conn18] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.850+0000 m31200| 2015-07-19T23:39:58.849+0000 I SHARDING [conn18] received moveChunk request: { moveChunk: "db17.coll17", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac354ed2c1f750d154840d') } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.850+0000 m31200| 2015-07-19T23:39:58.850+0000 I SHARDING [conn18] distributed lock 'db17.coll17/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac354ed9a63f6196b172b5 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.850+0000 m31200| 2015-07-19T23:39:58.850+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:58.850+0000-55ac354ed9a63f6196b172b6", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349198850), what: "moveChunk.start", ns: "db17.coll17", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.901+0000 m31200| 2015-07-19T23:39:58.901+0000 I SHARDING [conn18] remotely refreshing metadata for db17.coll17 based on current shard version 1|1||55ac354ed2c1f750d154840d, current metadata version is 1|1||55ac354ed2c1f750d154840d [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.902+0000 m31200| 2015-07-19T23:39:58.901+0000 I SHARDING [conn18] metadata of collection db17.coll17 already up to date (shard version : 1|1||55ac354ed2c1f750d154840d, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.902+0000 m31200| 2015-07-19T23:39:58.902+0000 I SHARDING [conn18] moveChunk request accepted at version 1|1||55ac354ed2c1f750d154840d [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.902+0000 m31200| 2015-07-19T23:39:58.902+0000 I SHARDING [conn18] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.902+0000 m31100| 2015-07-19T23:39:58.902+0000 I SHARDING [conn19] remotely refreshing metadata for db17.coll17, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.903+0000 m31100| 2015-07-19T23:39:58.903+0000 I SHARDING [conn19] collection db17.coll17 was previously unsharded, new metadata loaded with shard version 0|0||55ac354ed2c1f750d154840d [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.903+0000 m31100| 2015-07-19T23:39:58.903+0000 I SHARDING [conn19] collection version was loaded at version 1|1||55ac354ed2c1f750d154840d, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.903+0000 m31100| 2015-07-19T23:39:58.903+0000 I SHARDING [migrateThread] starting receiving-end 
of migration of chunk { _id: MinKey } -> { _id: 0 } for collection db17.coll17 from test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202 at epoch 55ac354ed2c1f750d154840d [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.904+0000 m31200| 2015-07-19T23:39:58.904+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db17.coll17", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.905+0000 m31100| 2015-07-19T23:39:58.905+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 384ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.906+0000 m31100| 2015-07-19T23:39:58.905+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:110 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 385ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.908+0000 m31200| 2015-07-19T23:39:58.906+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db17.coll17", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.909+0000 m31100| 2015-07-19T23:39:58.908+0000 I INDEX [migrateThread] build index on: db17.coll17 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.909+0000 m31100| 2015-07-19T23:39:58.908+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.910+0000 m31100| 2015-07-19T23:39:58.910+0000 I INDEX [migrateThread] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.910+0000 m31100| 2015-07-19T23:39:58.910+0000 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.911+0000 m31200| 2015-07-19T23:39:58.911+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db17.coll17", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.914+0000 m31100| 2015-07-19T23:39:58.913+0000 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.914+0000 m31100| 2015-07-19T23:39:58.914+0000 I SHARDING [migrateThread] Deleter starting delete for: db17.coll17 from { _id: MinKey } -> { _id: 0 }, with opId: 37789 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.914+0000 m31100| 2015-07-19T23:39:58.914+0000 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db17.coll17 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.916+0000 m31101| 2015-07-19T23:39:58.916+0000 I INDEX [repl writer worker 11] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.916+0000 m31101| 2015-07-19T23:39:58.916+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.918+0000 m31101| 2015-07-19T23:39:58.917+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.918+0000 m31100| 2015-07-19T23:39:58.918+0000 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.918+0000 m31100| 2015-07-19T23:39:58.918+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db17.coll17' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.919+0000 m31200| 2015-07-19T23:39:58.919+0000 I SHARDING [conn18] moveChunk data transfer progress: { active: true, ns: "db17.coll17", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.920+0000 m31200| 2015-07-19T23:39:58.919+0000 I SHARDING [conn18] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.920+0000 m31102| 2015-07-19T23:39:58.919+0000 I INDEX [repl writer worker 11] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.920+0000 m31102| 2015-07-19T23:39:58.919+0000 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.920+0000 m31200| 2015-07-19T23:39:58.919+0000 I SHARDING [conn18] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.921+0000 m31200| 2015-07-19T23:39:58.920+0000 I SHARDING [conn18] moveChunk setting version to: 2|0||55ac354ed2c1f750d154840d [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.921+0000 m31102| 2015-07-19T23:39:58.920+0000 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.929+0000 m31100| 2015-07-19T23:39:58.929+0000 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db17.coll17' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.929+0000 m31100| 2015-07-19T23:39:58.929+0000 I SHARDING [migrateThread] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:58.929+0000-55ac354e68c42881b59cba4c", server: "ip-10-139-123-131", clientAddr: "", time: new Date(1437349198929), what: "moveChunk.to", ns: "db17.coll17", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 10, step 2 of 5: 4, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 10, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.980+0000 m31200| 2015-07-19T23:39:58.979+0000 I SHARDING [conn18] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db17.coll17", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.980+0000 m31200| 2015-07-19T23:39:58.980+0000 I SHARDING [conn18] moveChunk updating self version to: 2|1||55ac354ed2c1f750d154840d through { _id: 0 } -> { _id: MaxKey } for collection 'db17.coll17' [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:58.981+0000 m31200| 2015-07-19T23:39:58.980+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:58.980+0000-55ac354ed9a63f6196b172b7", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349198980), what: "moveChunk.commit", ns: "db17.coll17", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.031+0000 m31200| 2015-07-19T23:39:59.031+0000 I SHARDING [conn18] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.032+0000 m31200| 2015-07-19T23:39:59.031+0000 I SHARDING [conn18] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.032+0000 m31200| 2015-07-19T23:39:59.031+0000 I SHARDING [conn18] Deleter starting delete for: db17.coll17 from { _id: MinKey } -> { _id: 0 }, with opId: 52588 [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.032+0000 m31200| 2015-07-19T23:39:59.031+0000 I SHARDING [conn18] rangeDeleter deleted 0 documents for db17.coll17 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.032+0000 m31200| 2015-07-19T23:39:59.031+0000 I SHARDING [conn18] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.032+0000 m31200| 2015-07-19T23:39:59.032+0000 I SHARDING [conn18] distributed lock 'db17.coll17/ip-10-139-123-131:31200:1437349131:182555922' unlocked. 
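With the recipient's replication flushed and the donor's distributed lock released, the migration of { _id: MinKey } -> { _id: 0 } is effectively complete. The same move can be issued by hand through mongos; this sketch mirrors the "received moveChunk request" entry above, and _waitForDelete is my reading of the mongos-side option that produces the donor's inline "Deleter starting delete" cleanup:

    // Hedged sketch: manually trigger the chunk move seen in this test.
    db.adminCommand({
        moveChunk: "db17.coll17",
        find: { _id: MinKey },   // any key falling inside the chunk to move
        to: "test-rs0",
        _waitForDelete: true     // assumed spelling of the waitForDelete option via mongos
    });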
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.032+0000 m31200| 2015-07-19T23:39:59.032+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:59.032+0000-55ac354fd9a63f6196b172b8", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349199032), what: "moveChunk.from", ns: "db17.coll17", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 52, step 3 of 6: 1, step 4 of 6: 16, step 5 of 6: 112, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.083+0000 m31200| 2015-07-19T23:39:59.083+0000 I COMMAND [conn18] command db17.coll17 command: moveChunk { moveChunk: "db17.coll17", from: "test-rs1/ip-10-139-123-131:31200,ip-10-139-123-131:31201,ip-10-139-123-131:31202", to: "test-rs0/ip-10-139-123-131:31100,ip-10-139-123-131:31101,ip-10-139-123-131:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/ip-10-139-123-131:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('55ac354ed2c1f750d154840d') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 233ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.083+0000 m30999| 2015-07-19T23:39:59.083+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 77 version: 2|1||55ac354ed2c1f750d154840d based on: 1|1||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.084+0000 m31100| 2015-07-19T23:39:59.084+0000 I SHARDING [conn41] received splitChunk request: { splitChunk: "db17.coll17", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354ed2c1f750d154840d') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.085+0000 m31100| 2015-07-19T23:39:59.085+0000 I SHARDING [conn41] distributed lock 'db17.coll17/ip-10-139-123-131:31100:1437349130:1993228155' acquired, ts : 55ac354f68c42881b59cba4d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.085+0000 m31100| 2015-07-19T23:39:59.085+0000 I SHARDING [conn41] remotely refreshing metadata for db17.coll17 based on current shard version 0|0||55ac354ed2c1f750d154840d, current metadata version is 1|1||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.086+0000 m31100| 2015-07-19T23:39:59.085+0000 I SHARDING [conn41] updating metadata for db17.coll17 from shard version 0|0||55ac354ed2c1f750d154840d to shard version 2|0||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.086+0000 m31100| 2015-07-19T23:39:59.085+0000 I SHARDING [conn41] collection version was loaded at version 2|1||55ac354ed2c1f750d154840d, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.086+0000 m31100| 2015-07-19T23:39:59.085+0000 I SHARDING [conn41] splitChunk accepted at version 2|0||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.086+0000 m31100| 2015-07-19T23:39:59.086+0000 I SHARDING [conn41] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:59.086+0000-55ac354f68c42881b59cba4e", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:47678", time: new Date(1437349199086), what: "split", ns: "db17.coll17", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('55ac354ed2c1f750d154840d') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('55ac354ed2c1f750d154840d') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.137+0000 m31100| 2015-07-19T23:39:59.137+0000 I SHARDING [conn41] distributed lock 'db17.coll17/ip-10-139-123-131:31100:1437349130:1993228155' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.138+0000 m30999| 2015-07-19T23:39:59.138+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 78 version: 2|3||55ac354ed2c1f750d154840d based on: 2|1||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.138+0000 m31200| 2015-07-19T23:39:59.138+0000 I SHARDING [conn18] received splitChunk request: { splitChunk: "db17.coll17", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/ip-10-139-123-131:29000", epoch: ObjectId('55ac354ed2c1f750d154840d') }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.139+0000 m31200| 2015-07-19T23:39:59.139+0000 I SHARDING [conn18] distributed lock 'db17.coll17/ip-10-139-123-131:31200:1437349131:182555922' acquired, ts : 55ac354fd9a63f6196b172b9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.140+0000 m31200| 2015-07-19T23:39:59.139+0000 I SHARDING [conn18] remotely refreshing metadata for db17.coll17 based on current shard version 2|0||55ac354ed2c1f750d154840d, current metadata version is 2|0||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.140+0000 m31200| 2015-07-19T23:39:59.140+0000 I SHARDING [conn18] updating metadata for db17.coll17 from shard version 2|0||55ac354ed2c1f750d154840d to shard version 2|1||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.140+0000 m31200| 2015-07-19T23:39:59.140+0000 I SHARDING [conn18] collection version was loaded at version 2|3||55ac354ed2c1f750d154840d, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.140+0000 m31200| 2015-07-19T23:39:59.140+0000 I SHARDING [conn18] splitChunk accepted at version 2|1||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.141+0000 m31200| 2015-07-19T23:39:59.140+0000 I SHARDING [conn18] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:39:59.140+0000-55ac354fd9a63f6196b172ba", server: "ip-10-139-123-131", clientAddr: "10.139.123.131:39392", time: new Date(1437349199140), what: "split", ns: "db17.coll17", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('55ac354ed2c1f750d154840d') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('55ac354ed2c1f750d154840d') } } }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.192+0000 m31200| 2015-07-19T23:39:59.191+0000 I SHARDING [conn18] distributed lock 'db17.coll17/ip-10-139-123-131:31200:1437349131:182555922' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.192+0000 m30999| 2015-07-19T23:39:59.192+0000 I SHARDING [conn1] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 79 version: 2|5||55ac354ed2c1f750d154840d based on: 2|3||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.323+0000 m31100| 2015-07-19T23:39:59.323+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 407ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.324+0000 m31100| 2015-07-19T23:39:59.323+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 404ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.349+0000 m31200| 2015-07-19T23:39:59.349+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 754ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.350+0000 m31200| 2015-07-19T23:39:59.349+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 754ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.601+0000 m31200| 2015-07-19T23:39:59.601+0000 I COMMAND [conn113] command db17.$cmd command: insert { insert: "coll17", documents: 509, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('55ac354ed2c1f750d154840d') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 522, w: 522 } }, Database: { acquireCount: { w: 522 } }, Collection: { acquireCount: { w: 13 } }, Metadata: { acquireCount: { w: 509 } }, oplog: { acquireCount: { w: 509 } } } protocol:op_command 254ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.635+0000 m31100| 2015-07-19T23:39:59.634+0000 I COMMAND [conn26] command db17.$cmd command: insert { insert: "coll17", documents: 491, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('55ac354ed2c1f750d154840d') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 506, w: 506 } }, Database: { acquireCount: { w: 506 } }, Collection: { acquireCount: { w: 15 } }, Metadata: { acquireCount: { w: 491 } }, oplog: { acquireCount: { w: 491 } } } protocol:op_command 313ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.862+0000 Using 5 threads (requested 5)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.923+0000 m30999| 2015-07-19T23:39:59.923+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57453 #92 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.924+0000 m30999| 2015-07-19T23:39:59.923+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57454 #93 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.925+0000 m30999| 2015-07-19T23:39:59.925+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:57455 #94 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.942+0000 m30998| 2015-07-19T23:39:59.941+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36097 #92 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.944+0000 m30998| 2015-07-19T23:39:59.943+0000 I NETWORK [mongosMain] connection accepted from 10.139.123.131:36098 #93 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.947+0000 setting random seed: 6511298846453
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.949+0000 setting random seed: 603994526900
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.949+0000 setting random seed: 3654719530604
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.949+0000 setting random seed: 8331287312321
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.949+0000 setting random seed: 8996855211444
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.950+0000 m30998| 2015-07-19T23:39:59.950+0000 I SHARDING [conn93] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 23 version: 2|5||55ac354ed2c1f750d154840d based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.951+0000 m31101| 2015-07-19T23:39:59.950+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46956 #11 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.951+0000 m31101| 2015-07-19T23:39:59.950+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46957 #12 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.951+0000 m31101| 2015-07-19T23:39:59.951+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46958 #13 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.951+0000 m31102| 2015-07-19T23:39:59.951+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37744 #11 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.952+0000 m31202| 2015-07-19T23:39:59.952+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:42263 #11 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.953+0000 m31101| 2015-07-19T23:39:59.953+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46962 #14 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.953+0000 m31201| 2015-07-19T23:39:59.953+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:36230 #11 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.953+0000 m31202| 2015-07-19T23:39:59.953+0000 I SHARDING [conn11] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.953+0000 m31201| 2015-07-19T23:39:59.953+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:36232 #12 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.954+0000 m31101| 2015-07-19T23:39:59.953+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46964 #15 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.954+0000 m31201| 2015-07-19T23:39:59.953+0000 I SHARDING [conn11] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.954+0000 m31101| 2015-07-19T23:39:59.954+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46965 #16 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.954+0000 m31202| 2015-07-19T23:39:59.954+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:42269 #12 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.955+0000 m31202| 2015-07-19T23:39:59.954+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:42270 #13 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.955+0000 m31201| 2015-07-19T23:39:59.955+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:36237 #13 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.956+0000 m31201| 2015-07-19T23:39:59.956+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:36238 #14 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.960+0000 m31102| 2015-07-19T23:39:59.959+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:37755 #12 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.963+0000 m31101| 2015-07-19T23:39:59.963+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46971 #17 (15 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.970+0000 m31202| 2015-07-19T23:39:59.970+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:42275 #14 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.970+0000 m31201| 2015-07-19T23:39:59.970+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:36242 #15 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.972+0000 m31101| 2015-07-19T23:39:59.971+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:46974 #18 (16 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:39:59.987+0000 m31201| 2015-07-19T23:39:59.987+0000 I NETWORK [initandlisten] connection accepted from 10.139.123.131:36244 #16 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.045+0000 m30999| 2015-07-19T23:40:00.044+0000 I NETWORK [conn94] end connection 10.139.123.131:57455 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.048+0000 m30998| 2015-07-19T23:40:00.048+0000 I NETWORK [conn92] end connection 10.139.123.131:36097 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.074+0000 m30998| 2015-07-19T23:40:00.074+0000 I NETWORK [conn93] end connection 10.139.123.131:36098 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.094+0000 m30999| 2015-07-19T23:40:00.093+0000 I NETWORK [conn92] end connection 10.139.123.131:57453 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.098+0000 m30999| 2015-07-19T23:40:00.098+0000 I NETWORK [conn93] end connection 10.139.123.131:57454 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.113+0000 m30999| 2015-07-19T23:40:00.113+0000 I COMMAND [conn1] DROP: db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.114+0000 m30999| 2015-07-19T23:40:00.113+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:40:00.113+0000-55ac3550d2c1f750d154840f", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349200113), what: "dropCollection.start", ns: "db17.coll17", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.131+0000 m30999| 2015-07-19T23:40:00.130+0000 I SHARDING [conn1] distributed lock 'db17.coll17/ip-10-139-123-131:30999:1437349128:1804289383' acquired, ts : 55ac3550d2c1f750d1548410
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.131+0000 m31100| 2015-07-19T23:40:00.131+0000 I COMMAND [conn127] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.131+0000 m31100| 2015-07-19T23:40:00.131+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 495ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.132+0000 m31100| 2015-07-19T23:40:00.131+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 494ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.132+0000 m31200| 2015-07-19T23:40:00.132+0000 I COMMAND [conn14] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.132+0000 m31200| 2015-07-19T23:40:00.132+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:12105 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 532ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.133+0000 m31200| 2015-07-19T23:40:00.132+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:12105 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 532ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.134+0000 m31101| 2015-07-19T23:40:00.134+0000 I COMMAND [repl writer worker 4] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.135+0000 m31102| 2015-07-19T23:40:00.134+0000 I COMMAND [repl writer worker 12] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.136+0000 m31202| 2015-07-19T23:40:00.135+0000 I COMMAND [repl writer worker 2] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.136+0000 m31201| 2015-07-19T23:40:00.135+0000 I COMMAND [repl writer worker 6] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.185+0000 m31100| 2015-07-19T23:40:00.184+0000 I SHARDING [conn127] remotely refreshing metadata for db17.coll17 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||55ac354ed2c1f750d154840d, current metadata version is 2|3||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.185+0000 m31100| 2015-07-19T23:40:00.185+0000 W SHARDING [conn127] no chunks found when reloading db17.coll17, previous version was 0|0||55ac354ed2c1f750d154840d, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.185+0000 m31100| 2015-07-19T23:40:00.185+0000 I SHARDING [conn127] dropping metadata for db17.coll17 at shard version 2|3||55ac354ed2c1f750d154840d, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.186+0000 m31200| 2015-07-19T23:40:00.185+0000 I SHARDING [conn14] remotely refreshing metadata for db17.coll17 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||55ac354ed2c1f750d154840d, current metadata version is 2|5||55ac354ed2c1f750d154840d
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.186+0000 m31200| 2015-07-19T23:40:00.186+0000 W SHARDING [conn14] no chunks found when reloading db17.coll17, previous version was 0|0||55ac354ed2c1f750d154840d, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.186+0000 m31200| 2015-07-19T23:40:00.186+0000 I SHARDING [conn14] dropping metadata for db17.coll17 at shard version 2|5||55ac354ed2c1f750d154840d, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.186+0000 m30999| 2015-07-19T23:40:00.186+0000 I SHARDING [conn1] about to log metadata event: { _id: "ip-10-139-123-131-2015-07-19T23:40:00.186+0000-55ac3550d2c1f750d1548411", server: "ip-10-139-123-131", clientAddr: "127.0.0.1:47275", time: new Date(1437349200186), what: "dropCollection", ns: "db17.coll17", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.237+0000 m30999| 2015-07-19T23:40:00.237+0000 I SHARDING [conn1] distributed lock 'db17.coll17/ip-10-139-123-131:30999:1437349128:1804289383' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.288+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.289+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.289+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.289+0000 jstests/concurrency/fsm_workloads/agg_base.js: Workload completed in 251 ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.289+0000 ----
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.289+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.289+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.291+0000 m30999| 2015-07-19T23:40:00.291+0000 I CONTROL [signalProcessingThread] got signal 15 (Terminated), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.291+0000 m30999| 2015-07-19T23:40:00.291+0000 W SHARDING [LockPinger] removing distributed lock ping thread 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:30999:1437349128:1804289383'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.291+0000 m30999| 2015-07-19T23:40:00.291+0000 I NETWORK [LockPinger] scoped connection to test-configRS/ip-10-139-123-131:29000 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.291+0000 m30999| 2015-07-19T23:40:00.291+0000 I SHARDING [signalProcessingThread] dbexit: rc:0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.291+0000 m29000| 2015-07-19T23:40:00.291+0000 I NETWORK [conn4] end connection 10.139.123.131:55237 (41 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.297+0000 m29000| 2015-07-19T23:40:00.293+0000 I NETWORK [conn3] end connection 10.139.123.131:55236 (40 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.297+0000 m31102| 2015-07-19T23:40:00.295+0000 I NETWORK [conn7] end connection 10.139.123.131:37324 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.297+0000 m31200| 2015-07-19T23:40:00.294+0000 I NETWORK [conn18] end connection 10.139.123.131:39392 (72 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.298+0000 m31200| 2015-07-19T23:40:00.294+0000 I NETWORK [conn98] end connection 10.139.123.131:39669 (71 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.298+0000 m31200| 2015-07-19T23:40:00.294+0000 I NETWORK [conn111] end connection 10.139.123.131:39683 (71 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.298+0000 m31200| 2015-07-19T23:40:00.294+0000 I NETWORK [conn90] end connection 10.139.123.131:39661 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.298+0000 m31200| 2015-07-19T23:40:00.294+0000 I NETWORK [conn30] end connection 10.139.123.131:39481 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.298+0000 m31200| 2015-07-19T23:40:00.294+0000 I NETWORK [conn19] end connection 10.139.123.131:39396 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.298+0000 m31200| 2015-07-19T23:40:00.295+0000 I NETWORK [conn93] end connection 10.139.123.131:39664 (66 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.298+0000 m31200| 2015-07-19T23:40:00.295+0000 I NETWORK [conn94] end connection 10.139.123.131:39665 (65 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.298+0000 m31200| 2015-07-19T23:40:00.295+0000 I NETWORK [conn112] end connection 10.139.123.131:39694 (64 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.298+0000 m31200| 2015-07-19T23:40:00.295+0000 I NETWORK [conn28] end connection 10.139.123.131:39471 (63 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.299+0000 m31200| 2015-07-19T23:40:00.295+0000 I NETWORK [conn113] end connection 10.139.123.131:39695 (62 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.299+0000 m31200| 2015-07-19T23:40:00.295+0000 I NETWORK [conn97] end connection 10.139.123.131:39668 (61 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.299+0000 m31200| 2015-07-19T23:40:00.295+0000 I NETWORK [conn13] end connection 10.139.123.131:39369 (66 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.299+0000 m31200| 2015-07-19T23:40:00.296+0000 I NETWORK [conn131] end connection 10.139.123.131:39756 (59 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.299+0000 m31200| 2015-07-19T23:40:00.296+0000 I NETWORK [conn116] end connection 10.139.123.131:39700 (59 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.299+0000 m31200| 2015-07-19T23:40:00.296+0000 I NETWORK [conn133] end connection 10.139.123.131:39758 (57 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.299+0000 m31200| 2015-07-19T23:40:00.296+0000 I NETWORK [conn117] end connection 10.139.123.131:39721 (56 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.299+0000 m31200| 2015-07-19T23:40:00.296+0000 I NETWORK [conn119] end connection 10.139.123.131:39725 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.299+0000 m29000| 2015-07-19T23:40:00.294+0000 I NETWORK [conn17] end connection 10.139.123.131:55262 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m29000| 2015-07-19T23:40:00.294+0000 I NETWORK [conn8] end connection 10.139.123.131:55241 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m29000| 2015-07-19T23:40:00.294+0000 I NETWORK [conn5] end connection 10.139.123.131:55238 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m29000| 2015-07-19T23:40:00.294+0000 I NETWORK [conn6] end connection 10.139.123.131:55239 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m29000| 2015-07-19T23:40:00.294+0000 I NETWORK [conn9] end connection 10.139.123.131:55242 (35 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m29000| 2015-07-19T23:40:00.295+0000 I NETWORK [conn33] end connection 10.139.123.131:55354 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m29000| 2015-07-19T23:40:00.295+0000 I NETWORK [conn32] end connection 10.139.123.131:55348 (33 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m29000| 2015-07-19T23:40:00.296+0000 I NETWORK [conn40] end connection 10.139.123.131:55701 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m29000| 2015-07-19T23:40:00.296+0000 I NETWORK [conn7] end connection 10.139.123.131:55240 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m31201| 2015-07-19T23:40:00.294+0000 I NETWORK [conn6] end connection 10.139.123.131:35733 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m31100| 2015-07-19T23:40:00.294+0000 I NETWORK [conn12] end connection 10.139.123.131:47531 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.300+0000 m31100| 2015-07-19T23:40:00.294+0000 I NETWORK [conn27] end connection 10.139.123.131:47580 (74 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m31100| 2015-07-19T23:40:00.294+0000 I NETWORK [conn11] end connection 10.139.123.131:47530 (74 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m31100| 2015-07-19T23:40:00.294+0000 I NETWORK [conn33] end connection 10.139.123.131:47643 (72 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m31101| 2015-07-19T23:40:00.294+0000 I NETWORK [conn6] end connection 10.139.123.131:46461 (15 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m31100| 2015-07-19T23:40:00.294+0000 I NETWORK [conn16] end connection 10.139.123.131:47538 (72 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m29000| 2015-07-19T23:40:00.297+0000 I NETWORK [conn34] end connection 10.139.123.131:55389 (30 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m31100| 2015-07-19T23:40:00.294+0000 I NETWORK [conn23] end connection 10.139.123.131:47575 (70 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m31100| 2015-07-19T23:40:00.295+0000 I NETWORK [conn15] end connection 10.139.123.131:47537 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m31100| 2015-07-19T23:40:00.295+0000 I NETWORK [conn34] end connection 10.139.123.131:47644 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m31100| 2015-07-19T23:40:00.295+0000 I NETWORK [conn26] end connection 10.139.123.131:47579 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m31100| 2015-07-19T23:40:00.295+0000 I NETWORK [conn42] end connection 10.139.123.131:47679 (66 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.301+0000 m31100| 2015-07-19T23:40:00.295+0000 I NETWORK [conn57] end connection 10.139.123.131:47694 (66 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.295+0000 I NETWORK [conn38] end connection 10.139.123.131:47651 (64 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.295+0000 I NETWORK [conn45] end connection 10.139.123.131:47682 (64 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.295+0000 I NETWORK [conn43] end connection 10.139.123.131:47680 (62 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.295+0000 I NETWORK [conn51] end connection 10.139.123.131:47688 (61 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.296+0000 I NETWORK [conn125] end connection 10.139.123.131:47906 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.296+0000 I NETWORK [conn49] end connection 10.139.123.131:47686 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.296+0000 I NETWORK [conn52] end connection 10.139.123.131:47689 (59 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.294+0000 I NETWORK [conn117] end connection 10.139.123.131:47886 (72 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.296+0000 I NETWORK [conn40] end connection 10.139.123.131:47677 (56 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.296+0000 I NETWORK [conn116] end connection 10.139.123.131:47861 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.302+0000 m31100| 2015-07-19T23:40:00.296+0000 I NETWORK [conn41] end connection 10.139.123.131:47678 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31100| 2015-07-19T23:40:00.296+0000 I NETWORK [conn115] end connection 10.139.123.131:47859 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31100| 2015-07-19T23:40:00.296+0000 I NETWORK [conn126] end connection 10.139.123.131:47907 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31100| 2015-07-19T23:40:00.296+0000 I NETWORK [conn118] end connection 10.139.123.131:47888 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31200| 2015-07-19T23:40:00.296+0000 I NETWORK [conn118] end connection 10.139.123.131:39723 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31200| 2015-07-19T23:40:00.295+0000 I NETWORK [conn33] end connection 10.139.123.131:39488 (62 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31200| 2015-07-19T23:40:00.295+0000 I NETWORK [conn130] end connection 10.139.123.131:39744 (63 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31200| 2015-07-19T23:40:00.297+0000 I NETWORK [conn14] end connection 10.139.123.131:39370 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31200| 2015-07-19T23:40:00.297+0000 I NETWORK [conn123] end connection 10.139.123.131:39731 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31100| 2015-07-19T23:40:00.297+0000 I NETWORK [conn127] end connection 10.139.123.131:47929 (50 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31202| 2015-07-19T23:40:00.295+0000 I NETWORK [conn7] end connection 10.139.123.131:41843 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.303+0000 m31101| 2015-07-19T23:40:00.297+0000 I NETWORK [conn11] end connection 10.139.123.131:46956 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31100| 2015-07-19T23:40:00.297+0000 I NETWORK [conn120] end connection 10.139.123.131:47894 (49 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31100| 2015-07-19T23:40:00.297+0000 I NETWORK [conn119] end connection 10.139.123.131:47893 (49 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31200| 2015-07-19T23:40:00.298+0000 I NETWORK [conn127] end connection 10.139.123.131:39736 (49 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31200| 2015-07-19T23:40:00.298+0000 I NETWORK [conn128] end connection 10.139.123.131:39737 (49 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31100| 2015-07-19T23:40:00.298+0000 I NETWORK [conn122] end connection 10.139.123.131:47903 (47 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31101| 2015-07-19T23:40:00.298+0000 I NETWORK [conn12] end connection 10.139.123.131:46957 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31200| 2015-07-19T23:40:00.298+0000 I NETWORK [conn142] end connection 10.139.123.131:39793 (47 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31200| 2015-07-19T23:40:00.298+0000 I NETWORK [conn143] end connection 10.139.123.131:39796 (47 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31102| 2015-07-19T23:40:00.298+0000 I NETWORK [conn11] end connection 10.139.123.131:37744 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31200| 2015-07-19T23:40:00.298+0000 I NETWORK [conn146] end connection 10.139.123.131:39849 (45 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31202| 2015-07-19T23:40:00.298+0000 I NETWORK [conn11] end connection 10.139.123.131:42263 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.304+0000 m31201| 2015-07-19T23:40:00.298+0000 I NETWORK [conn11] end connection 10.139.123.131:36230 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:00.305+0000 m31201| 2015-07-19T23:40:00.299+0000 I NETWORK [conn12] end connection 10.139.123.131:36232 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.136+0000 m31100| 2015-07-19T23:40:01.136+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.137+0000 m31100| 2015-07-19T23:40:01.136+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.137+0000 m31200| 2015-07-19T23:40:01.137+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.137+0000 m31200| 2015-07-19T23:40:01.137+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.291+0000 2015-07-19T23:40:01.291+0000 I - [main] shell: stopped mongo program on port 30999
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.292+0000 m30998| 2015-07-19T23:40:01.291+0000 I CONTROL [signalProcessingThread] got signal 15 (Terminated), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.292+0000 m30998| 2015-07-19T23:40:01.291+0000 W SHARDING [LockPinger] removing distributed lock ping thread 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:30998:1437349129:1804289383'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.292+0000 m30998| 2015-07-19T23:40:01.291+0000 I NETWORK [LockPinger] scoped connection to test-configRS/ip-10-139-123-131:29000 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.292+0000 m30998| 2015-07-19T23:40:01.291+0000 I SHARDING [signalProcessingThread] dbexit: rc:0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.292+0000 m30998| 2015-07-19T23:40:01.291+0000 I SHARDING [mongosMain] dbexit: rc:48
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.292+0000 m29000| 2015-07-19T23:40:01.292+0000 I NETWORK [conn14] end connection 10.139.123.131:55258 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.296+0000 m31200| 2015-07-19T23:40:01.293+0000 I NETWORK [conn21] end connection 10.139.123.131:39404 (44 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.296+0000 m31200| 2015-07-19T23:40:01.293+0000 I NETWORK [conn25] end connection 10.139.123.131:39409 (43 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.297+0000 m31200| 2015-07-19T23:40:01.293+0000 I NETWORK [conn145] end connection 10.139.123.131:39804 (42 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.297+0000 m31101| 2015-07-19T23:40:01.293+0000 I NETWORK [conn9] end connection 10.139.123.131:46543 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.297+0000 m31102| 2015-07-19T23:40:01.294+0000 I NETWORK [conn8] end connection 10.139.123.131:37327 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.298+0000 m31200| 2015-07-19T23:40:01.294+0000 I NETWORK [conn32] end connection 10.139.123.131:39485 (41 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.298+0000 m31200| 2015-07-19T23:40:01.294+0000 I NETWORK [conn27] end connection 10.139.123.131:39469 (40 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.298+0000 m31200| 2015-07-19T23:40:01.294+0000 I NETWORK [conn22] end connection 10.139.123.131:39406 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.298+0000 m31200| 2015-07-19T23:40:01.294+0000 I NETWORK [conn102] end connection 10.139.123.131:39673 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.298+0000 m31200| 2015-07-19T23:40:01.294+0000 I NETWORK [conn134] end connection 10.139.123.131:39764 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.298+0000 m31200| 2015-07-19T23:40:01.294+0000 I NETWORK [conn29] end connection 10.139.123.131:39473 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.298+0000 m31200| 2015-07-19T23:40:01.294+0000 I NETWORK [conn135] end connection 10.139.123.131:39766 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.298+0000 m31200| 2015-07-19T23:40:01.295+0000 I NETWORK [conn99] end connection 10.139.123.131:39670 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.298+0000 m31200| 2015-07-19T23:40:01.295+0000 I NETWORK [conn86] end connection 10.139.123.131:39656 (33 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.298+0000 m31200| 2015-07-19T23:40:01.295+0000 I NETWORK [conn89] end connection 10.139.123.131:39660 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.299+0000 m31200| 2015-07-19T23:40:01.295+0000 I NETWORK [conn114] end connection 10.139.123.131:39698 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.299+0000 m31200| 2015-07-19T23:40:01.295+0000 I NETWORK [conn103] end connection 10.139.123.131:39674 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.299+0000 m31200| 2015-07-19T23:40:01.295+0000 I NETWORK [conn115] end connection 10.139.123.131:39699 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.299+0000 m31200| 2015-07-19T23:40:01.295+0000 I NETWORK [conn144] end connection 10.139.123.131:39797 (28 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.299+0000 m31200| 2015-07-19T23:40:01.296+0000 I NETWORK [conn31] end connection 10.139.123.131:39483 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.299+0000 m31200| 2015-07-19T23:40:01.296+0000 I NETWORK [conn122] end connection 10.139.123.131:39728 (26 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.299+0000 m31200| 2015-07-19T23:40:01.296+0000 I NETWORK [conn121] end connection 10.139.123.131:39727 (26 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.299+0000 m31200| 2015-07-19T23:40:01.296+0000 I NETWORK [conn136] end connection 10.139.123.131:39767 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.299+0000 m31200| 2015-07-19T23:40:01.296+0000 I NETWORK [conn125] end connection 10.139.123.131:39733 (24 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31200| 2015-07-19T23:40:01.296+0000 I NETWORK [conn124] end connection 10.139.123.131:39732 (22 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31200| 2015-07-19T23:40:01.296+0000 I NETWORK [conn88] end connection 10.139.123.131:39659 (21 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31100| 2015-07-19T23:40:01.293+0000 I NETWORK [conn20] end connection 10.139.123.131:47567 (46 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31100| 2015-07-19T23:40:01.293+0000 I NETWORK [conn21] end connection 10.139.123.131:47569 (45 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31100| 2015-07-19T23:40:01.293+0000 I NETWORK [conn30] end connection 10.139.123.131:47632 (44 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31100| 2015-07-19T23:40:01.293+0000 I NETWORK [conn22] end connection 10.139.123.131:47574 (43 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31100| 2015-07-19T23:40:01.294+0000 I NETWORK [conn29] end connection 10.139.123.131:47631 (42 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31100| 2015-07-19T23:40:01.294+0000 I NETWORK [conn25] end connection 10.139.123.131:47578 (42 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31100| 2015-07-19T23:40:01.294+0000 I NETWORK [conn110] end connection 10.139.123.131:47816 (40 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31100| 2015-07-19T23:40:01.294+0000 I NETWORK [conn31] end connection 10.139.123.131:47634 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.300+0000 m31100| 2015-07-19T23:40:01.294+0000 I NETWORK [conn39] end connection 10.139.123.131:47676 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.294+0000 I NETWORK [conn46] end connection 10.139.123.131:47683 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.294+0000 I NETWORK [conn55] end connection 10.139.123.131:47692 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.294+0000 I NETWORK [conn54] end connection 10.139.123.131:47691 (35 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.295+0000 I NETWORK [conn112] end connection 10.139.123.131:47818 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.295+0000 I NETWORK [conn48] end connection 10.139.123.131:47685 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.295+0000 I NETWORK [conn50] end connection 10.139.123.131:47687 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.295+0000 I NETWORK [conn47] end connection 10.139.123.131:47684 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.295+0000 I NETWORK [conn35] end connection 10.139.123.131:47646 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.295+0000 I NETWORK [conn53] end connection 10.139.123.131:47690 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.295+0000 I NETWORK [conn36] end connection 10.139.123.131:47648 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.301+0000 m31100| 2015-07-19T23:40:01.295+0000 I NETWORK [conn32] end connection 10.139.123.131:47636 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m31100| 2015-07-19T23:40:01.295+0000 I NETWORK [conn58] end connection 10.139.123.131:47695 (26 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m31100| 2015-07-19T23:40:01.296+0000 I NETWORK [conn37] end connection 10.139.123.131:47650 (25 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m31100| 2015-07-19T23:40:01.296+0000 I NETWORK [conn113] end connection 10.139.123.131:47819 (24 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m31100| 2015-07-19T23:40:01.296+0000 I NETWORK [conn111] end connection 10.139.123.131:47817 (23 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m31100| 2015-07-19T23:40:01.296+0000 I NETWORK [conn123] end connection 10.139.123.131:47904 (22 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m31100| 2015-07-19T23:40:01.296+0000 I NETWORK [conn44] end connection 10.139.123.131:47681 (21 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m29000| 2015-07-19T23:40:01.293+0000 I NETWORK [conn11] end connection 10.139.123.131:55251 (28 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m29000| 2015-07-19T23:40:01.293+0000 I NETWORK [conn15] end connection 10.139.123.131:55259 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m29000| 2015-07-19T23:40:01.293+0000 I NETWORK [conn13] end connection 10.139.123.131:55253 (26 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m29000| 2015-07-19T23:40:01.294+0000 I NETWORK [conn10] end connection 10.139.123.131:55250 (25 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.302+0000 m29000| 2015-07-19T23:40:01.294+0000 I NETWORK [conn12] end connection 10.139.123.131:55252 (24 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m29000| 2015-07-19T23:40:01.295+0000 I NETWORK [conn30] end connection 10.139.123.131:55335 (23 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m29000| 2015-07-19T23:40:01.296+0000 I NETWORK [conn37] end connection 10.139.123.131:55668 (22 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m29000| 2015-07-19T23:40:01.296+0000 I NETWORK [conn35] end connection 10.139.123.131:55666 (22 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m31101| 2015-07-19T23:40:01.296+0000 I NETWORK [conn14] end connection 10.139.123.131:46962 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m29000| 2015-07-19T23:40:01.296+0000 I NETWORK [conn36] end connection 10.139.123.131:55667 (20 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m31100| 2015-07-19T23:40:01.296+0000 I NETWORK [conn56] end connection 10.139.123.131:47693 (20 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m29000| 2015-07-19T23:40:01.297+0000 I NETWORK [conn16] end connection 10.139.123.131:55260 (19 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m31202| 2015-07-19T23:40:01.294+0000 I NETWORK [conn8] end connection 10.139.123.131:41847 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m31202| 2015-07-19T23:40:01.296+0000 I NETWORK [conn13] end connection 10.139.123.131:42270 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m31100| 2015-07-19T23:40:01.297+0000 I NETWORK [conn114] end connection 10.139.123.131:47821 (19 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.303+0000 m31101| 2015-07-19T23:40:01.297+0000 I NETWORK [conn13] end connection 10.139.123.131:46958 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.304+0000 m31201| 2015-07-19T23:40:01.295+0000 I NETWORK [conn9] end connection 10.139.123.131:35814 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.304+0000 m31201| 2015-07-19T23:40:01.296+0000 I NETWORK [conn13] end connection 10.139.123.131:36237 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.304+0000 m31200| 2015-07-19T23:40:01.297+0000 I NETWORK [conn87] end connection 10.139.123.131:39658 (20 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.304+0000 m31200| 2015-07-19T23:40:01.297+0000 I NETWORK [conn132] end connection 10.139.123.131:39757 (19 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.304+0000 m29000| 2015-07-19T23:40:01.297+0000 I NETWORK [conn38] end connection 10.139.123.131:55669 (18 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.304+0000 m31200| 2015-07-19T23:40:01.297+0000 I NETWORK [conn120] end connection 10.139.123.131:39726 (18 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.304+0000 m31100| 2015-07-19T23:40:01.297+0000 I NETWORK [conn124] end connection 10.139.123.131:47905 (18 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.304+0000 m31100| 2015-07-19T23:40:01.297+0000 I NETWORK [conn121] end connection 10.139.123.131:47899 (17 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:01.304+0000 m31200| 2015-07-19T23:40:01.297+0000 I NETWORK [conn129] end connection 10.139.123.131:39738 (17 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.139+0000 m31100| 2015-07-19T23:40:02.139+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:22742224137 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.139+0000 m31100| 2015-07-19T23:40:02.139+0000 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349108000|1 } } cursorid:23412116704 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.140+0000 m31200| 2015-07-19T23:40:02.139+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.140+0000 m31200| 2015-07-19T23:40:02.140+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.291+0000 2015-07-19T23:40:02.291+0000 I - [main] shell: stopped mongo program on port 30998
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.292+0000 2015-07-19T23:40:02.291+0000 I - [main] No db started on port: 30000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.292+0000 2015-07-19T23:40:02.291+0000 I - [main] shell: stopped mongo program on port 30000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.292+0000 2015-07-19T23:40:02.291+0000 I - [main] No db started on port: 30001
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.292+0000 2015-07-19T23:40:02.291+0000 I - [main] shell: stopped mongo program on port 30001
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.292+0000 ReplSetTest n: 0 ports: [ 31100, 31101, 31102 ] 31100 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.292+0000 ReplSetTest stop *** Shutting down mongod in port 31100 ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.293+0000 m31100| 2015-07-19T23:40:02.292+0000 I CONTROL [signalProcessingThread] got signal 15 (Terminated), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.293+0000 m31100| 2015-07-19T23:40:02.292+0000 I REPL [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.912+0000 m31100| 2015-07-19T23:40:02.911+0000 I STORAGE [conn3] got request after shutdown()
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.912+0000 m31102| 2015-07-19T23:40:02.912+0000 I REPL [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31100; HostUnreachable network error while attempting to run command 'replSetHeartbeat' on host 'ip-10-139-123-131:31100'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.917+0000 m31100| 2015-07-19T23:40:02.917+0000 I STORAGE [conn2] got request after shutdown()
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.917+0000 m31101| 2015-07-19T23:40:02.917+0000 I REPL [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31100; HostUnreachable network error while attempting to run command 'replSetHeartbeat' on host 'ip-10-139-123-131:31100'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.932+0000 m31100| 2015-07-19T23:40:02.931+0000 W SHARDING [LockPinger] removing distributed lock ping thread 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:31100:1437349130:1993228155'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.932+0000 m31100| 2015-07-19T23:40:02.931+0000 W SHARDING [LockPinger] Error encountered while stopping ping on ip-10-139-123-131:31100:1437349130:1993228155 :: caused by :: 17382 Can't use connection pool during shutdown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.932+0000 m31100| 2015-07-19T23:40:02.932+0000 I CONTROL [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.932+0000 m31100| 2015-07-19T23:40:02.932+0000 I NETWORK [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.932+0000 m31100| 2015-07-19T23:40:02.932+0000 I NETWORK [signalProcessingThread] closing listening socket: 9
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.932+0000 m31100| 2015-07-19T23:40:02.932+0000 I NETWORK [signalProcessingThread] closing listening socket: 10
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.932+0000 m31100| 2015-07-19T23:40:02.932+0000 I NETWORK [signalProcessingThread] closing listening socket: 11
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.932+0000 m31100| 2015-07-19T23:40:02.932+0000 I NETWORK [signalProcessingThread] removing socket file: /tmp/mongodb-31100.sock
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.933+0000 m31100| 2015-07-19T23:40:02.932+0000 I NETWORK [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.933+0000 m31100| 2015-07-19T23:40:02.932+0000 I NETWORK [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.933+0000 m31101| 2015-07-19T23:40:02.932+0000 I NETWORK [ReplExecNetThread-0] Socket recv() errno:104 Connection reset by peer 10.139.123.131:31100
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.933+0000 m31101| 2015-07-19T23:40:02.932+0000 I NETWORK [ReplExecNetThread-0] SocketException: remote: 10.139.123.131:31100 error: 9001 socket exception [RECV_ERROR] server [10.139.123.131:31100]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.933+0000 m31101| 2015-07-19T23:40:02.932+0000 I REPL [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31100; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31100; network error while attempting to run command 'isMaster' on host 'ip-10-139-123-131:31100'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.933+0000 m31100| 2015-07-19T23:40:02.932+0000 I STORAGE [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.933+0000 m31100| 2015-07-19T23:40:02.932+0000 I NETWORK [conn1] end connection 127.0.0.1:46330 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.933+0000 m31102| 2015-07-19T23:40:02.932+0000 I NETWORK [conn3] end connection 10.139.123.131:37165 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.934+0000 m31200| 2015-07-19T23:40:02.932+0000 I NETWORK [conn15] end connection 10.139.123.131:39382 (16 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.934+0000 m31102| 2015-07-19T23:40:02.932+0000 I NETWORK [ReplExecNetThread-1] Socket recv() errno:104 Connection reset by peer 10.139.123.131:31100
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.934+0000 m31102| 2015-07-19T23:40:02.932+0000 I NETWORK [ReplExecNetThread-1] SocketException: remote: 10.139.123.131:31100 error: 9001 socket exception [RECV_ERROR] server [10.139.123.131:31100]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.934+0000 m31101| 2015-07-19T23:40:02.933+0000 I NETWORK [conn3] end connection 10.139.123.131:46381 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.934+0000 m31102| 2015-07-19T23:40:02.933+0000 E REPL [rsBackgroundSync] sync producer problem: 10278 dbclient error communicating with server: ip-10-139-123-131:31100
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.934+0000 m31101| 2015-07-19T23:40:02.933+0000 E REPL [rsBackgroundSync] sync producer problem: 10278 dbclient error communicating with server: ip-10-139-123-131:31100
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.934+0000 m31101| 2015-07-19T23:40:02.933+0000 I REPL [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.934+0000 m31100| 2015-07-19T23:40:02.933+0000 I NETWORK [conn28] end connection 10.139.123.131:47616 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.934+0000 m31100| 2015-07-19T23:40:02.933+0000 I NETWORK [conn8] end connection 10.139.123.131:47494 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.934+0000 m31102| 2015-07-19T23:40:02.933+0000 I NETWORK [conn9] end connection 10.139.123.131:37334 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.935+0000 m31101| 2015-07-19T23:40:02.933+0000 I NETWORK [conn10] end connection 10.139.123.131:46547 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.935+0000 m31200| 2015-07-19T23:40:02.933+0000 I NETWORK [conn16] end connection 10.139.123.131:39383 (15 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.935+0000 m31100| 2015-07-19T23:40:02.933+0000 I NETWORK [conn109] end connection 10.139.123.131:47802 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.935+0000 m31100| 2015-07-19T23:40:02.933+0000 I NETWORK [conn13] end connection 10.139.123.131:47535 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.935+0000 m31100| 2015-07-19T23:40:02.933+0000 I NETWORK [conn18] end connection 10.139.123.131:47551 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.935+0000 m31100| 2015-07-19T23:40:02.933+0000 I NETWORK [conn14] end connection 10.139.123.131:47536 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.936+0000 m31100| 2015-07-19T23:40:02.934+0000 I NETWORK [conn19] end connection 10.139.123.131:47552 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.936+0000 m29000| 2015-07-19T23:40:02.934+0000 I NETWORK [conn20] end connection 10.139.123.131:55276 (17 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.936+0000 m29000| 2015-07-19T23:40:02.934+0000 I NETWORK [conn19] end connection 10.139.123.131:55275 (16 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.936+0000 m29000| 2015-07-19T23:40:02.934+0000 I NETWORK [conn18] end connection 10.139.123.131:55274 (16 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.936+0000 m31201| 2015-07-19T23:40:02.934+0000 I NETWORK [conn7] end connection 10.139.123.131:35746 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.936+0000 m31202| 2015-07-19T23:40:02.934+0000 I NETWORK [conn9] end connection 10.139.123.131:41853 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.936+0000 m29000| 2015-07-19T23:40:02.934+0000 I NETWORK [conn21] end connection 10.139.123.131:55277 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.936+0000 m29000| 2015-07-19T23:40:02.934+0000 I NETWORK [conn22] end connection 10.139.123.131:55278 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.937+0000 m31200| 2015-07-19T23:40:02.934+0000 I NETWORK [conn17] end connection 10.139.123.131:39389 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.937+0000 m29000| 2015-07-19T23:40:02.935+0000 I NETWORK [conn26] end connection 10.139.123.131:55289 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.937+0000 m31100| 2015-07-19T23:40:02.935+0000 I NETWORK [conn130] end connection 10.139.123.131:47939 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.937+0000 m31100| 2015-07-19T23:40:02.935+0000 I NETWORK [conn129] end connection 10.139.123.131:47938 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.937+0000 m31100| 2015-07-19T23:40:02.935+0000 I NETWORK [conn131] end connection 10.139.123.131:47941 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.937+0000 m31100| 2015-07-19T23:40:02.935+0000 I NETWORK [conn132] end connection 10.139.123.131:47944 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.937+0000 m31100| 2015-07-19T23:40:02.935+0000 I NETWORK [conn128] end connection 10.139.123.131:47936 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.938+0000 m31102| 2015-07-19T23:40:02.935+0000 I REPL [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31100; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31100; network error while attempting to run command 'isMaster' on host 'ip-10-139-123-131:31100'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.938+0000 m31102| 2015-07-19T23:40:02.935+0000 I REPL [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.938+0000 m31101| 2015-07-19T23:40:02.936+0000 W NETWORK [ReplExecNetThread-3] Failed to connect to 10.139.123.131:31100, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.938+0000 m31101| 2015-07-19T23:40:02.936+0000 I REPL [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31100; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31100; couldn't connect to server ip-10-139-123-131:31100, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.938+0000 m31101| 2015-07-19T23:40:02.936+0000 I REPL [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.938+0000 m31102| 2015-07-19T23:40:02.936+0000 W NETWORK [ReplExecNetThread-3] Failed to connect to 10.139.123.131:31100, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.939+0000 m31101| 2015-07-19T23:40:02.936+0000 I REPL [ReplicationExecutor] not electing self, ip-10-139-123-131:31102 would veto with 'ip-10-139-123-131:31101 is trying to elect itself but ip-10-139-123-131:31100 is already primary and more up-to-date'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.939+0000 m31101| 2015-07-19T23:40:02.936+0000 I REPL [ReplicationExecutor] not electing self, we are not freshest
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.939+0000 m31102| 2015-07-19T23:40:02.936+0000 I REPL [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31100; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31100; couldn't connect to server ip-10-139-123-131:31100, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.939+0000 m31102| 2015-07-19T23:40:02.936+0000 I REPL
[ReplicationExecutor] Standing for election [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.939+0000 m31102| 2015-07-19T23:40:02.936+0000 I REPL [ReplicationExecutor] possible election tie; sleeping 1043ms until 2015-07-19T23:40:03.979+0000 [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.995+0000 m31100| 2015-07-19T23:40:02.995+0000 I STORAGE [signalProcessingThread] shutdown: removing fs lock... [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:02.995+0000 m31100| 2015-07-19T23:40:02.995+0000 I CONTROL [signalProcessingThread] dbexit: rc: 0 [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.142+0000 m31200| 2015-07-19T23:40:03.142+0000 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.143+0000 m31200| 2015-07-19T23:40:03.142+0000 I QUERY [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.292+0000 2015-07-19T23:40:03.292+0000 I - [main] shell: stopped mongo program on port 31100 [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.292+0000 ReplSetTest stop *** Mongod in port 31100 shutdown with code (0) *** [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.293+0000 ReplSetTest n: 1 ports: [ 31100, 31101, 31102 ] 31101 number [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.293+0000 ReplSetTest stop *** Shutting down mongod in port 31101 *** [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.293+0000 m31101| 2015-07-19T23:40:03.292+0000 I CONTROL [signalProcessingThread] got signal 15 (Terminated), will terminate after current cmd ends [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.293+0000 m31101| 2015-07-19T23:40:03.293+0000 I REPL [signalProcessingThread] Stopping replication applier threads [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.558+0000 m31101| 2015-07-19T23:40:03.558+0000 I STORAGE [WiredTigerRecordStoreThread for local.oplog.rs] shutting down [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.980+0000 m31102| 2015-07-19T23:40:03.979+0000 I REPL [ReplicationExecutor] Standing for election [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.980+0000 m31101| 2015-07-19T23:40:03.980+0000 I STORAGE [conn5] got request after shutdown() [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:03.980+0000 m31102| 2015-07-19T23:40:03.980+0000 I REPL [ReplicationExecutor] not electing self, we could not contact enough voting members [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.140+0000 m31101| 2015-07-19T23:40:04.140+0000 I CONTROL [signalProcessingThread] now exiting [js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.141+0000 m31101| 2015-07-19T23:40:04.140+0000 I NETWORK [signalProcessingThread] shutdown: going to close listening sockets... 
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.141+0000 m31101| 2015-07-19T23:40:04.140+0000 I NETWORK  [signalProcessingThread] closing listening socket: 12
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.141+0000 m31101| 2015-07-19T23:40:04.140+0000 I NETWORK  [signalProcessingThread] closing listening socket: 13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.141+0000 m31101| 2015-07-19T23:40:04.140+0000 I NETWORK  [signalProcessingThread] closing listening socket: 14
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.141+0000 m31101| 2015-07-19T23:40:04.140+0000 I NETWORK  [signalProcessingThread] removing socket file: /tmp/mongodb-31101.sock
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.141+0000 m31101| 2015-07-19T23:40:04.140+0000 I NETWORK  [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.141+0000 m31101| 2015-07-19T23:40:04.140+0000 I NETWORK  [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.141+0000 m31101| 2015-07-19T23:40:04.140+0000 I STORAGE  [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.142+0000 m31101| 2015-07-19T23:40:04.140+0000 I NETWORK  [conn1] end connection 127.0.0.1:35764 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.142+0000 m31101| 2015-07-19T23:40:04.140+0000 I NETWORK  [conn8] end connection 10.139.123.131:46536 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.142+0000 m31102| 2015-07-19T23:40:04.140+0000 I NETWORK  [conn5] end connection 10.139.123.131:37174 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.142+0000 m31101| 2015-07-19T23:40:04.140+0000 I NETWORK  [conn7] end connection 10.139.123.131:46482 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.142+0000 m31101| 2015-07-19T23:40:04.141+0000 I NETWORK  [conn17] end connection 10.139.123.131:46971 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.142+0000 m31101| 2015-07-19T23:40:04.141+0000 I NETWORK  [conn15] end connection 10.139.123.131:46964 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.142+0000 m31101| 2015-07-19T23:40:04.141+0000 I NETWORK  [conn16] end connection 10.139.123.131:46965 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.142+0000 m31101| 2015-07-19T23:40:04.141+0000 I NETWORK  [conn18] end connection 10.139.123.131:46974 (0 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.145+0000 m31200| 2015-07-19T23:40:04.145+0000 I QUERY    [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.145+0000 m31200| 2015-07-19T23:40:04.145+0000 I QUERY    [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.173+0000 m31101| 2015-07-19T23:40:04.173+0000 I STORAGE  [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.173+0000 m31101| 2015-07-19T23:40:04.173+0000 I CONTROL  [signalProcessingThread] dbexit:  rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.293+0000 2015-07-19T23:40:04.292+0000 I -        [main] shell: stopped mongo program on port 31101
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.293+0000 ReplSetTest stop *** Mongod in port 31101 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.293+0000 ReplSetTest n: 2 ports: [ 31100, 31101, 31102 ] 31102 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.293+0000 ReplSetTest stop *** Shutting down mongod in port 31102 ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.293+0000 m31102| 2015-07-19T23:40:04.293+0000 I CONTROL  [signalProcessingThread] got signal 15 (Terminated), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.293+0000 m31102| 2015-07-19T23:40:04.293+0000 I REPL     [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:04.325+0000 m31102| 2015-07-19T23:40:04.325+0000 I STORAGE  [WiredTigerRecordStoreThread for local.oplog.rs] shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.140+0000 m31102| 2015-07-19T23:40:05.140+0000 I CONTROL  [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.140+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.140+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [signalProcessingThread] closing listening socket: 15
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.141+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [signalProcessingThread] closing listening socket: 16
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.141+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [signalProcessingThread] closing listening socket: 17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.141+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [signalProcessingThread] removing socket file: /tmp/mongodb-31102.sock
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.141+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.141+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.141+0000 m31102| 2015-07-19T23:40:05.140+0000 I STORAGE  [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.141+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [conn6] end connection 10.139.123.131:37320 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.141+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [conn10] end connection 10.139.123.131:37336 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.141+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [conn1] end connection 127.0.0.1:53864 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.141+0000 m31102| 2015-07-19T23:40:05.140+0000 I NETWORK  [conn12] end connection 10.139.123.131:37755 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.148+0000 m31200| 2015-07-19T23:40:05.147+0000 I QUERY    [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.148+0000 m31200| 2015-07-19T23:40:05.147+0000 I QUERY    [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.212+0000 m31102| 2015-07-19T23:40:05.212+0000 I STORAGE  [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.212+0000 m31102| 2015-07-19T23:40:05.212+0000 I CONTROL  [signalProcessingThread] dbexit:  rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.293+0000 2015-07-19T23:40:05.293+0000 I -        [main] shell: stopped mongo program on port 31102
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.293+0000 ReplSetTest stop *** Mongod in port 31102 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.293+0000 ReplSetTest stopSet deleting all dbpaths
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.325+0000 ReplSetTest stopSet *** Shut down repl set - test worked ****
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.325+0000 ReplSetTest n: 0 ports: [ 31200, 31201, 31202 ] 31200 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.325+0000 ReplSetTest stop *** Shutting down mongod in port 31200 ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.326+0000 m31200| 2015-07-19T23:40:05.325+0000 I CONTROL  [signalProcessingThread] got signal 15 (Terminated), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.326+0000 m31200| 2015-07-19T23:40:05.326+0000 I REPL     [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:05.446+0000 m31200| 2015-07-19T23:40:05.446+0000 I STORAGE  [WiredTigerRecordStoreThread for local.oplog.rs] shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.150+0000 m31200| 2015-07-19T23:40:06.150+0000 I QUERY    [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:22234108465 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.151+0000 m31200| 2015-07-19T23:40:06.150+0000 I QUERY    [conn11] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1437349120000|1 } } cursorid:21838946049 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:0 reslen:20 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1000ms
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.153+0000 m31202| 2015-07-19T23:40:06.153+0000 I NETWORK  [rsBackgroundSync] Socket flush send() errno:32 Broken pipe 10.139.123.131:31200
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.153+0000 m31202| 2015-07-19T23:40:06.153+0000 I -        [rsBackgroundSync] caught exception (socket exception [SEND_ERROR] for 10.139.123.131:31200) in destructor (~PiggyBackData)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.153+0000 m31202| 2015-07-19T23:40:06.153+0000 E REPL     [rsBackgroundSync] sync producer problem: 10278 dbclient error communicating with server: ip-10-139-123-131:31200
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.153+0000 m31201| 2015-07-19T23:40:06.153+0000 I NETWORK  [rsBackgroundSync] Socket flush send() errno:32 Broken pipe 10.139.123.131:31200
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.154+0000 m31201| 2015-07-19T23:40:06.153+0000 I -        [rsBackgroundSync] caught exception (socket exception [SEND_ERROR] for 10.139.123.131:31200) in destructor (~PiggyBackData)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.154+0000 m31201| 2015-07-19T23:40:06.153+0000 E REPL     [rsBackgroundSync] sync producer problem: 10278 dbclient error communicating with server: ip-10-139-123-131:31200
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.154+0000 m31201| 2015-07-19T23:40:06.153+0000 I REPL     [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.154+0000 m31202| 2015-07-19T23:40:06.153+0000 I REPL     [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.231+0000 m31200| 2015-07-19T23:40:06.231+0000 I STORAGE  [conn2] got request after shutdown()
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.231+0000 m31202| 2015-07-19T23:40:06.231+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31200; HostUnreachable network error while attempting to run command 'replSetHeartbeat' on host 'ip-10-139-123-131:31200'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.237+0000 m31200| 2015-07-19T23:40:06.236+0000 I STORAGE  [conn3] got request after shutdown()
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.237+0000 m31201| 2015-07-19T23:40:06.237+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31200; HostUnreachable network error while attempting to run command 'replSetHeartbeat' on host 'ip-10-139-123-131:31200'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.241+0000 m31200| 2015-07-19T23:40:06.241+0000 W SHARDING [LockPinger] removing distributed lock ping thread 'test-configRS/ip-10-139-123-131:29000/ip-10-139-123-131:31200:1437349131:182555922'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.241+0000 m31200| 2015-07-19T23:40:06.241+0000 W SHARDING [LockPinger] Error encountered while stopping ping on ip-10-139-123-131:31200:1437349131:182555922 :: caused by :: 17382 Can't use connection pool during shutdown
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.241+0000 m31200| 2015-07-19T23:40:06.241+0000 I CONTROL  [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.241+0000 m31200| 2015-07-19T23:40:06.241+0000 I NETWORK  [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.241+0000 m31200| 2015-07-19T23:40:06.241+0000 I NETWORK  [signalProcessingThread] closing listening socket: 18
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.242+0000 m31200| 2015-07-19T23:40:06.241+0000 I NETWORK  [signalProcessingThread] closing listening socket: 19
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.242+0000 m31200| 2015-07-19T23:40:06.241+0000 I NETWORK  [signalProcessingThread] closing listening socket: 20
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.243+0000 m31200| 2015-07-19T23:40:06.241+0000 I NETWORK  [signalProcessingThread] removing socket file: /tmp/mongodb-31200.sock
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.243+0000 m31200| 2015-07-19T23:40:06.241+0000 I NETWORK  [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.243+0000 m31200| 2015-07-19T23:40:06.241+0000 I NETWORK  [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.243+0000 m31200| 2015-07-19T23:40:06.241+0000 I STORAGE  [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.243+0000 m31200| 2015-07-19T23:40:06.241+0000 I NETWORK  [conn1] end connection 127.0.0.1:40634 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.243+0000 m31201| 2015-07-19T23:40:06.241+0000 I NETWORK  [conn3] end connection 10.139.123.131:35682 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.244+0000 m31201| 2015-07-19T23:40:06.241+0000 I NETWORK  [ReplExecNetThread-1] Socket recv() errno:104 Connection reset by peer 10.139.123.131:31200
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.244+0000 m31201| 2015-07-19T23:40:06.241+0000 I NETWORK  [ReplExecNetThread-1] SocketException: remote: 10.139.123.131:31200 error: 9001 socket exception [RECV_ERROR] server [10.139.123.131:31200]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.244+0000 m31202| 2015-07-19T23:40:06.241+0000 I NETWORK  [ReplExecNetThread-2] Socket recv() errno:104 Connection reset by peer 10.139.123.131:31200
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.244+0000 m31202| 2015-07-19T23:40:06.241+0000 I NETWORK  [conn3] end connection 10.139.123.131:41717 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.244+0000 m31201| 2015-07-19T23:40:06.241+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31200; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31200; network error while attempting to run command 'isMaster' on host 'ip-10-139-123-131:31200'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m31202| 2015-07-19T23:40:06.241+0000 I NETWORK  [ReplExecNetThread-2] SocketException: remote: 10.139.123.131:31200 error: 9001 socket exception [RECV_ERROR] server [10.139.123.131:31200]
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m31202| 2015-07-19T23:40:06.241+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31200; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31200; network error while attempting to run command 'isMaster' on host 'ip-10-139-123-131:31200'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m31200| 2015-07-19T23:40:06.242+0000 I NETWORK  [conn10] end connection 10.139.123.131:39335 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m31200| 2015-07-19T23:40:06.242+0000 I NETWORK  [conn12] end connection 10.139.123.131:39348 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m29000| 2015-07-19T23:40:06.242+0000 I NETWORK  [conn29] end connection 10.139.123.131:55293 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m29000| 2015-07-19T23:40:06.242+0000 I NETWORK  [conn23] end connection 10.139.123.131:55282 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m29000| 2015-07-19T23:40:06.242+0000 I NETWORK  [conn24] end connection 10.139.123.131:55283 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m31200| 2015-07-19T23:40:06.242+0000 I NETWORK  [conn8] end connection 10.139.123.131:39331 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m31202| 2015-07-19T23:40:06.242+0000 W NETWORK  [ReplExecNetThread-3] Failed to connect to 10.139.123.131:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m29000| 2015-07-19T23:40:06.242+0000 I NETWORK  [conn27] end connection 10.139.123.131:55291 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.245+0000 m31202| 2015-07-19T23:40:06.242+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31200; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31200; couldn't connect to server ip-10-139-123-131:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m31202| 2015-07-19T23:40:06.242+0000 I REPL     [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m29000| 2015-07-19T23:40:06.242+0000 I NETWORK  [conn25] end connection 10.139.123.131:55288 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m31202| 2015-07-19T23:40:06.243+0000 I REPL     [ReplicationExecutor] not electing self, ip-10-139-123-131:31201 would veto with 'ip-10-139-123-131:31202 is trying to elect itself but ip-10-139-123-131:31200 is already primary and more up-to-date'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m31202| 2015-07-19T23:40:06.243+0000 I REPL     [ReplicationExecutor] not electing self, we are not freshest
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m29000| 2015-07-19T23:40:06.242+0000 I NETWORK  [conn28] end connection 10.139.123.131:55292 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m31200| 2015-07-19T23:40:06.243+0000 I NETWORK  [conn85] end connection 10.139.123.131:39641 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m29000| 2015-07-19T23:40:06.243+0000 I NETWORK  [conn41] end connection 10.139.123.131:55715 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m29000| 2015-07-19T23:40:06.243+0000 I NETWORK  [conn42] end connection 10.139.123.131:55746 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m31202| 2015-07-19T23:40:06.243+0000 I NETWORK  [conn10] end connection 10.139.123.131:42041 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m31201| 2015-07-19T23:40:06.243+0000 W NETWORK  [ReplExecNetThread-2] Failed to connect to 10.139.123.131:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.246+0000 m31201| 2015-07-19T23:40:06.243+0000 I NETWORK  [conn10] end connection 10.139.123.131:36005 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.247+0000 m31201| 2015-07-19T23:40:06.243+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31200; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31200; couldn't connect to server ip-10-139-123-131:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.247+0000 m31201| 2015-07-19T23:40:06.244+0000 I REPL     [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.247+0000 m29000| 2015-07-19T23:40:06.244+0000 I NETWORK  [conn39] end connection 10.139.123.131:55692 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.247+0000 m31201| 2015-07-19T23:40:06.244+0000 I REPL     [ReplicationExecutor] possible election tie; sleeping 144ms until 2015-07-19T23:40:06.388+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.247+0000 m31200| 2015-07-19T23:40:06.244+0000 I NETWORK  [conn141] end connection 10.139.123.131:39781 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.247+0000 m31200| 2015-07-19T23:40:06.244+0000 I NETWORK  [conn137] end connection 10.139.123.131:39773 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.247+0000 m31200| 2015-07-19T23:40:06.244+0000 I NETWORK  [conn138] end connection 10.139.123.131:39776 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.247+0000 m31200| 2015-07-19T23:40:06.244+0000 I NETWORK  [conn140] end connection 10.139.123.131:39779 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.247+0000 m31200| 2015-07-19T23:40:06.244+0000 I NETWORK  [conn139] end connection 10.139.123.131:39778 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.388+0000 m31201| 2015-07-19T23:40:06.388+0000 I REPL     [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.389+0000 m31201| 2015-07-19T23:40:06.388+0000 I REPL     [ReplicationExecutor] running for election
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.389+0000 m31202| 2015-07-19T23:40:06.389+0000 I REPL     [ReplicationExecutor] replSetElect voting yea for ip-10-139-123-131:31201 (1)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.389+0000 m31201| 2015-07-19T23:40:06.389+0000 I REPL     [ReplicationExecutor] received vote: 1 votes from ip-10-139-123-131:31202
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.389+0000 m31201| 2015-07-19T23:40:06.389+0000 I REPL     [ReplicationExecutor] election succeeded, assuming primary role
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.389+0000 m31201| 2015-07-19T23:40:06.389+0000 I REPL     [ReplicationExecutor] transition to PRIMARY
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.423+0000 m31200| 2015-07-19T23:40:06.422+0000 I STORAGE  [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:06.423+0000 m31200| 2015-07-19T23:40:06.422+0000 I CONTROL  [signalProcessingThread] dbexit:  rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.154+0000 m31201| 2015-07-19T23:40:07.154+0000 I REPL     [rsSync] transition to primary complete; database writes are now permitted
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.326+0000 2015-07-19T23:40:07.326+0000 I -        [main] shell: stopped mongo program on port 31200
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.326+0000 ReplSetTest stop *** Mongod in port 31200 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.326+0000 ReplSetTest n: 1 ports: [ 31200, 31201, 31202 ] 31201 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.326+0000 ReplSetTest stop *** Shutting down mongod in port 31201 ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.326+0000 m31201| 2015-07-19T23:40:07.326+0000 I CONTROL  [signalProcessingThread] got signal 15 (Terminated), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.326+0000 m31201| 2015-07-19T23:40:07.326+0000 I REPL     [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.395+0000 m31201| 2015-07-19T23:40:07.394+0000 I STORAGE  [WiredTigerRecordStoreThread for local.oplog.rs] shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.507+0000 2015-07-19T23:40:07.507+0000 I NETWORK  [ReplicaSetMonitorWatcher] Socket closed remotely, no longer connected (idle 10 secs, remote host 10.139.123.131:31100)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.508+0000 2015-07-19T23:40:07.508+0000 W NETWORK  [ReplicaSetMonitorWatcher] Failed to connect to 10.139.123.131:31100, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.508+0000 2015-07-19T23:40:07.508+0000 I NETWORK  [ReplicaSetMonitorWatcher] Socket closed remotely, no longer connected (idle 10 secs, remote host 10.139.123.131:31101)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.509+0000 2015-07-19T23:40:07.508+0000 W NETWORK  [ReplicaSetMonitorWatcher] Failed to connect to 10.139.123.131:31101, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.509+0000 2015-07-19T23:40:07.509+0000 I NETWORK  [ReplicaSetMonitorWatcher] Socket closed remotely, no longer connected (idle 10 secs, remote host 10.139.123.131:31102)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.509+0000 2015-07-19T23:40:07.509+0000 W NETWORK  [ReplicaSetMonitorWatcher] Failed to connect to 10.139.123.131:31102, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.509+0000 2015-07-19T23:40:07.509+0000 W NETWORK  [ReplicaSetMonitorWatcher] No primary detected for set test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.509+0000 2015-07-19T23:40:07.509+0000 I NETWORK  [ReplicaSetMonitorWatcher] All nodes for set test-rs0 are down. This has happened for 1 checks in a row. Polling will stop after 29 more failed checks
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.510+0000 2015-07-19T23:40:07.509+0000 I NETWORK  [ReplicaSetMonitorWatcher] Socket closed remotely, no longer connected (idle 10 secs, remote host 10.139.123.131:31200)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.510+0000 2015-07-19T23:40:07.510+0000 W NETWORK  [ReplicaSetMonitorWatcher] Failed to connect to 10.139.123.131:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.510+0000 m31201| 2015-07-19T23:40:07.510+0000 I STORAGE  [conn8] got request after shutdown()
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.510+0000 2015-07-19T23:40:07.510+0000 I NETWORK  [ReplicaSetMonitorWatcher] Detected bad connection created at 1437349137494164 microSec, clearing pool for ip-10-139-123-131:31201 of 0 connections
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:07.510+0000 2015-07-19T23:40:07.510+0000 W NETWORK  [ReplicaSetMonitorWatcher] No primary detected for set test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.156+0000 m31201| 2015-07-19T23:40:08.155+0000 I CONTROL  [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.156+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.156+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [signalProcessingThread] closing listening socket: 21
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.156+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [signalProcessingThread] closing listening socket: 22
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.157+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [signalProcessingThread] closing listening socket: 23
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.157+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [signalProcessingThread] removing socket file: /tmp/mongodb-31201.sock
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.157+0000 m31202| 2015-07-19T23:40:08.156+0000 I NETWORK  [conn5] end connection 10.139.123.131:41725 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.157+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.157+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.157+0000 m31201| 2015-07-19T23:40:08.156+0000 I STORAGE  [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.157+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [conn1] end connection 127.0.0.1:33124 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.157+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [conn14] end connection 10.139.123.131:36238 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.157+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [conn15] end connection 10.139.123.131:36242 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.158+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [conn16] end connection 10.139.123.131:36244 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.158+0000 m31201| 2015-07-19T23:40:08.156+0000 I NETWORK  [conn5] end connection 10.139.123.131:35690 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.236+0000 m31202| 2015-07-19T23:40:08.236+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31201; HostUnreachable network error while attempting to run command 'replSetHeartbeat' on host 'ip-10-139-123-131:31201'
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.237+0000 m31202| 2015-07-19T23:40:08.237+0000 W NETWORK  [ReplExecNetThread-2] Failed to connect to 10.139.123.131:31201, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.237+0000 m31202| 2015-07-19T23:40:08.237+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31201; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31201; couldn't connect to server ip-10-139-123-131:31201, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.237+0000 m31202| 2015-07-19T23:40:08.237+0000 W NETWORK  [ReplExecNetThread-3] Failed to connect to 10.139.123.131:31201, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.238+0000 m31202| 2015-07-19T23:40:08.237+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31201; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31201; couldn't connect to server ip-10-139-123-131:31201, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.243+0000 m31202| 2015-07-19T23:40:08.243+0000 W NETWORK  [ReplExecNetThread-2] Failed to connect to 10.139.123.131:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.243+0000 m31202| 2015-07-19T23:40:08.243+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31200; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31200; couldn't connect to server ip-10-139-123-131:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.244+0000 m31202| 2015-07-19T23:40:08.243+0000 W NETWORK  [ReplExecNetThread-3] Failed to connect to 10.139.123.131:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.244+0000 m31202| 2015-07-19T23:40:08.243+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31200; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31200; couldn't connect to server ip-10-139-123-131:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.244+0000 m31202| 2015-07-19T23:40:08.244+0000 W NETWORK  [ReplExecNetThread-2] Failed to connect to 10.139.123.131:31200, reason: errno:111 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.244+0000 m31202| 2015-07-19T23:40:08.244+0000 I REPL     [ReplicationExecutor] Error in heartbeat request to ip-10-139-123-131:31200; HostUnreachable Failed attempt to connect to ip-10-139-123-131:31200; couldn't connect to server ip-10-139-123-131:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.322+0000 m31201| 2015-07-19T23:40:08.322+0000 I STORAGE  [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:08.322+0000 m31201| 2015-07-19T23:40:08.322+0000 I CONTROL  [signalProcessingThread] dbexit:  rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:09.326+0000 2015-07-19T23:40:09.326+0000 I -        [main] shell: stopped mongo program on port 31201
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:09.326+0000 ReplSetTest stop *** Mongod in port 31201 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:09.326+0000 ReplSetTest n: 2 ports: [ 31200, 31201, 31202 ] 31202 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:09.327+0000 ReplSetTest stop *** Shutting down mongod in port 31202 ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:09.327+0000 m31202| 2015-07-19T23:40:09.326+0000 I CONTROL  [signalProcessingThread] got signal 15 (Terminated), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:09.327+0000 m31202| 2015-07-19T23:40:09.327+0000 I REPL     [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:09.358+0000 m31202| 2015-07-19T23:40:09.358+0000 I STORAGE  [WiredTigerRecordStoreThread for local.oplog.rs] shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.154+0000 m31202| 2015-07-19T23:40:10.154+0000 I CONTROL  [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.155+0000 m31202| 2015-07-19T23:40:10.154+0000 I NETWORK  [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.155+0000 m31202| 2015-07-19T23:40:10.154+0000 I NETWORK  [signalProcessingThread] closing listening socket: 24
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.155+0000 m31202| 2015-07-19T23:40:10.154+0000 I NETWORK  [signalProcessingThread] closing listening socket: 25
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.155+0000 m31202| 2015-07-19T23:40:10.154+0000 I NETWORK  [signalProcessingThread] closing listening socket: 26
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.155+0000 m31202| 2015-07-19T23:40:10.154+0000 I NETWORK  [signalProcessingThread] removing socket file: /tmp/mongodb-31202.sock
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.155+0000 m31202| 2015-07-19T23:40:10.154+0000 I NETWORK  [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.156+0000 m31202| 2015-07-19T23:40:10.154+0000 I NETWORK  [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.156+0000 m31202| 2015-07-19T23:40:10.154+0000 I STORAGE  [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.156+0000 m31202| 2015-07-19T23:40:10.155+0000 I NETWORK  [conn6] end connection 10.139.123.131:41840 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.156+0000 m31202| 2015-07-19T23:40:10.155+0000 I NETWORK  [conn14] end connection 10.139.123.131:42275 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.156+0000 m31202| 2015-07-19T23:40:10.155+0000 I NETWORK  [conn1] end connection 127.0.0.1:38330 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.156+0000 m31202| 2015-07-19T23:40:10.155+0000 I NETWORK  [conn12] end connection 10.139.123.131:42269 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.315+0000 m31202| 2015-07-19T23:40:10.315+0000 I STORAGE  [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.316+0000 m31202| 2015-07-19T23:40:10.315+0000 I CONTROL  [signalProcessingThread] dbexit:  rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.326+0000 2015-07-19T23:40:10.326+0000 I -        [main] shell: stopped mongo program on port 31202
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.327+0000 ReplSetTest stop *** Mongod in port 31202 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.327+0000 ReplSetTest stopSet deleting all dbpaths
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.338+0000 ReplSetTest stopSet *** Shut down repl set - test worked ****
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.338+0000 ReplSetTest n: 0 ports: [ 29000 ] 29000 number
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.338+0000 ReplSetTest stop *** Shutting down mongod in port 29000 ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.338+0000 m29000| 2015-07-19T23:40:10.338+0000 I CONTROL  [signalProcessingThread] got signal 15 (Terminated), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.338+0000 m29000| 2015-07-19T23:40:10.338+0000 I REPL     [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.750+0000 m29000| 2015-07-19T23:40:10.750+0000 I STORAGE  [WiredTigerRecordStoreThread for local.oplog.rs] shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.751+0000 m29000| 2015-07-19T23:40:10.750+0000 I CONTROL  [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.751+0000 m29000| 2015-07-19T23:40:10.750+0000 I NETWORK  [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.751+0000 m29000| 2015-07-19T23:40:10.750+0000 I NETWORK  [signalProcessingThread] closing listening socket: 29
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.751+0000 m29000| 2015-07-19T23:40:10.750+0000 I NETWORK  [signalProcessingThread] closing listening socket: 30
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.751+0000 m29000| 2015-07-19T23:40:10.751+0000 I NETWORK  [signalProcessingThread] closing listening socket: 31
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.751+0000 m29000| 2015-07-19T23:40:10.751+0000 I NETWORK  [signalProcessingThread] removing socket file: /tmp/mongodb-29000.sock
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.752+0000 m29000| 2015-07-19T23:40:10.751+0000 I NETWORK  [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.752+0000 m29000| 2015-07-19T23:40:10.751+0000 I NETWORK  [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.752+0000 m29000| 2015-07-19T23:40:10.751+0000 I STORAGE  [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.752+0000 m29000| 2015-07-19T23:40:10.751+0000 I NETWORK  [conn2] end connection 10.139.123.131:55234 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.752+0000 m29000| 2015-07-19T23:40:10.751+0000 I NETWORK  [conn1] end connection 127.0.0.1:54348 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.752+0000 m29000| 2015-07-19T23:40:10.751+0000 I NETWORK  [conn31] end connection 10.139.123.131:55343 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.795+0000 m29000| 2015-07-19T23:40:10.795+0000 I STORAGE  [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:10.795+0000 m29000| 2015-07-19T23:40:10.795+0000 I CONTROL  [signalProcessingThread] dbexit:  rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.338+0000 2015-07-19T23:40:11.338+0000 I -        [main] shell: stopped mongo program on port 29000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.338+0000 ReplSetTest stop *** Mongod in port 29000 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.338+0000 ReplSetTest stopSet deleting all dbpaths
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.340+0000 ReplSetTest stopSet *** Shut down repl set - test worked ****
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.340+0000 *** ShardingTest test completed successfully in 105.288 seconds ***
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.340+0000 2015-07-19T23:40:11.340+0000 E QUERY    [main] Error: 5 threads threw
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.340+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.340+0000 Error: [999] != [1000] are not equal : undefined
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.341+0000     at quietlyDoAssert (jstests/concurrency/fsm_libs/assert.js:53:15)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.341+0000     at Function.assert.eq (src/mongo/shell/assert.js:38:5)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.341+0000     at wrapAssertFn (jstests/concurrency/fsm_libs/assert.js:60:16)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.341+0000     at Function.assertWithLevel.(anonymous function) [as eq] (jstests/concurrency/fsm_libs/assert.js:99:13)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.341+0000     at Object.query (jstests/concurrency/fsm_workloads/agg_base.js:44:31)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.341+0000     at Object.runFSM [as run] (jstests/concurrency/fsm_libs/fsm.js:19:16)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.341+0000     at :8:13
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.341+0000     at Object.main (jstests/concurrency/fsm_libs/worker_thread.js:81:17)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.341+0000     at ____MongoToV8_newFunction_temp (:5:25)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.342+0000     at ____MongoToV8_newFunction_temp (:3:24)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.342+0000
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.342+0000     at throwError (jstests/concurrency/fsm_libs/runner.js:268:23)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.342+0000     at jstests/concurrency/fsm_libs/runner.js:396:17
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.342+0000     at Array.forEach (native)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.342+0000     at runWorkloads (jstests/concurrency/fsm_libs/runner.js:351:22)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.342+0000     at serial (jstests/concurrency/fsm_libs/runner.js:415:13)
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.342+0000     at jstests/concurrency/fsm_all_sharded_replication.js:68:1 at jstests/concurrency/fsm_libs/runner.js:280
[js_test:fsm_all_sharded_replication] 2015-07-19T23:40:11.342+0000 failed to load: jstests/concurrency/fsm_all_sharded_replication.js
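Editor's note on the failure recorded above: the cluster itself tore down cleanly (every mongod exits with "dbexit:  rc: 0" and ShardingTest reports success); the nonzero outcome comes solely from the five worker threads whose assertion the FSM runner rethrows after teardown, producing the final "failed to load". The stack trace points at the query state of jstests/concurrency/fsm_workloads/agg_base.js (line 44), which compares the number of documents an aggregation returns against the number seeded during setup. The sketch below is a minimal reconstruction of that kind of check, not the workload's actual source: the numDocs value, the document shape, the empty $match pipeline, and the assertWhenOwnColl wrapper name (a leveled assertion built by jstests/concurrency/fsm_libs/assert.js, per the trace) are assumptions inferred from the error text and the file names in the stack.

    // Hypothetical sketch of the failing check -- illustrative only, not the
    // actual contents of jstests/concurrency/fsm_workloads/agg_base.js.
    var numDocs = 1000;  // assumed from the "[999] != [1000]" message

    // setup(): seed the collection so every FSM state can assume numDocs docs.
    function setup(db, collName) {
        var bulk = db[collName].initializeUnorderedBulkOp();
        for (var i = 0; i < numDocs; ++i) {
            bulk.insert({_id: i, flag: i % 2 === 0});
        }
        var res = bulk.execute();
        assert.writeOK(res);
        assert.eq(numDocs, res.nInserted);
    }

    // query(): an unfiltered aggregation should see every seeded document.
    // A document lost or not yet visible under the concurrent sharded
    // workload surfaces here as "Error: [999] != [1000] are not equal".
    function query(db, collName) {
        var count = db[collName].aggregate([{$match: {}}]).itcount();
        assertWhenOwnColl.eq(count, numDocs);  // leveled assert from fsm_libs
    }

In a run like this one, a count that is off by one under fsm_all_sharded_replication typically indicates a document missed during a concurrent chunk migration or read from a lagging member, which is exactly the class of race these concurrency workloads exist to surface.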